* [patch 1/8] mempool - Add page allocator
From: Matthew Dobson @ 2006-01-30 21:23 UTC
  To: linux-kernel; +Cc: penberg, akpm

plain text document attachment (mempool-add_page_allocator.patch)
Add an allocator to the common mempool code: a simple page allocator.

This will be used by the next patch in the series to replace duplicate
mempool-backed page allocators in 2 places in the kernel.  It is also
likely that there will be more users in the future.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

 include/linux/mempool.h |   12 ++++++++++++
 mm/mempool.c            |   18 ++++++++++++++++++
 2 files changed, 30 insertions(+)

Index: linux-2.6.16-rc1-mm4+mempool_work/mm/mempool.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/mm/mempool.c
+++ linux-2.6.16-rc1-mm4+mempool_work/mm/mempool.c
@@ -289,3 +289,21 @@ void mempool_free_slab(void *element, vo
 	kmem_cache_free(mem, element);
 }
 EXPORT_SYMBOL(mempool_free_slab);
+
+/*
+ * A simple mempool-backed page allocator that allocates pages
+ * of the order specified by pool_data.
+ */
+void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
+{
+	int order = (int)(long) pool_data;
+	return alloc_pages(gfp_mask, order);
+}
+EXPORT_SYMBOL(mempool_alloc_pages);
+
+void mempool_free_pages(void *element, void *pool_data)
+{
+	int order = (int)(long) pool_data;
+	__free_pages(element, order);
+}
+EXPORT_SYMBOL(mempool_free_pages);
Index: linux-2.6.16-rc1-mm4+mempool_work/include/linux/mempool.h
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/include/linux/mempool.h
+++ linux-2.6.16-rc1-mm4+mempool_work/include/linux/mempool.h
@@ -38,4 +38,16 @@ extern void mempool_free(void *element, 
 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
 void mempool_free_slab(void *element, void *pool_data);
 
+/*
+ * A mempool_alloc_t and mempool_free_t for a simple page allocator that
+ * allocates pages of the order specified by pool_data
+ */
+void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
+void mempool_free_pages(void *element, void *pool_data);
+static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
+{
+	return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages,
+			      (void *)(long) order);
+}
+
 #endif /* _LINUX_MEMPOOL_H */
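
For illustration, a minimal sketch of how a caller might use the new
page pool; the pool size, order, and all "my_*" names below are
hypothetical, not part of the patch:

	#include <linux/mempool.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	static mempool_t *my_page_pool;

	static int my_driver_init(void)
	{
		/* Keep 16 order-0 pages in reserve for low-memory paths. */
		my_page_pool = mempool_create_page_pool(16, 0);
		return my_page_pool ? 0 : -ENOMEM;
	}

	static struct page *my_get_page(void)
	{
		/* Falls back to the reserved pages when alloc_pages() fails. */
		return mempool_alloc(my_page_pool, GFP_NOIO);
	}

	static void my_put_page(struct page *page)
	{
		/* Refills the reserve first, then frees to the page allocator. */
		mempool_free(page, my_page_pool);
	}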

--



* [patch 2/8] mempool - Use common mempool page allocator
From: Matthew Dobson @ 2006-01-30 21:23 UTC
  To: linux-kernel; +Cc: penberg, akpm

plain text document attachment (mempool-use_page_allocator.patch)
Convert two mempool users that currently use their own mempool-backed page
allocators to use the generic mempool page allocator.

Also included are 2 trivial whitespace fixes.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

 drivers/md/dm-crypt.c |   17 +----------------
 mm/highmem.c          |   23 +++++++----------------
 2 files changed, 8 insertions(+), 32 deletions(-)

Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-crypt.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/dm-crypt.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-crypt.c
@@ -94,20 +94,6 @@ struct crypt_config {
 static kmem_cache_t *_crypt_io_pool;
 
 /*
- * Mempool alloc and free functions for the page
- */
-static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
-{
-	return alloc_page(gfp_mask);
-}
-
-static void mempool_free_page(void *page, void *data)
-{
-	__free_page(page);
-}
-
-
-/*
  * Different IV generation algorithms:
  *
  * plain: the initial vector is the 32-bit low-endian version of the sector
@@ -637,8 +623,7 @@ static int crypt_ctr(struct dm_target *t
 		goto bad3;
 	}
 
-	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
-				       mempool_free_page, NULL);
+	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = PFX "Cannot allocate page mempool";
 		goto bad4;
Index: linux-2.6.16-rc1-mm4+mempool_work/mm/highmem.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/mm/highmem.c
+++ linux-2.6.16-rc1-mm4+mempool_work/mm/highmem.c
@@ -31,14 +31,9 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
 {
-	return alloc_page(gfp_mask | GFP_DMA);
-}
-
-static void page_pool_free(void *page, void *data)
-{
-	__free_page(page);
+	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
 }
 
 /*
@@ -51,11 +46,6 @@ static void page_pool_free(void *page, v
  */
 #ifdef CONFIG_HIGHMEM
 
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
-{
-	return alloc_page(gfp_mask);
-}
-
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -229,7 +219,7 @@ static __init int init_emergency_pool(vo
 	if (!i.totalhigh)
 		return 0;
 
-	page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
+	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
 	if (!page_pool)
 		BUG();
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
@@ -272,7 +262,8 @@ int init_emergency_isa_pool(void)
 	if (isa_page_pool)
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+				       mempool_free_pages, (void *) 0);
 	if (!isa_page_pool)
 		BUG();
 
@@ -337,7 +328,7 @@ static void bounce_end_io(struct bio *bi
 	bio_put(bio);
 }
 
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
 {
 	if (bio->bi_size)
 		return 1;
@@ -384,7 +375,7 @@ static int bounce_end_io_read_isa(struct
 }
 
 static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
-			mempool_t *pool)
+			       mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;

--



* [patch 3/8] mempool - Add kmalloc allocator
From: Matthew Dobson @ 2006-01-30 21:23 UTC
  To: linux-kernel; +Cc: penberg, akpm

plain text document attachment (mempool-add_kmalloc_allocator.patch)
Add another allocator to the common mempool code: a kmalloc/kfree allocator.

This will be used by the next patch in the series to replace duplicate
mempool-backed kmalloc allocators in several places in the kernel.
It is also very likely that there will be more users in the future.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

 include/linux/mempool.h |   12 ++++++++++++
 mm/mempool.c            |   17 +++++++++++++++++
 2 files changed, 29 insertions(+)

Index: linux-2.6.16-rc1-mm4+mempool_work/include/linux/mempool.h
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/include/linux/mempool.h
+++ linux-2.6.16-rc1-mm4+mempool_work/include/linux/mempool.h
@@ -39,6 +39,18 @@ void *mempool_alloc_slab(gfp_t gfp_mask,
 void mempool_free_slab(void *element, void *pool_data);
 
 /*
+ * A mempool_alloc_t and mempool_free_t to kmalloc the amount of memory
+ * specified by pool_data
+ */
+void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
+void mempool_kfree(void *element, void *pool_data);
+static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
+{
+	return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
+			      (void *) size);
+}
+
+/*
  * A mempool_alloc_t and mempool_free_t for a simple page allocator that
  * allocates pages of the order specified by pool_data
  */
Index: linux-2.6.16-rc1-mm4+mempool_work/mm/mempool.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/mm/mempool.c
+++ linux-2.6.16-rc1-mm4+mempool_work/mm/mempool.c
@@ -291,6 +291,23 @@ void mempool_free_slab(void *element, vo
 EXPORT_SYMBOL(mempool_free_slab);
 
 /*
+ * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
+ * specified by pool_data
+ */
+void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
+{
+	size_t size = (size_t) pool_data;
+	return kmalloc(size, gfp_mask);
+}
+EXPORT_SYMBOL(mempool_kmalloc);
+
+void mempool_kfree(void *element, void *pool_data)
+{
+	kfree(element);
+}
+EXPORT_SYMBOL(mempool_kfree);
+
+/*
  * A simple mempool-backed page allocator that allocates pages
  * of the order specified by pool_data.
  */
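
For illustration, a minimal sketch of how the kmalloc-backed pool might
be used; the structure, pool size, and "my_*" names below are
hypothetical, not part of the patch:

	#include <linux/mempool.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	struct my_request {
		int	id;
		char	payload[64];
	};

	static mempool_t *req_pool;

	static int my_pool_init(void)
	{
		/* Keep 32 request-sized buffers in reserve. */
		req_pool = mempool_create_kmalloc_pool(32,
					sizeof(struct my_request));
		return req_pool ? 0 : -ENOMEM;
	}

	static struct my_request *my_get_request(void)
	{
		/* Served by kmalloc() normally, by the reserve under pressure. */
		return mempool_alloc(req_pool, GFP_NOIO);
	}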

--



* [patch 4/8] mempool - Use common mempool kmalloc allocator
From: Matthew Dobson @ 2006-01-30 21:23 UTC
  To: linux-kernel; +Cc: penberg, akpm

plain text document attachment (mempool-use_kmalloc_allocator.patch)
This patch changes several mempool users, all of which are basically just
wrappers around kmalloc(), to use the common mempool_kmalloc/mempool_kfree
rather than their own wrapper functions, removing a bunch of duplicated code.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

 drivers/block/pktcdvd.c      |   26 ++----------------
 drivers/md/bitmap.c          |   14 +---------
 drivers/md/dm-io.c           |   13 +--------
 drivers/md/dm-raid1.c        |   14 +---------
 drivers/s390/scsi/zfcp_aux.c |   60 ++++++++++++-------------------------------
 drivers/scsi/lpfc/lpfc_mem.c |   22 ++-------------
 fs/bio.c                     |   14 +---------
 7 files changed, 34 insertions(+), 129 deletions(-)

Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/block/pktcdvd.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/block/pktcdvd.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/block/pktcdvd.c
@@ -230,16 +230,6 @@ static int pkt_grow_pktlist(struct pktcd
 	return 1;
 }
 
-static void *pkt_rb_alloc(gfp_t gfp_mask, void *data)
-{
-	return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
-}
-
-static void pkt_rb_free(void *ptr, void *data)
-{
-	kfree(ptr);
-}
-
 static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
 {
 	struct rb_node *n = rb_next(&node->rb_node);
@@ -2086,16 +2076,6 @@ static int pkt_close(struct inode *inode
 }
 

-static void *psd_pool_alloc(gfp_t gfp_mask, void *data)
-{
-	return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
-}
-
-static void psd_pool_free(void *ptr, void *data)
-{
-	kfree(ptr);
-}
-
 static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
 {
 	struct packet_stacked_data *psd = bio->bi_private;
@@ -2495,7 +2475,8 @@ static int pkt_setup_dev(struct pkt_ctrl
 	if (!pd)
 		return ret;
 
-	pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
+	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
+						  sizeof(struct pkt_rb_node));
 	if (!pd->rb_pool)
 		goto out_mem;
 
@@ -2657,7 +2638,8 @@ static int __init pkt_init(void)
 {
 	int ret;
 
-	psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL);
+	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
+					sizeof(struct packet_stacked_data));
 	if (!psd_pool)
 		return -ENOMEM;
 
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/scsi/lpfc/lpfc_mem.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/scsi/lpfc/lpfc_mem.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,18 +38,6 @@
 #define LPFC_MBUF_POOL_SIZE     64      /* max elements in MBUF safety pool */
 #define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
 
-static void *
-lpfc_pool_kmalloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc((unsigned long)data, gfp_flags);
-}
-
-static void
-lpfc_pool_kfree(void *obj, void *data)
-{
-	kfree(obj);
-}
-
 int
 lpfc_mem_alloc(struct lpfc_hba * phba)
 {
@@ -79,15 +67,13 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 		pool->current_count++;
 	}
 
-	phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
-				lpfc_pool_kmalloc, lpfc_pool_kfree,
-				(void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
+	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+							 sizeof(LPFC_MBOXQ_t));
 	if (!phba->mbox_mem_pool)
 		goto fail_free_mbuf_pool;
 
-	phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
-			lpfc_pool_kmalloc, lpfc_pool_kfree,
-			(void *)(unsigned long)sizeof(struct lpfc_nodelist));
+	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+						sizeof(struct lpfc_nodelist));
 	if (!phba->nlp_mem_pool)
 		goto fail_free_mbox_pool;
 
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-raid1.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/dm-raid1.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-raid1.c
@@ -122,16 +122,6 @@ static inline sector_t region_to_sector(
 /* FIXME move this */
 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
 
-static void *region_alloc(gfp_t gfp_mask, void *pool_data)
-{
-	return kmalloc(sizeof(struct region), gfp_mask);
-}
-
-static void region_free(void *element, void *pool_data)
-{
-	kfree(element);
-}
-
 #define MIN_REGIONS 64
 #define MAX_RECOVERY 1
 static int rh_init(struct region_hash *rh, struct mirror_set *ms,
@@ -173,8 +163,8 @@ static int rh_init(struct region_hash *r
 	INIT_LIST_HEAD(&rh->quiesced_regions);
 	INIT_LIST_HEAD(&rh->recovered_regions);
 
-	rh->region_pool = mempool_create(MIN_REGIONS, region_alloc,
-					 region_free, NULL);
+	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
+						      sizeof(struct region));
 	if (!rh->region_pool) {
 		vfree(rh->buckets);
 		rh->buckets = NULL;
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/s390/scsi/zfcp_aux.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/s390/scsi/zfcp_aux.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/s390/scsi/zfcp_aux.c
@@ -829,18 +829,6 @@ zfcp_unit_dequeue(struct zfcp_unit *unit
 	device_unregister(&unit->sysfs_device);
 }
 
-static void *
-zfcp_mempool_alloc(gfp_t gfp_mask, void *size)
-{
-	return kmalloc((size_t) size, gfp_mask);
-}
-
-static void
-zfcp_mempool_free(void *element, void *size)
-{
-	kfree(element);
-}
-
 /*
  * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
  * commands.
@@ -853,51 +841,39 @@ static int
 zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 {
 	adapter->pool.fsf_req_erp =
-		mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free, (void *)
-			       sizeof(struct zfcp_fsf_req_pool_element));
-
-	if (NULL == adapter->pool.fsf_req_erp)
+		mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
+				sizeof(struct zfcp_fsf_req_pool_element));
+	if (!adapter->pool.fsf_req_erp)
 		return -ENOMEM;
 
 	adapter->pool.fsf_req_scsi =
-		mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free, (void *)
-			       sizeof(struct zfcp_fsf_req_pool_element));
-
-	if (NULL == adapter->pool.fsf_req_scsi)
+		mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
+				sizeof(struct zfcp_fsf_req_pool_element));
+	if (!adapter->pool.fsf_req_scsi)
 		return -ENOMEM;
 
 	adapter->pool.fsf_req_abort =
-		mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free, (void *)
-			       sizeof(struct zfcp_fsf_req_pool_element));
-
-	if (NULL == adapter->pool.fsf_req_abort)
+		mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
+				sizeof(struct zfcp_fsf_req_pool_element));
+	if (!adapter->pool.fsf_req_abort)
 		return -ENOMEM;
 
 	adapter->pool.fsf_req_status_read =
-		mempool_create(ZFCP_POOL_STATUS_READ_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free,
-			       (void *) sizeof(struct zfcp_fsf_req));
-
-	if (NULL == adapter->pool.fsf_req_status_read)
+		mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
+					    sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.fsf_req_status_read)
 		return -ENOMEM;
 
 	adapter->pool.data_status_read =
-		mempool_create(ZFCP_POOL_STATUS_READ_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free,
-			       (void *) sizeof(struct fsf_status_read_buffer));
-
-	if (NULL == adapter->pool.data_status_read)
+		mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
+					sizeof(struct fsf_status_read_buffer));
+	if (!adapter->pool.data_status_read)
 		return -ENOMEM;
 
 	adapter->pool.data_gid_pn =
-		mempool_create(ZFCP_POOL_DATA_GID_PN_NR,
-			       zfcp_mempool_alloc, zfcp_mempool_free, (void *)
-			       sizeof(struct zfcp_gid_pn_data));
-
-	if (NULL == adapter->pool.data_gid_pn)
+		mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR,
+					    sizeof(struct zfcp_gid_pn_data));
+	if (!adapter->pool.data_gid_pn)
 		return -ENOMEM;
 
 	return 0;
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-io.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/dm-io.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-io.c
@@ -32,16 +32,6 @@ struct io {
 static unsigned _num_ios;
 static mempool_t *_io_pool;
 
-static void *alloc_io(gfp_t gfp_mask, void *pool_data)
-{
-	return kmalloc(sizeof(struct io), gfp_mask);
-}
-
-static void free_io(void *element, void *pool_data)
-{
-	kfree(element);
-}
-
 static unsigned int pages_to_ios(unsigned int pages)
 {
 	return 4 * pages;	/* too many ? */
@@ -65,7 +55,8 @@ static int resize_pool(unsigned int new_
 
 	} else {
 		/* create new pool */
-		_io_pool = mempool_create(new_ios, alloc_io, free_io, NULL);
+		_io_pool = mempool_create_kmalloc_pool(new_ios,
+						       sizeof(struct io));
 		if (!_io_pool)
 			return -ENOMEM;
 
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/bitmap.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/bitmap.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/bitmap.c
@@ -89,16 +89,6 @@ int bitmap_active(struct bitmap *bitmap)
 }
 
 #define WRITE_POOL_SIZE 256
-/* mempool for queueing pending writes on the bitmap file */
-static void *write_pool_alloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc(sizeof(struct page_list), gfp_flags);
-}
-
-static void write_pool_free(void *ptr, void *data)
-{
-	kfree(ptr);
-}
 
 /*
  * just a placeholder - calls kmalloc for bitmap pages
@@ -1564,8 +1554,8 @@ int bitmap_create(mddev_t *mddev)
 	spin_lock_init(&bitmap->write_lock);
 	INIT_LIST_HEAD(&bitmap->complete_pages);
 	init_waitqueue_head(&bitmap->write_wait);
-	bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc,
-				write_pool_free, NULL);
+	bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE,
+						sizeof(struct page_list));
 	err = -ENOMEM;
 	if (!bitmap->write_pool)
 		goto error;
Index: linux-2.6.16-rc1-mm4+mempool_work/fs/bio.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/fs/bio.c
+++ linux-2.6.16-rc1-mm4+mempool_work/fs/bio.c
@@ -1127,16 +1127,6 @@ struct bio_pair *bio_split(struct bio *b
 	return bp;
 }
 
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
-
-static void bio_pair_free(void *bp, void *data)
-{
-	kfree(bp);
-}
-
 
 /*
  * create memory pools for biovec's in a bio_set.
@@ -1257,8 +1247,8 @@ static int __init init_bio(void)
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
-	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
-				bio_pair_alloc, bio_pair_free, NULL);
+	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+						     sizeof(struct bio_pair));
 	if (!bio_split_pool)
 		panic("bio: can't create split pool\n");
 

--



* [patch 5/8] mempool - Add kzalloc allocator
From: Matthew Dobson @ 2006-01-30 21:23 UTC
  To: linux-kernel; +Cc: penberg, akpm

plain text document attachment (mempool-add_kzalloc_allocator.patch)
Add another allocator to the common mempool code: a kzalloc/kfree allocator.

This will be used by the next patch in the series to replace a mempool-backed
kzalloc allocator. It is also very likely that there will be more users in the
future.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

 include/linux/mempool.h |   10 ++++++++--
 mm/mempool.c            |    7 +++++++
 2 files changed, 15 insertions(+), 2 deletions(-)

Index: linux-2.6.16-rc1-mm4+mempool_work/include/linux/mempool.h
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/include/linux/mempool.h
+++ linux-2.6.16-rc1-mm4+mempool_work/include/linux/mempool.h
@@ -39,16 +39,22 @@ void *mempool_alloc_slab(gfp_t gfp_mask,
 void mempool_free_slab(void *element, void *pool_data);
 
 /*
- * A mempool_alloc_t and mempool_free_t to kmalloc the amount of memory
- * specified by pool_data
+ * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree
+ * the amount of memory specified by pool_data
  */
 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
+void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data);
 void mempool_kfree(void *element, void *pool_data);
 static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
 {
 	return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
 			      (void *) size);
 }
+static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size)
+{
+	return mempool_create(min_nr, mempool_kzalloc, mempool_kfree,
+			      (void *) size);
+}
 
 /*
  * A mempool_alloc_t and mempool_free_t for a simple page allocator that
Index: linux-2.6.16-rc1-mm4+mempool_work/mm/mempool.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/mm/mempool.c
+++ linux-2.6.16-rc1-mm4+mempool_work/mm/mempool.c
@@ -301,6 +301,13 @@ void *mempool_kmalloc(gfp_t gfp_mask, vo
 }
 EXPORT_SYMBOL(mempool_kmalloc);
 
+void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
+{
+	size_t size = (size_t) pool_data;
+	return kzalloc(size, gfp_mask);
+}
+EXPORT_SYMBOL(mempool_kzalloc);
+
 void mempool_kfree(void *element, void *pool_data)
 {
 	kfree(element);
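
For illustration, a hedged sketch with hypothetical names. One caveat
worth keeping in mind: kzalloc() zeroes only freshly allocated elements;
an element recycled from the pool's reserve is returned exactly as it
was freed, so callers that rely on zeroed memory should re-initialize
recycled elements themselves.

	#include <linux/mempool.h>
	#include <linux/errno.h>

	struct my_ctx {
		int	state;
		void	*cookie;
	};

	static mempool_t *ctx_pool;

	static int my_ctx_pool_init(void)
	{
		/* Keep 8 context-sized, zero-filled buffers in reserve. */
		ctx_pool = mempool_create_kzalloc_pool(8,
					sizeof(struct my_ctx));
		return ctx_pool ? 0 : -ENOMEM;
	}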

--



* [patch 6/8] mempool - Use common mempool kzalloc allocator
From: Matthew Dobson @ 2006-01-30 21:24 UTC
  To: linux-kernel; +Cc: penberg, akpm

plain text document attachment (mempool-use_kzalloc_allocator.patch)
This patch changes a mempool user, which is basically just a wrapper around
kzalloc(), to use the common mempool_kzalloc/mempool_kfree rather than its
own wrapper functions, removing duplicated code.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

 drivers/md/multipath.c |   17 ++---------------
 1 files changed, 2 insertions(+), 15 deletions(-)

Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/multipath.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/multipath.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/multipath.c
@@ -35,18 +35,6 @@
 #define	NR_RESERVED_BUFS	32
 

-static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
-{
-	struct multipath_bh *mpb;
-	mpb = kzalloc(sizeof(*mpb), gfp_flags);
-	return mpb;
-}
-
-static void mp_pool_free(void *mpb, void *data)
-{
-	kfree(mpb);
-}
-
 static int multipath_map (multipath_conf_t *conf)
 {
 	int i, disks = conf->raid_disks;
@@ -494,9 +482,8 @@ static int multipath_run (mddev_t *mddev
 	}
 	mddev->degraded = conf->raid_disks = conf->working_disks;
 
-	conf->pool = mempool_create(NR_RESERVED_BUFS,
-				    mp_pool_alloc, mp_pool_free,
-				    NULL);
+	conf->pool = mempool_create_kzalloc_pool(NR_RESERVED_BUFS,
+						 sizeof(struct multipath_bh));
 	if (conf->pool == NULL) {
 		printk(KERN_ERR 
 			"multipath: couldn't allocate memory for %s\n",

--



* [patch 7/8] mempool - Add mempool_create_slab_pool()
From: Matthew Dobson @ 2006-01-30 21:24 UTC
  To: linux-kernel; +Cc: penberg, akpm

plain text document attachment
(mempool-add_mempool_create_slab_pool.patch)
Create a simple wrapper function for the common case of creating a slab-based
mempool.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

 include/linux/mempool.h |    5 +++++
 1 files changed, 5 insertions(+)

Index: linux-2.6.16-rc1-mm4+mempool_work/include/linux/mempool.h
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/include/linux/mempool.h
+++ linux-2.6.16-rc1-mm4+mempool_work/include/linux/mempool.h
@@ -37,6 +37,11 @@ extern void mempool_free(void *element, 
  */
 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
 void mempool_free_slab(void *element, void *pool_data);
+static inline mempool_t *mempool_create_slab_pool(int min_nr, kmem_cache_t *kc)
+{
+	return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
+			      (void *) kc);
+}
 
 /*
  * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree
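
For illustration, a minimal sketch of the intended usage; the cache name,
object type, and pool size below are hypothetical:

	#include <linux/slab.h>
	#include <linux/mempool.h>
	#include <linux/errno.h>

	struct my_io {
		int	op;
	};

	static kmem_cache_t *io_cache;
	static mempool_t *io_pool;

	static int my_init(void)
	{
		io_cache = kmem_cache_create("my_io", sizeof(struct my_io),
					     0, 0, NULL, NULL);
		if (!io_cache)
			return -ENOMEM;

		/* Guarantee that 4 objects from io_cache are always available. */
		io_pool = mempool_create_slab_pool(4, io_cache);
		if (!io_pool) {
			kmem_cache_destroy(io_cache);
			return -ENOMEM;
		}
		return 0;
	}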

--



* [patch 8/8] mempool - Use mempool_create_slab_pool()
From: Matthew Dobson @ 2006-01-30 21:24 UTC
  To: linux-kernel; +Cc: penberg, akpm

plain text document attachment
(mempool-use_mempool_create_slab_pool.patch)
Modify well over a dozen mempool users to call mempool_create_slab_pool()
rather than calling mempool_create() with extra arguments, saving about
30 lines of code and increasing readability.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

 block/cfq-iosched.c             |    2 +-
 drivers/block/aoe/aoeblk.c      |    4 +---
 drivers/md/dm-crypt.c           |    3 +--
 drivers/md/dm-mpath.c           |    3 +--
 drivers/md/dm-snap.c            |    3 +--
 drivers/md/dm.c                 |    6 ++----
 drivers/md/kcopyd.c             |    3 +--
 drivers/message/i2o/i2o_block.c |    7 +++----
 drivers/scsi/iscsi_tcp.c        |    4 ++--
 drivers/scsi/qla2xxx/qla_os.c   |    3 +--
 drivers/scsi/scsi_lib.c         |    5 ++---
 fs/bio.c                        |    6 ++----
 fs/cifs/cifsfs.c                |   18 ++++++------------
 fs/jfs/jfs_metapage.c           |    4 ++--
 fs/nfs/read.c                   |    6 ++----
 fs/nfs/write.c                  |   12 ++++--------
 fs/xfs/linux-2.6/xfs_super.c    |    5 ++---
 include/linux/i2o.h             |    4 +---
 net/sunrpc/sched.c              |   12 ++++--------
 19 files changed, 39 insertions(+), 71 deletions(-)

Index: linux-2.6.16-rc1-mm4+mempool_work/block/cfq-iosched.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/block/cfq-iosched.c
+++ linux-2.6.16-rc1-mm4+mempool_work/block/cfq-iosched.c
@@ -2179,7 +2179,7 @@ static int cfq_init_queue(request_queue_
 	if (!cfqd->cfq_hash)
 		goto out_cfqhash;
 
-	cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
+	cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
 	if (!cfqd->crq_pool)
 		goto out_crqpool;
 
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/block/aoe/aoeblk.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/block/aoe/aoeblk.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/block/aoe/aoeblk.c
@@ -211,9 +211,7 @@ aoeblk_gdalloc(void *vp)
 		return;
 	}
 
-	d->bufpool = mempool_create(MIN_BUFS,
-				    mempool_alloc_slab, mempool_free_slab,
-				    buf_pool_cache);
+	d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
 	if (d->bufpool == NULL) {
 		printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool "
 			"for %ld.%ld\n", d->aoemajor, d->aoeminor);
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-crypt.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/dm-crypt.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-crypt.c
@@ -616,8 +616,7 @@ static int crypt_ctr(struct dm_target *t
 		}
 	}
 
-	cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
-				     mempool_free_slab, _crypt_io_pool);
+	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
 	if (!cc->io_pool) {
 		ti->error = PFX "Cannot allocate crypt io mempool";
 		goto bad3;
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-mpath.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/dm-mpath.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-mpath.c
@@ -179,8 +179,7 @@ static struct multipath *alloc_multipath
 		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
 		INIT_WORK(&m->trigger_event, trigger_event, m);
-		m->mpio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
-					      mempool_free_slab, _mpio_cache);
+		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
 			kfree(m);
 			return NULL;
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-snap.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/dm-snap.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm-snap.c
@@ -1258,8 +1258,7 @@ static int __init dm_snapshot_init(void)
 		goto bad4;
 	}
 
-	pending_pool = mempool_create(128, mempool_alloc_slab,
-				      mempool_free_slab, pending_cache);
+	pending_pool = mempool_create_slab_pool(128, pending_cache);
 	if (!pending_pool) {
 		DMERR("Couldn't create pending pool.");
 		r = -ENOMEM;
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/dm.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/dm.c
@@ -819,13 +819,11 @@ static struct mapped_device *alloc_dev(u
 	md->queue->unplug_fn = dm_unplug_all;
 	md->queue->issue_flush_fn = dm_flush_all;
 
-	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
-				     mempool_free_slab, _io_cache);
+	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
  	if (!md->io_pool)
  		goto bad2;
 
-	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
-				      mempool_free_slab, _tio_cache);
+	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
 	if (!md->tio_pool)
 		goto bad3;
 
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/md/kcopyd.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/md/kcopyd.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/md/kcopyd.c
@@ -228,8 +228,7 @@ static int jobs_init(void)
 	if (!_job_cache)
 		return -ENOMEM;
 
-	_job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab,
-				   mempool_free_slab, _job_cache);
+	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 	if (!_job_pool) {
 		kmem_cache_destroy(_job_cache);
 		return -ENOMEM;
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/message/i2o/i2o_block.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/message/i2o/i2o_block.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/message/i2o/i2o_block.c
@@ -1179,10 +1179,9 @@ static int __init i2o_block_init(void)
 		goto exit;
 	}
 
-	i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE,
-					       mempool_alloc_slab,
-					       mempool_free_slab,
-					       i2o_blk_req_pool.slab);
+	i2o_blk_req_pool.pool =
+		mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
+					 i2o_blk_req_pool.slab);
 	if (!i2o_blk_req_pool.pool) {
 		osm_err("can't init request mempool\n");
 		rc = -ENOMEM;
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/scsi/iscsi_tcp.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/scsi/iscsi_tcp.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/scsi/iscsi_tcp.c
@@ -3201,8 +3201,8 @@ iscsi_r2tpool_alloc(struct iscsi_session
 		 * Data-Out PDU's within R2T-sequence can be quite big;
 		 * using mempool
 		 */
-		ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX,
-			 mempool_alloc_slab, mempool_free_slab, taskcache);
+		ctask->datapool = mempool_create_slab_pool(ISCSI_DTASK_DEFAULT_MAX,
+							   taskcache);
 		if (ctask->datapool == NULL) {
 			kfifo_free(ctask->r2tqueue);
 			iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/scsi/qla2xxx/qla_os.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/scsi/qla2xxx/qla_os.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/scsi/qla2xxx/qla_os.c
@@ -2087,8 +2087,7 @@ qla2x00_allocate_sp_pool(scsi_qla_host_t
 	int      rval;
 
 	rval = QLA_SUCCESS;
-	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
-	    mempool_free_slab, srb_cachep);
+	ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
 	if (ha->srb_mempool == NULL) {
 		qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
 		rval = QLA_FUNCTION_FAILED;
Index: linux-2.6.16-rc1-mm4+mempool_work/drivers/scsi/scsi_lib.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/drivers/scsi/scsi_lib.c
+++ linux-2.6.16-rc1-mm4+mempool_work/drivers/scsi/scsi_lib.c
@@ -1787,9 +1787,8 @@ int __init scsi_init_queue(void)
 					sgp->name);
 		}
 
-		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
-				mempool_alloc_slab, mempool_free_slab,
-				sgp->slab);
+		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
+						     sgp->slab);
 		if (!sgp->pool) {
 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
 					sgp->name);
Index: linux-2.6.16-rc1-mm4+mempool_work/fs/bio.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/fs/bio.c
+++ linux-2.6.16-rc1-mm4+mempool_work/fs/bio.c
@@ -1143,8 +1143,7 @@ static int biovec_create_pools(struct bi
 		if (i >= scale)
 			pool_entries >>= 1;
 
-		*bvp = mempool_create(pool_entries, mempool_alloc_slab,
-					mempool_free_slab, bp->slab);
+		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
 		if (!*bvp)
 			return -ENOMEM;
 	}
@@ -1182,8 +1181,7 @@ struct bio_set *bioset_create(int bio_po
 		return NULL;
 
 	memset(bs, 0, sizeof(*bs));
-	bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
-			mempool_free_slab, bio_slab);
+	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
 
 	if (!bs->bio_pool)
 		goto bad;
Index: linux-2.6.16-rc1-mm4+mempool_work/fs/cifs/cifsfs.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/fs/cifs/cifsfs.c
+++ linux-2.6.16-rc1-mm4+mempool_work/fs/cifs/cifsfs.c
@@ -737,10 +737,8 @@ cifs_init_request_bufs(void)
 		cERROR(1,("cifs_min_rcv set to maximum (64)"));
 	}
 
-	cifs_req_poolp = mempool_create(cifs_min_rcv,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					cifs_req_cachep);
+	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
+						  cifs_req_cachep);
 
 	if(cifs_req_poolp == NULL) {
 		kmem_cache_destroy(cifs_req_cachep);
@@ -770,10 +768,8 @@ cifs_init_request_bufs(void)
 		cFYI(1,("cifs_min_small set to maximum (256)"));
 	}
 
-	cifs_sm_req_poolp = mempool_create(cifs_min_small,
-				mempool_alloc_slab,
-				mempool_free_slab,
-				cifs_sm_req_cachep);
+	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
+						     cifs_sm_req_cachep);
 
 	if(cifs_sm_req_poolp == NULL) {
 		mempool_destroy(cifs_req_poolp);
@@ -807,10 +803,8 @@ cifs_init_mids(void)
 	if (cifs_mid_cachep == NULL)
 		return -ENOMEM;
 
-	cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					cifs_mid_cachep);
+	/* 3 is a reasonable minimum number of simultaneous operations */
+	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
 	if(cifs_mid_poolp == NULL) {
 		kmem_cache_destroy(cifs_mid_cachep);
 		return -ENOMEM;
Index: linux-2.6.16-rc1-mm4+mempool_work/fs/jfs/jfs_metapage.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/fs/jfs/jfs_metapage.c
+++ linux-2.6.16-rc1-mm4+mempool_work/fs/jfs/jfs_metapage.c
@@ -221,8 +221,8 @@ int __init metapage_init(void)
 	if (metapage_cache == NULL)
 		return -ENOMEM;
 
-	metapage_mempool = mempool_create(METAPOOL_MIN_PAGES, mempool_alloc_slab,
-					  mempool_free_slab, metapage_cache);
+	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
+						    metapage_cache);
 
 	if (metapage_mempool == NULL) {
 		kmem_cache_destroy(metapage_cache);
Index: linux-2.6.16-rc1-mm4+mempool_work/fs/nfs/read.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/fs/nfs/read.c
+++ linux-2.6.16-rc1-mm4+mempool_work/fs/nfs/read.c
@@ -597,10 +597,8 @@ int nfs_init_readpagecache(void)
 	if (nfs_rdata_cachep == NULL)
 		return -ENOMEM;
 
-	nfs_rdata_mempool = mempool_create(MIN_POOL_READ,
-					   mempool_alloc_slab,
-					   mempool_free_slab,
-					   nfs_rdata_cachep);
+	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
+						     nfs_rdata_cachep);
 	if (nfs_rdata_mempool == NULL)
 		return -ENOMEM;
 
Index: linux-2.6.16-rc1-mm4+mempool_work/fs/nfs/write.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/fs/nfs/write.c
+++ linux-2.6.16-rc1-mm4+mempool_work/fs/nfs/write.c
@@ -1407,17 +1407,13 @@ int nfs_init_writepagecache(void)
 	if (nfs_wdata_cachep == NULL)
 		return -ENOMEM;
 
-	nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE,
-					   mempool_alloc_slab,
-					   mempool_free_slab,
-					   nfs_wdata_cachep);
+	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
+						     nfs_wdata_cachep);
 	if (nfs_wdata_mempool == NULL)
 		return -ENOMEM;
 
-	nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT,
-					   mempool_alloc_slab,
-					   mempool_free_slab,
-					   nfs_wdata_cachep);
+	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
+						      nfs_wdata_cachep);
 	if (nfs_commit_mempool == NULL)
 		return -ENOMEM;
 
Index: linux-2.6.16-rc1-mm4+mempool_work/fs/xfs/linux-2.6/xfs_super.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/fs/xfs/linux-2.6/xfs_super.c
+++ linux-2.6.16-rc1-mm4+mempool_work/fs/xfs/linux-2.6/xfs_super.c
@@ -376,9 +376,8 @@ linvfs_init_zones(void)
 	if (!xfs_ioend_zone)
 		goto out_destroy_vnode_zone;
 
-	xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
-			mempool_alloc_slab, mempool_free_slab,
-			xfs_ioend_zone);
+	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
+						  xfs_ioend_zone);
 	if (!xfs_ioend_pool)
 		goto out_free_ioend_zone;
 
Index: linux-2.6.16-rc1-mm4+mempool_work/include/linux/i2o.h
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/include/linux/i2o.h
+++ linux-2.6.16-rc1-mm4+mempool_work/include/linux/i2o.h
@@ -950,9 +950,7 @@ static inline int i2o_pool_alloc(struct 
 	if (!pool->slab)
 		goto free_name;
 
-	pool->mempool =
-	    mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
-			   pool->slab);
+	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
 	if (!pool->mempool)
 		goto free_slab;
 
Index: linux-2.6.16-rc1-mm4+mempool_work/net/sunrpc/sched.c
===================================================================
--- linux-2.6.16-rc1-mm4+mempool_work.orig/net/sunrpc/sched.c
+++ linux-2.6.16-rc1-mm4+mempool_work/net/sunrpc/sched.c
@@ -1162,16 +1162,12 @@ rpc_init_mempool(void)
 					     NULL, NULL);
 	if (!rpc_buffer_slabp)
 		goto err_nomem;
-	rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
-					    mempool_alloc_slab,
-					    mempool_free_slab,
-					    rpc_task_slabp);
+	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
+						    rpc_task_slabp);
 	if (!rpc_task_mempool)
 		goto err_nomem;
-	rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE,
-					    mempool_alloc_slab,
-					    mempool_free_slab,
-					    rpc_buffer_slabp);
+	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
+						      rpc_buffer_slabp);
 	if (!rpc_buffer_mempool)
 		goto err_nomem;
 	return 0;

--

