From: Mike Snitzer <snitzer@redhat.com>
To: dm-devel@redhat.com
Cc: ejt@redhat.com
Subject: [PATCH 2/2 v2] dm thin: use slab mempools with local caches
Date: Thu, 12 Apr 2012 18:39:17 -0400	[thread overview]
Message-ID: <20120412223917.GA11729@redhat.com> (raw)
In-Reply-To: <1334270075-11699-2-git-send-email-snitzer@redhat.com>

Use dedicated caches prefixed with a "dm_" name rather than relying on
kmalloc mempools backed by the generic slab caches.

This will aid in debugging thinp memory leaks should they occur: with
dedicated, named caches the objects can be attributed to dm-thin (e.g. via
/proc/slabinfo or slub_debug) instead of disappearing into the generic
kmalloc-<size> caches.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
---
 drivers/md/dm-thin.c |   52 +++++++++++++++++++++++++++++++++++++++++++++----
 1 files changed, 47 insertions(+), 5 deletions(-)

v2: tweaked subject and header some, no code changes.
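
For readers less familiar with the mempool API, here is a minimal sketch,
outside the patch, of the pattern being switched to.  struct foo, the
"dm_foo" cache name and the reserve size of 16 are hypothetical; the patch
applies the same idea to struct cell, struct new_mapping and
struct endio_hook:

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

/* Stand-in object type for illustration only. */
struct foo {
	int a;
	void *b;
};

static struct kmem_cache *foo_cache;	/* dedicated, named slab cache */
static mempool_t *foo_pool;

static int foo_pools_create(void)
{
	/*
	 * Old style: the mempool's reserve comes straight from kmalloc(),
	 * i.e. the shared kmalloc-<size> caches:
	 *
	 *	foo_pool = mempool_create_kmalloc_pool(16, sizeof(struct foo));
	 */

	/* New style: create a cache with a recognisable name first... */
	foo_cache = kmem_cache_create("dm_foo", sizeof(struct foo),
				      __alignof__(struct foo), 0, NULL);
	if (!foo_cache)
		return -ENOMEM;

	/* ...and back the mempool's reserve of 16 objects with it. */
	foo_pool = mempool_create_slab_pool(16, foo_cache);
	if (!foo_pool) {
		kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}

	return 0;
}

static void foo_pools_destroy(void)
{
	/* Tear down in reverse order: the pool first, then its cache. */
	mempool_destroy(foo_pool);
	kmem_cache_destroy(foo_cache);
}

Note that mempool_create_slab_pool() only changes where the pool's objects
come from; the existing mempool_alloc()/mempool_free() call sites in
dm-thin are untouched.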

diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 301db0f..57d40b1 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
 	return n;
 }
 
+struct kmem_cache *_cell_cache;
+
 /*
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
 		return NULL;
 
 	spin_lock_init(&prison->lock);
-	prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
-							sizeof(struct cell));
+	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
 	if (!prison->cell_pool) {
 		kfree(prison);
 		return NULL;
@@ -1649,6 +1650,9 @@ static void pool_features_init(struct pool_features *pf)
 	pf->discard_passdown = 1;
 }
 
+struct kmem_cache *_new_mapping_cache;
+struct kmem_cache *_endio_hook_cache;
+
 static void __pool_destroy(struct pool *pool)
 {
 	__pool_table_remove(pool);
@@ -1738,7 +1742,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
 	pool->next_mapping = NULL;
 	pool->mapping_pool =
-		mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+		mempool_create_slab_pool(MAPPING_POOL_SIZE, _new_mapping_cache);
 	if (!pool->mapping_pool) {
 		*error = "Error creating pool's mapping mempool";
 		err_p = ERR_PTR(-ENOMEM);
@@ -1746,7 +1750,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	}
 
 	pool->endio_hook_pool =
-		mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+		mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE, _endio_hook_cache);
 	if (!pool->endio_hook_pool) {
 		*error = "Error creating pool's endio_hook mempool";
 		err_p = ERR_PTR(-ENOMEM);
@@ -2748,7 +2752,42 @@ static int __init dm_thin_init(void)
 
 	r = dm_register_target(&pool_target);
 	if (r)
-		dm_unregister_target(&thin_target);
+		goto bad_pool_target;
+
+	_cell_cache = kmem_cache_create("dm_bio_prison_cell",
+					sizeof(struct cell),
+					__alignof__(struct cell), 0, NULL);
+	if (!_cell_cache) {
+		r = -ENOMEM;
+		goto bad_cell_cache;
+	}
+
+	_new_mapping_cache = kmem_cache_create("dm_thin_new_mapping",
+					       sizeof(struct new_mapping),
+					       __alignof__(struct new_mapping), 0, NULL);
+	if (!_new_mapping_cache) {
+		r = -ENOMEM;
+		goto bad_new_mapping_cache;
+	}
+
+	_endio_hook_cache = kmem_cache_create("dm_thin_endio_hook",
+					      sizeof(struct endio_hook),
+					      __alignof__(struct endio_hook), 0, NULL);
+	if (!_endio_hook_cache) {
+		r = -ENOMEM;
+		goto bad_endio_hook_cache;
+	}
+
+	return 0;
+
+bad_endio_hook_cache:
+	kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+	kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+	dm_unregister_target(&pool_target);
+bad_pool_target:
+	dm_unregister_target(&thin_target);
 
 	return r;
 }
@@ -2757,6 +2796,9 @@ static void dm_thin_exit(void)
 {
 	dm_unregister_target(&thin_target);
 	dm_unregister_target(&pool_target);
+	kmem_cache_destroy(_cell_cache);
+	kmem_cache_destroy(_new_mapping_cache);
+	kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);
-- 
1.7.4.4


Thread overview: 6+ messages
2012-04-12 22:34 [PATCH 1/2] dm thin: fix memory leak of singleton bio-prison cell Mike Snitzer
2012-04-12 22:34 ` [PATCH 2/2] dm thin: use slab_pool for caches Mike Snitzer
2012-04-12 22:39   ` Mike Snitzer [this message]
2012-04-23  7:24 ` [PATCH 1/2] dm thin: fix memory leak of singleton bio-prison cell Joe Thornber
2012-04-23 10:02   ` Alasdair G Kergon
2012-04-23  9:26     ` Joe Thornber
