From: Dan Streetman <ddstreet@ieee.org>
To: Seth Jennings <sjennings@variantweb.net>,
Minchan Kim <minchan@kernel.org>, Nitin Gupta <ngupta@vflare.org>,
Weijie Yang <weijie.yang@samsung.com>
Cc: Dan Streetman <ddstreet@ieee.org>,
Andrew Morton <akpm@linux-foundation.org>,
Bob Liu <bob.liu@oracle.com>, Hugh Dickins <hughd@google.com>,
Mel Gorman <mgorman@suse.de>, Rik van Riel <riel@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Sergey Senozhatsky <sergey.senozhatsky@gmail.com>,
Linux-MM <linux-mm@kvack.org>,
linux-kernel <linux-kernel@vger.kernel.org>
Subject: [PATCHv2 4/4] mm/zswap: update zswap to use zpool
Date: Wed, 7 May 2014 17:51:36 -0400 [thread overview]
Message-ID: <1399499496-3216-5-git-send-email-ddstreet@ieee.org> (raw)
In-Reply-To: <1399499496-3216-1-git-send-email-ddstreet@ieee.org>
Change zswap to use the zpool api instead of directly using zbud.
Add a boot-time param to allow selecting which zpool implementation
to use, with zbud as the default.
Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Cc: Seth Jennings <sjennings@variantweb.net>
Cc: Weijie Yang <weijie.yang@samsung.com>
---
Changes since v1 https://lkml.org/lkml/2014/4/19/102
- since the zpool fallback is removed, manually fall back to zbud if the
specified type fails
mm/zswap.c | 75 ++++++++++++++++++++++++++++++++++++--------------------------
1 file changed, 44 insertions(+), 31 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 1cc6770..438c8a5 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -34,7 +34,7 @@
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
-#include <linux/zbud.h>
+#include <linux/zpool.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
@@ -45,8 +45,8 @@
/*********************************
* statistics
**********************************/
-/* Number of memory pages used by the compressed pool */
-static u64 zswap_pool_pages;
+/* Total bytes used by the compressed storage */
+static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
@@ -89,8 +89,13 @@ static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent,
zswap_max_pool_percent, uint, 0644);
-/* zbud_pool is shared by all of zswap backend */
-static struct zbud_pool *zswap_pool;
+/* Compressed storage to use */
+#define ZSWAP_ZPOOL_DEFAULT ZPOOL_TYPE_ZBUD
+static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
+module_param_named(zpool, zswap_zpool_type, charp, 0444);
+
+/* zpool is shared by all of zswap backend */
+static struct zpool *zswap_pool;
/*********************************
* compression functions
@@ -168,7 +173,7 @@ static void zswap_comp_exit(void)
* be held while changing the refcount. Since the lock must
* be held, there is no reason to also make refcount atomic.
* offset - the swap offset for the entry. Index into the red-black tree.
- * handle - zbud allocation handle that stores the compressed page data
+ * handle - zpool allocation handle that stores the compressed page data
* length - the length in bytes of the compressed page data. Needed during
* decompression
*/
@@ -284,15 +289,15 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
}
/*
- * Carries out the common pattern of freeing and entry's zbud allocation,
+ * Carries out the common pattern of freeing and entry's zpool allocation,
* freeing the entry itself, and decrementing the number of stored pages.
*/
static void zswap_free_entry(struct zswap_entry *entry)
{
- zbud_free(zswap_pool, entry->handle);
+ zpool_free(zswap_pool, entry->handle);
zswap_entry_cache_free(entry);
atomic_dec(&zswap_stored_pages);
- zswap_pool_pages = zbud_get_pool_size(zswap_pool);
+ zswap_pool_total_size = zpool_get_total_size(zswap_pool);
}
/* caller must hold the tree lock */
@@ -409,7 +414,7 @@ cleanup:
static bool zswap_is_full(void)
{
return totalram_pages * zswap_max_pool_percent / 100 <
- zswap_pool_pages;
+ DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
/*********************************
@@ -525,7 +530,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
* the swap cache, the compressed version stored by zswap can be
* freed.
*/
-static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
+static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
struct zswap_header *zhdr;
swp_entry_t swpentry;
@@ -541,9 +546,9 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
};
/* extract swpentry from data */
- zhdr = zbud_map(pool, handle);
+ zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
swpentry = zhdr->swpentry; /* here */
- zbud_unmap(pool, handle);
+ zpool_unmap_handle(pool, handle);
tree = zswap_trees[swp_type(swpentry)];
offset = swp_offset(swpentry);
@@ -573,13 +578,13 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
case ZSWAP_SWAPCACHE_NEW: /* page is locked */
/* decompress */
dlen = PAGE_SIZE;
- src = (u8 *)zbud_map(zswap_pool, entry->handle) +
- sizeof(struct zswap_header);
+ src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
+ ZPOOL_MM_RO) + sizeof(struct zswap_header);
dst = kmap_atomic(page);
ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
entry->length, dst, &dlen);
kunmap_atomic(dst);
- zbud_unmap(zswap_pool, entry->handle);
+ zpool_unmap_handle(zswap_pool, entry->handle);
BUG_ON(ret);
BUG_ON(dlen != PAGE_SIZE);
@@ -652,7 +657,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
/* reclaim space if needed */
if (zswap_is_full()) {
zswap_pool_limit_hit++;
- if (zbud_reclaim_page(zswap_pool, 8)) {
+ if (zpool_shrink(zswap_pool, PAGE_SIZE)) {
zswap_reject_reclaim_fail++;
ret = -ENOMEM;
goto reject;
@@ -679,7 +684,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
/* store */
len = dlen + sizeof(struct zswap_header);
- ret = zbud_alloc(zswap_pool, len, &handle);
+ ret = zpool_malloc(zswap_pool, len, &handle);
if (ret == -ENOSPC) {
zswap_reject_compress_poor++;
goto freepage;
@@ -688,11 +693,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
zswap_reject_alloc_fail++;
goto freepage;
}
- zhdr = zbud_map(zswap_pool, handle);
+ zhdr = zpool_map_handle(zswap_pool, handle, ZPOOL_MM_RW);
zhdr->swpentry = swp_entry(type, offset);
buf = (u8 *)(zhdr + 1);
memcpy(buf, dst, dlen);
- zbud_unmap(zswap_pool, handle);
+ zpool_unmap_handle(zswap_pool, handle);
put_cpu_var(zswap_dstmem);
/* populate entry */
@@ -715,7 +720,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
/* update stats */
atomic_inc(&zswap_stored_pages);
- zswap_pool_pages = zbud_get_pool_size(zswap_pool);
+ zswap_pool_total_size = zpool_get_total_size(zswap_pool);
return 0;
@@ -751,13 +756,13 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
/* decompress */
dlen = PAGE_SIZE;
- src = (u8 *)zbud_map(zswap_pool, entry->handle) +
- sizeof(struct zswap_header);
+ src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
+ ZPOOL_MM_RO) + sizeof(struct zswap_header);
dst = kmap_atomic(page);
ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
dst, &dlen);
kunmap_atomic(dst);
- zbud_unmap(zswap_pool, entry->handle);
+ zpool_unmap_handle(zswap_pool, entry->handle);
BUG_ON(ret);
spin_lock(&tree->lock);
@@ -810,7 +815,7 @@ static void zswap_frontswap_invalidate_area(unsigned type)
zswap_trees[type] = NULL;
}
-static struct zbud_ops zswap_zbud_ops = {
+static struct zpool_ops zswap_zpool_ops = {
.evict = zswap_writeback_entry
};
@@ -868,8 +873,8 @@ static int __init zswap_debugfs_init(void)
zswap_debugfs_root, &zswap_written_back_pages);
debugfs_create_u64("duplicate_entry", S_IRUGO,
zswap_debugfs_root, &zswap_duplicate_entry);
- debugfs_create_u64("pool_pages", S_IRUGO,
- zswap_debugfs_root, &zswap_pool_pages);
+ debugfs_create_u64("pool_total_size", S_IRUGO,
+ zswap_debugfs_root, &zswap_pool_total_size);
debugfs_create_atomic_t("stored_pages", S_IRUGO,
zswap_debugfs_root, &zswap_stored_pages);
@@ -894,17 +899,25 @@ static void __exit zswap_debugfs_exit(void) { }
**********************************/
static int __init init_zswap(void)
{
+ gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+
if (!zswap_enabled)
return 0;
pr_info("loading zswap\n");
- zswap_pool = zbud_create_pool(__GFP_NORETRY | __GFP_NOWARN,
- &zswap_zbud_ops);
+ zswap_pool = zpool_create_pool(zswap_zpool_type, gfp, &zswap_zpool_ops);
+ if (!zswap_pool) {
+ pr_info("%s zpool not available\n", zswap_zpool_type);
+ zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
+ zswap_pool = zpool_create_pool(zswap_zpool_type, gfp,
+ &zswap_zpool_ops);
+ }
if (!zswap_pool) {
- pr_err("zbud pool creation failed\n");
+ pr_err("zpool creation failed\n");
goto error;
}
+ pr_info("using %s pool\n", zswap_zpool_type);
if (zswap_entry_cache_create()) {
pr_err("entry cache creation failed\n");
@@ -928,7 +941,7 @@ pcpufail:
compfail:
zswap_entry_cache_destory();
cachefail:
- zbud_destroy_pool(zswap_pool);
+ zpool_destroy_pool(zswap_pool);
error:
return -ENOMEM;
}
--
1.8.3.1
next prev parent reply other threads:[~2014-05-07 21:53 UTC|newest]
Thread overview: 65+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-04-19 15:52 [PATCH 0/4] mm: zpool: add common api for zswap to use zbud/zsmalloc Dan Streetman
2014-04-19 15:52 ` [PATCH 1/4] mm: zpool: zbud_alloc() minor param change Dan Streetman
2014-04-19 15:52 ` [PATCH 2/4] mm: zpool: implement zsmalloc shrinking Dan Streetman
2014-04-26 8:37 ` Weijie Yang
2014-04-27 4:13 ` Dan Streetman
2014-05-02 20:01 ` Seth Jennings
2014-05-04 20:38 ` Dan Streetman
2014-04-19 15:52 ` [PATCH 3/4] mm: zpool: implement common zpool api to zbud/zsmalloc Dan Streetman
2014-04-22 10:05 ` Sergey Senozhatsky
2014-04-22 13:43 ` Dan Streetman
2014-04-19 15:52 ` [PATCH 4/4] mm: zpool: update zswap to use zpool Dan Streetman
2014-04-21 2:47 ` [PATCH 0/4] mm: zpool: add common api for zswap to use zbud/zsmalloc Weijie Yang
2014-05-07 21:51 ` [PATCHv2 0/4] mm/zpool: " Dan Streetman
2014-05-07 21:51 ` [PATCHv2 1/4] mm/zbud: zbud_alloc() minor param change Dan Streetman
2014-05-09 3:33 ` Seth Jennings
2014-05-07 21:51 ` [PATCH 2/4] mm/zbud: change zbud_alloc size type to size_t Dan Streetman
2014-05-09 3:33 ` Seth Jennings
2014-05-07 21:51 ` [PATCHv2 3/4] mm/zpool: implement common zpool api to zbud/zsmalloc Dan Streetman
2014-05-09 4:13 ` Seth Jennings
2014-05-10 16:06 ` Dan Streetman
2014-05-07 21:51 ` Dan Streetman [this message]
2014-05-24 19:06 ` [PATCHv3 0/6] mm/zpool: add common api for zswap to use zbud/zsmalloc Dan Streetman
2014-05-24 19:06 ` [PATCHv2 1/6] mm/zbud: zbud_alloc() minor param change Dan Streetman
2014-05-24 19:06 ` [PATCH 2/6] mm/zbud: change zbud_alloc size type to size_t Dan Streetman
2014-05-24 19:06 ` [PATCHv3 3/6] mm/zpool: implement common zpool api to zbud/zsmalloc Dan Streetman
2014-05-27 22:06 ` Seth Jennings
2014-05-27 22:48 ` Seth Jennings
2014-05-28 0:06 ` Dan Streetman
2014-05-29 3:48 ` Seth Jennings
2014-05-24 19:06 ` [PATCH 4/6] mm/zpool: zbud/zsmalloc implement zpool Dan Streetman
2014-05-24 19:06 ` [PATCHv3 5/6] mm/zpool: update zswap to use zpool Dan Streetman
2014-05-24 19:06 ` [PATCH 6/6] mm/zpool: prevent zbud/zsmalloc from unloading when used Dan Streetman
2014-05-27 22:40 ` Seth Jennings
2014-05-28 0:40 ` Dan Streetman
2014-05-27 22:44 ` [PATCHv3 0/6] mm/zpool: add common api for zswap to use zbud/zsmalloc Seth Jennings
2014-06-02 22:19 ` [PATCHv4 " Dan Streetman
2014-06-02 22:19 ` [PATCHv2 1/6] mm/zbud: zbud_alloc() minor param change Dan Streetman
2014-06-23 21:19 ` Andrew Morton
2014-06-24 15:24 ` Dan Streetman
2014-06-02 22:19 ` [PATCH 2/6] mm/zbud: change zbud_alloc size type to size_t Dan Streetman
2014-06-02 22:19 ` [PATCHv4 3/6] mm/zpool: implement common zpool api to zbud/zsmalloc Dan Streetman
2014-06-23 21:46 ` Andrew Morton
2014-06-24 15:39 ` Dan Streetman
2014-06-24 23:08 ` Andrew Morton
2014-06-27 17:11 ` Dan Streetman
2014-06-27 19:17 ` Andrew Morton
2014-06-02 22:19 ` [PATCHv2 4/6] mm/zpool: zbud/zsmalloc implement zpool Dan Streetman
2014-06-02 22:19 ` [PATCHv4 5/6] mm/zpool: update zswap to use zpool Dan Streetman
2014-06-02 22:19 ` [PATCHv2 6/6] mm/zpool: prevent zbud/zsmalloc from unloading when used Dan Streetman
2014-06-23 21:48 ` Andrew Morton
2014-06-24 15:41 ` Dan Streetman
2014-06-04 1:38 ` [PATCHv4 0/6] mm/zpool: add common api for zswap to use zbud/zsmalloc Bob Liu
2014-06-06 21:01 ` Seth Jennings
2014-07-02 21:43 ` [PATCHv5 0/4] " Dan Streetman
2014-07-02 21:45 ` Dan Streetman
2014-07-02 21:45 ` [PATCHv2 1/4] mm/zbud: change zbud_alloc size type to size_t Dan Streetman
2014-07-02 21:45 ` [PATCHv5 2/4] mm/zpool: implement common zpool api to zbud/zsmalloc Dan Streetman
2014-07-02 21:45 ` [PATCHv3 3/4] mm/zpool: zbud/zsmalloc implement zpool Dan Streetman
2014-07-02 21:45 ` [PATCHv5 4/4] mm/zpool: update zswap to use zpool Dan Streetman
2014-07-14 18:10 ` [PATCHv5 0/4] mm/zpool: add common api for zswap to use zbud/zsmalloc Dan Streetman
2014-07-16 20:59 ` Seth Jennings
2014-07-16 21:05 ` Dan Streetman
2014-07-16 22:00 ` Seth Jennings
2014-07-25 16:59 ` Dan Streetman
2014-07-28 20:40 ` Seth Jennings
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1399499496-3216-5-git-send-email-ddstreet@ieee.org \
--to=ddstreet@ieee.org \
--cc=akpm@linux-foundation.org \
--cc=bob.liu@oracle.com \
--cc=hannes@cmpxchg.org \
--cc=hughd@google.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mgorman@suse.de \
--cc=minchan@kernel.org \
--cc=ngupta@vflare.org \
--cc=riel@redhat.com \
--cc=sergey.senozhatsky@gmail.com \
--cc=sjennings@variantweb.net \
--cc=weijie.yang@samsung.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).