* [PATCH] zram: add zpool support
@ 2016-06-08  9:39 ` Geliang Tang
  0 siblings, 0 replies; 26+ messages in thread
From: Geliang Tang @ 2016-06-08  9:39 UTC (permalink / raw)
  To: Minchan Kim, Nitin Gupta, Sergey Senozhatsky, Dan Streetman
  Cc: Geliang Tang, linux-kernel, linux-mm

This patch adds zpool support to zram, allowing zram to use either the
zpool API or the zsmalloc API directly.

Signed-off-by: Geliang Tang <geliangtang@gmail.com>
---
 drivers/block/zram/zram_drv.c | 97 +++++++++++++++++++++++++++++++++++++++++++
 drivers/block/zram/zram_drv.h |  5 +++
 2 files changed, 102 insertions(+)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9e2a83c..1f90bd0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -43,6 +43,11 @@ static const char *default_compressor = "lzo";
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
 
+#ifdef CONFIG_ZPOOL
+/* Compressed storage zpool to use */
+#define ZRAM_ZPOOL_DEFAULT "zsmalloc"
+#endif
+
 static inline void deprecated_attr_warn(const char *name)
 {
 	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
@@ -228,7 +233,11 @@ static ssize_t mem_used_total_show(struct device *dev,
 	down_read(&zram->init_lock);
 	if (init_done(zram)) {
 		struct zram_meta *meta = zram->meta;
+#ifdef CONFIG_ZPOOL
+		val = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
+#else
 		val = zs_get_total_pages(meta->mem_pool);
+#endif
 	}
 	up_read(&zram->init_lock);
 
@@ -296,8 +305,14 @@ static ssize_t mem_used_max_store(struct device *dev,
 	down_read(&zram->init_lock);
 	if (init_done(zram)) {
 		struct zram_meta *meta = zram->meta;
+#ifdef CONFIG_ZPOOL
+		atomic_long_set(&zram->stats.max_used_pages,
+				zpool_get_total_size(meta->mem_pool)
+				>> PAGE_SHIFT);
+#else
 		atomic_long_set(&zram->stats.max_used_pages,
 				zs_get_total_pages(meta->mem_pool));
+#endif
 	}
 	up_read(&zram->init_lock);
 
@@ -366,6 +381,18 @@ static ssize_t comp_algorithm_store(struct device *dev,
 	return len;
 }
 
+#ifdef CONFIG_ZPOOL
+static void zpool_compact(void *pool)
+{
+	zs_compact(pool);
+}
+
+static void zpool_stats(void *pool, struct zs_pool_stats *stats)
+{
+	zs_pool_stats(pool, stats);
+}
+#endif
+
 static ssize_t compact_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -379,7 +406,11 @@ static ssize_t compact_store(struct device *dev,
 	}
 
 	meta = zram->meta;
+#ifdef CONFIG_ZPOOL
+	zpool_compact(meta->mem_pool);
+#else
 	zs_compact(meta->mem_pool);
+#endif
 	up_read(&zram->init_lock);
 
 	return len;
@@ -416,8 +447,14 @@ static ssize_t mm_stat_show(struct device *dev,
 
 	down_read(&zram->init_lock);
 	if (init_done(zram)) {
+#ifdef CONFIG_ZPOOL
+		mem_used = zpool_get_total_size(zram->meta->mem_pool)
+				>> PAGE_SHIFT;
+		zpool_stats(zram->meta->mem_pool, &pool_stats);
+#else
 		mem_used = zs_get_total_pages(zram->meta->mem_pool);
 		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
+#endif
 	}
 
 	orig_size = atomic64_read(&zram->stats.pages_stored);
@@ -490,10 +527,18 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 		if (!handle)
 			continue;
 
+#ifdef CONFIG_ZPOOL
+		zpool_free(meta->mem_pool, handle);
+#else
 		zs_free(meta->mem_pool, handle);
+#endif
 	}
 
+#ifdef CONFIG_ZPOOL
+	zpool_destroy_pool(meta->mem_pool);
+#else
 	zs_destroy_pool(meta->mem_pool);
+#endif
 	vfree(meta->table);
 	kfree(meta);
 }
@@ -513,7 +558,17 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 		goto out_error;
 	}
 
+#ifdef CONFIG_ZPOOL
+	if (!zpool_has_pool(ZRAM_ZPOOL_DEFAULT)) {
+		pr_err("zpool %s not available\n", ZRAM_ZPOOL_DEFAULT);
+		goto out_error;
+	}
+
+	meta->mem_pool = zpool_create_pool(ZRAM_ZPOOL_DEFAULT,
+					pool_name, 0, NULL);
+#else
 	meta->mem_pool = zs_create_pool(pool_name);
+#endif
 	if (!meta->mem_pool) {
 		pr_err("Error creating memory pool\n");
 		goto out_error;
@@ -549,7 +604,11 @@ static void zram_free_page(struct zram *zram, size_t index)
 		return;
 	}
 
+#ifdef CONFIG_ZPOOL
+	zpool_free(meta->mem_pool, handle);
+#else
 	zs_free(meta->mem_pool, handle);
+#endif
 
 	atomic64_sub(zram_get_obj_size(meta, index),
 			&zram->stats.compr_data_size);
@@ -577,7 +636,11 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 		return 0;
 	}
 
+#ifdef CONFIG_ZPOOL
+	cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_RO);
+#else
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+#endif
 	if (size == PAGE_SIZE) {
 		copy_page(mem, cmem);
 	} else {
@@ -586,7 +649,11 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 		ret = zcomp_decompress(zstrm, cmem, size, mem);
 		zcomp_stream_put(zram->comp);
 	}
+#ifdef CONFIG_ZPOOL
+	zpool_unmap_handle(meta->mem_pool, handle);
+#else
 	zs_unmap_object(meta->mem_pool, handle);
+#endif
 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	/* Should NEVER happen. Return bio error if it does. */
@@ -735,20 +802,34 @@ compress_again:
 	 * from the slow path and handle has already been allocated.
 	 */
 	if (!handle)
+#ifdef CONFIG_ZPOOL
+		ret = zpool_malloc(meta->mem_pool, clen,
+				__GFP_KSWAPD_RECLAIM |
+				__GFP_NOWARN |
+				__GFP_HIGHMEM |
+				__GFP_MOVABLE, &handle);
+#else
 		handle = zs_malloc(meta->mem_pool, clen,
 				__GFP_KSWAPD_RECLAIM |
 				__GFP_NOWARN |
 				__GFP_HIGHMEM |
 				__GFP_MOVABLE);
+#endif
 	if (!handle) {
 		zcomp_stream_put(zram->comp);
 		zstrm = NULL;
 
 		atomic64_inc(&zram->stats.writestall);
 
+#ifdef CONFIG_ZPOOL
+		ret = zpool_malloc(meta->mem_pool, clen,
+				GFP_NOIO | __GFP_HIGHMEM |
+				__GFP_MOVABLE, &handle);
+#else
 		handle = zs_malloc(meta->mem_pool, clen,
 				GFP_NOIO | __GFP_HIGHMEM |
 				__GFP_MOVABLE);
+#endif
 		if (handle)
 			goto compress_again;
 
@@ -758,16 +839,28 @@ compress_again:
 		goto out;
 	}
 
+#ifdef CONFIG_ZPOOL
+	alloced_pages = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
+#else
 	alloced_pages = zs_get_total_pages(meta->mem_pool);
+#endif
 	update_used_max(zram, alloced_pages);
 
 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+#ifdef CONFIG_ZPOOL
+		zpool_free(meta->mem_pool, handle);
+#else
 		zs_free(meta->mem_pool, handle);
+#endif
 		ret = -ENOMEM;
 		goto out;
 	}
 
+#ifdef CONFIG_ZPOOL
+	cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_WO);
+#else
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
+#endif
 
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
 		src = kmap_atomic(page);
@@ -779,7 +872,11 @@ compress_again:
 
 	zcomp_stream_put(zram->comp);
 	zstrm = NULL;
+#ifdef CONFIG_ZPOOL
+	zpool_unmap_handle(meta->mem_pool, handle);
+#else
 	zs_unmap_object(meta->mem_pool, handle);
+#endif
 
 	/*
 	 * Free memory associated with this sector
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 74fcf10..68f1222 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -17,6 +17,7 @@
 
 #include <linux/rwsem.h>
 #include <linux/zsmalloc.h>
+#include <linux/zpool.h>
 #include <linux/crypto.h>
 
 #include "zcomp.h"
@@ -91,7 +92,11 @@ struct zram_stats {
 
 struct zram_meta {
 	struct zram_table_entry *table;
+#ifdef CONFIG_ZPOOL
+	struct zpool *mem_pool;
+#else
 	struct zs_pool *mem_pool;
+#endif
 };
 
 struct zram {
-- 
1.9.1

* Re: [PATCH] zram: add zpool support
  2016-06-08  9:39 ` Geliang Tang
@ 2016-06-08 14:51   ` Dan Streetman
  -1 siblings, 0 replies; 26+ messages in thread
From: Dan Streetman @ 2016-06-08 14:51 UTC (permalink / raw)
  To: Geliang Tang
  Cc: Minchan Kim, Nitin Gupta, Sergey Senozhatsky, linux-kernel, Linux-MM

On Wed, Jun 8, 2016 at 5:39 AM, Geliang Tang <geliangtang@gmail.com> wrote:
> This patch adds zpool support for zram, it will allow us to use both
> the zpool api and directly zsmalloc api in zram.

besides the problems below, this was discussed a while ago and I
believe Minchan is still against it, as nobody has so far shown what
the benefit to zram would be; zram doesn't need the predictability, or
evictability, of zbud or z3fold.

>
> Signed-off-by: Geliang Tang <geliangtang@gmail.com>
> ---
>  drivers/block/zram/zram_drv.c | 97 +++++++++++++++++++++++++++++++++++++++++++
>  drivers/block/zram/zram_drv.h |  5 +++
>  2 files changed, 102 insertions(+)
>
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 9e2a83c..1f90bd0 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -43,6 +43,11 @@ static const char *default_compressor = "lzo";
>  /* Module params (documentation at end) */
>  static unsigned int num_devices = 1;
>
> +#ifdef CONFIG_ZPOOL
> +/* Compressed storage zpool to use */
> +#define ZRAM_ZPOOL_DEFAULT "zsmalloc"
> +#endif

It doesn't make sense for zram to conditionally use zpool; either it
uses it and thus has 'select ZPOOL' in its Kconfig entry, or it
doesn't use it at all.
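
For illustration, a sketch of the unconditional-zpool direction Dan
describes (this is not the actual drivers/block/zram/Kconfig entry;
its real dependencies are simplified away here):

	config ZRAM
		tristate "Compressed RAM block device support"
		depends on BLOCK && SYSFS
		select ZPOOL
		select ZSMALLOC
		help
		  With ZPOOL always selected, zram could call the zpool
		  API unconditionally instead of guarding every call
		  with #ifdef CONFIG_ZPOOL.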

> +
>  static inline void deprecated_attr_warn(const char *name)
>  {
>         pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
> @@ -228,7 +233,11 @@ static ssize_t mem_used_total_show(struct device *dev,
>         down_read(&zram->init_lock);
>         if (init_done(zram)) {
>                 struct zram_meta *meta = zram->meta;
> +#ifdef CONFIG_ZPOOL
> +               val = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
> +#else
>                 val = zs_get_total_pages(meta->mem_pool);
> +#endif
>         }
>         up_read(&zram->init_lock);
>
> @@ -296,8 +305,14 @@ static ssize_t mem_used_max_store(struct device *dev,
>         down_read(&zram->init_lock);
>         if (init_done(zram)) {
>                 struct zram_meta *meta = zram->meta;
> +#ifdef CONFIG_ZPOOL
> +               atomic_long_set(&zram->stats.max_used_pages,
> +                               zpool_get_total_size(meta->mem_pool)
> +                               >> PAGE_SHIFT);
> +#else
>                 atomic_long_set(&zram->stats.max_used_pages,
>                                 zs_get_total_pages(meta->mem_pool));
> +#endif
>         }
>         up_read(&zram->init_lock);
>
> @@ -366,6 +381,18 @@ static ssize_t comp_algorithm_store(struct device *dev,
>         return len;
>  }
>
> +#ifdef CONFIG_ZPOOL
> +static void zpool_compact(void *pool)
> +{
> +       zs_compact(pool);
> +}
> +
> +static void zpool_stats(void *pool, struct zs_pool_stats *stats)
> +{
> +       zs_pool_stats(pool, stats);
> +}
> +#endif

first, no.  this obviously makes using zpool in zram completely pointless.

second, did you test this?  the pool you're passing is the zpool, not
the zs_pool.  quite bad things will happen when this code runs.  There
is no way to get the zs_pool from the zpool object (that's the point
of abstraction, of course).

The fact zpool doesn't have these apis (currently) is one of the
reasons against changing zram to use zpool.
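
For context, a rough idea of the kind of zpool-side support that would
have to exist first; nothing below is current zpool API, and the names
are invented for illustration only:

	/*
	 * Hypothetical sketch: an optional compaction op that each zpool
	 * backend could provide, plus a pass-through helper.  zsmalloc
	 * would wire the op to zs_compact(); zbud/z3fold, which cannot
	 * compact, would simply leave it NULL.
	 */
	struct zpool_maint_ops {
		unsigned long (*compact)(void *pool);	/* returns pages freed */
	};

	static unsigned long zpool_compact_sketch(void *backend_pool,
					const struct zpool_maint_ops *ops)
	{
		return ops->compact ? ops->compact(backend_pool) : 0;
	}

Only once something along these lines exists at the zpool level would a
zram compact_store() make sense for backends other than zsmalloc.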

> +
>  static ssize_t compact_store(struct device *dev,
>                 struct device_attribute *attr, const char *buf, size_t len)
>  {
> @@ -379,7 +406,11 @@ static ssize_t compact_store(struct device *dev,
>         }
>
>         meta = zram->meta;
> +#ifdef CONFIG_ZPOOL
> +       zpool_compact(meta->mem_pool);
> +#else
>         zs_compact(meta->mem_pool);
> +#endif
>         up_read(&zram->init_lock);
>
>         return len;
> @@ -416,8 +447,14 @@ static ssize_t mm_stat_show(struct device *dev,
>
>         down_read(&zram->init_lock);
>         if (init_done(zram)) {
> +#ifdef CONFIG_ZPOOL
> +               mem_used = zpool_get_total_size(zram->meta->mem_pool)
> +                               >> PAGE_SHIFT;
> +               zpool_stats(zram->meta->mem_pool, &pool_stats);
> +#else
>                 mem_used = zs_get_total_pages(zram->meta->mem_pool);
>                 zs_pool_stats(zram->meta->mem_pool, &pool_stats);
> +#endif
>         }
>
>         orig_size = atomic64_read(&zram->stats.pages_stored);
> @@ -490,10 +527,18 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
>                 if (!handle)
>                         continue;
>
> +#ifdef CONFIG_ZPOOL
> +               zpool_free(meta->mem_pool, handle);
> +#else
>                 zs_free(meta->mem_pool, handle);
> +#endif
>         }
>
> +#ifdef CONFIG_ZPOOL
> +       zpool_destroy_pool(meta->mem_pool);
> +#else
>         zs_destroy_pool(meta->mem_pool);
> +#endif
>         vfree(meta->table);
>         kfree(meta);
>  }
> @@ -513,7 +558,17 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
>                 goto out_error;
>         }
>
> +#ifdef CONFIG_ZPOOL
> +       if (!zpool_has_pool(ZRAM_ZPOOL_DEFAULT)) {
> +               pr_err("zpool %s not available\n", ZRAM_ZPOOL_DEFAULT);
> +               goto out_error;
> +       }
> +
> +       meta->mem_pool = zpool_create_pool(ZRAM_ZPOOL_DEFAULT,
> +                                       pool_name, 0, NULL);
> +#else
>         meta->mem_pool = zs_create_pool(pool_name);
> +#endif
>         if (!meta->mem_pool) {
>                 pr_err("Error creating memory pool\n");
>                 goto out_error;
> @@ -549,7 +604,11 @@ static void zram_free_page(struct zram *zram, size_t index)
>                 return;
>         }
>
> +#ifdef CONFIG_ZPOOL
> +       zpool_free(meta->mem_pool, handle);
> +#else
>         zs_free(meta->mem_pool, handle);
> +#endif
>
>         atomic64_sub(zram_get_obj_size(meta, index),
>                         &zram->stats.compr_data_size);
> @@ -577,7 +636,11 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>                 return 0;
>         }
>
> +#ifdef CONFIG_ZPOOL
> +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_RO);
> +#else
>         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
> +#endif
>         if (size == PAGE_SIZE) {
>                 copy_page(mem, cmem);
>         } else {
> @@ -586,7 +649,11 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>                 ret = zcomp_decompress(zstrm, cmem, size, mem);
>                 zcomp_stream_put(zram->comp);
>         }
> +#ifdef CONFIG_ZPOOL
> +       zpool_unmap_handle(meta->mem_pool, handle);
> +#else
>         zs_unmap_object(meta->mem_pool, handle);
> +#endif
>         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
>
>         /* Should NEVER happen. Return bio error if it does. */
> @@ -735,20 +802,34 @@ compress_again:
>          * from the slow path and handle has already been allocated.
>          */
>         if (!handle)
> +#ifdef CONFIG_ZPOOL
> +               ret = zpool_malloc(meta->mem_pool, clen,
> +                               __GFP_KSWAPD_RECLAIM |
> +                               __GFP_NOWARN |
> +                               __GFP_HIGHMEM |
> +                               __GFP_MOVABLE, &handle);
> +#else
>                 handle = zs_malloc(meta->mem_pool, clen,
>                                 __GFP_KSWAPD_RECLAIM |
>                                 __GFP_NOWARN |
>                                 __GFP_HIGHMEM |
>                                 __GFP_MOVABLE);
> +#endif
>         if (!handle) {
>                 zcomp_stream_put(zram->comp);
>                 zstrm = NULL;
>
>                 atomic64_inc(&zram->stats.writestall);
>
> +#ifdef CONFIG_ZPOOL
> +               ret = zpool_malloc(meta->mem_pool, clen,
> +                               GFP_NOIO | __GFP_HIGHMEM |
> +                               __GFP_MOVABLE, &handle);
> +#else
>                 handle = zs_malloc(meta->mem_pool, clen,
>                                 GFP_NOIO | __GFP_HIGHMEM |
>                                 __GFP_MOVABLE);
> +#endif
>                 if (handle)
>                         goto compress_again;
>
> @@ -758,16 +839,28 @@ compress_again:
>                 goto out;
>         }
>
> +#ifdef CONFIG_ZPOOL
> +       alloced_pages = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
> +#else
>         alloced_pages = zs_get_total_pages(meta->mem_pool);
> +#endif
>         update_used_max(zram, alloced_pages);
>
>         if (zram->limit_pages && alloced_pages > zram->limit_pages) {
> +#ifdef CONFIG_ZPOOL
> +               zpool_free(meta->mem_pool, handle);
> +#else
>                 zs_free(meta->mem_pool, handle);
> +#endif
>                 ret = -ENOMEM;
>                 goto out;
>         }
>
> +#ifdef CONFIG_ZPOOL
> +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_WO);
> +#else
>         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
> +#endif
>
>         if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
>                 src = kmap_atomic(page);
> @@ -779,7 +872,11 @@ compress_again:
>
>         zcomp_stream_put(zram->comp);
>         zstrm = NULL;
> +#ifdef CONFIG_ZPOOL
> +       zpool_unmap_handle(meta->mem_pool, handle);
> +#else
>         zs_unmap_object(meta->mem_pool, handle);
> +#endif
>
>         /*
>          * Free memory associated with this sector
> diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> index 74fcf10..68f1222 100644
> --- a/drivers/block/zram/zram_drv.h
> +++ b/drivers/block/zram/zram_drv.h
> @@ -17,6 +17,7 @@
>
>  #include <linux/rwsem.h>
>  #include <linux/zsmalloc.h>
> +#include <linux/zpool.h>
>  #include <linux/crypto.h>
>
>  #include "zcomp.h"
> @@ -91,7 +92,11 @@ struct zram_stats {
>
>  struct zram_meta {
>         struct zram_table_entry *table;
> +#ifdef CONFIG_ZPOOL
> +       struct zpool *mem_pool;
> +#else
>         struct zs_pool *mem_pool;
> +#endif
>  };
>
>  struct zram {
> --
> 1.9.1
>

* Re: [PATCH] zram: add zpool support
  2016-06-08 14:51   ` Dan Streetman
@ 2016-06-09  1:34     ` Minchan Kim
  -1 siblings, 0 replies; 26+ messages in thread
From: Minchan Kim @ 2016-06-09  1:34 UTC (permalink / raw)
  To: Dan Streetman
  Cc: Geliang Tang, Nitin Gupta, Sergey Senozhatsky, linux-kernel, Linux-MM

On Wed, Jun 08, 2016 at 10:51:28AM -0400, Dan Streetman wrote:
> On Wed, Jun 8, 2016 at 5:39 AM, Geliang Tang <geliangtang@gmail.com> wrote:
> > This patch adds zpool support for zram, it will allow us to use both
> > the zpool api and directly zsmalloc api in zram.
> 
> besides the problems below, this was discussed a while ago and I
> believe Minchan is still against it, as nobody has so far shown what
> the benefit to zram would be; zram doesn't need the predictability, or
> evictability, of zbud or z3fold.

Right.

Geliang, I cannot ack this without any *detail* about what the problem
with zram/zsmalloc is, and why we can't fix it in zsmalloc itself.
zbud and zsmalloc are totally different designs aimed at different goals
(determinism vs efficiency), so you can choose what you want between
zswap and zram rather than mixing the features.

* Re: [PATCH] zram: add zpool support
  2016-06-09  1:34     ` Minchan Kim
@ 2016-06-09  1:43       ` Sergey Senozhatsky
  -1 siblings, 0 replies; 26+ messages in thread
From: Sergey Senozhatsky @ 2016-06-09  1:43 UTC (permalink / raw)
  To: Minchan Kim
  Cc: Dan Streetman, Geliang Tang, Nitin Gupta, Sergey Senozhatsky,
	linux-kernel, Linux-MM, Vitaly Wool

Hello,

On (06/09/16 10:34), Minchan Kim wrote:
> On Wed, Jun 08, 2016 at 10:51:28AM -0400, Dan Streetman wrote:
> > On Wed, Jun 8, 2016 at 5:39 AM, Geliang Tang <geliangtang@gmail.com> wrote:
> > > This patch adds zpool support for zram, it will allow us to use both
> > > the zpool api and directly zsmalloc api in zram.
> > 
> > besides the problems below, this was discussed a while ago and I
> > believe Minchan is still against it, as nobody has so far shown what
> > the benefit to zram would be; zram doesn't need the predictability, or
> > evictability, of zbud or z3fold.
> 
> Right.
> 
> Geliang, I cannot ack without any *detail* that what's the problem of
> zram/zsmalloc, why we can't fix it in zsmalloc itself.
> The zbud and zsmalloc is otally different design to aim different goal
> determinism vs efficiency so you can choose what you want between zswap
> and zram rather than mixing the features.

I'd also probably Cc Vitaly Wool on this

(http://marc.info/?l=linux-mm&m=146537877415982&w=2)

	-ss

* Re: [PATCH] zram: add zpool support
  2016-06-08 14:51   ` Dan Streetman
@ 2016-06-13  9:11   ` Vitaly Wool
  2016-06-13 20:04     ` Fwd: " Vitaly Wool
  2016-06-15 14:42       ` Geliang Tang
  -1 siblings, 2 replies; 26+ messages in thread
From: Vitaly Wool @ 2016-06-13  9:11 UTC (permalink / raw)
  To: Dan Streetman
  Cc: Minchan Kim, Geliang Tang, Linux-MM, Sergey Senozhatsky,
	linux-kernel, Nitin Gupta

On 8 June 2016 at 6:33 PM, "Dan Streetman" <ddstreet@ieee.org> wrote:
>
> On Wed, Jun 8, 2016 at 5:39 AM, Geliang Tang <geliangtang@gmail.com>
wrote:
> > This patch adds zpool support for zram, it will allow us to use both
> > the zpool api and directly zsmalloc api in zram.
>
> besides the problems below, this was discussed a while ago and I
> believe Minchan is still against it, as nobody has so far shown what
> the benefit to zram would be; zram doesn't need the predictability, or
> evictability, of zbud or z3fold.

Well, I believe I have something to say here. z3fold is generally faster
than zsmalloc, which makes it a better choice for zram in some cases, e.g.
when the zram device is used for swap. Also, z3fold and zbud do not require
an MMU, so zram on top of them can be used on small Linux-powered MMU-less
IoT devices, as opposed to the traditional zram over zsmalloc. Otherwise I
do agree with Dan.
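
As a purely hypothetical sketch of that flexibility (this is not part
of the patch above; the parameter name and helper below are invented),
the backend could be made selectable at module load time, similar to
zswap's "zpool" parameter:

	static char *zram_zpool_type = ZRAM_ZPOOL_DEFAULT;	/* "zsmalloc" */
	module_param_named(zpool, zram_zpool_type, charp, 0444);
	MODULE_PARM_DESC(zpool, "zpool backend (zsmalloc, zbud, z3fold)");

	/* would replace the zpool_create_pool() call in zram_meta_alloc() */
	static struct zpool *zram_create_backend(char *pool_name)
	{
		if (!zpool_has_pool(zram_zpool_type)) {
			pr_err("zpool backend %s not available\n",
					zram_zpool_type);
			return NULL;
		}
		return zpool_create_pool(zram_zpool_type, pool_name, 0, NULL);
	}

With something like that, "modprobe zram zpool=z3fold" would pick z3fold
without any further code change in zram itself.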

>
> >
> > Signed-off-by: Geliang Tang <geliangtang@gmail.com>
> > ---
> >  drivers/block/zram/zram_drv.c | 97
+++++++++++++++++++++++++++++++++++++++++++
> >  drivers/block/zram/zram_drv.h |  5 +++
> >  2 files changed, 102 insertions(+)
> >
> > diff --git a/drivers/block/zram/zram_drv.c
b/drivers/block/zram/zram_drv.c
> > index 9e2a83c..1f90bd0 100644
> > --- a/drivers/block/zram/zram_drv.c
> > +++ b/drivers/block/zram/zram_drv.c
> > @@ -43,6 +43,11 @@ static const char *default_compressor = "lzo";
> >  /* Module params (documentation at end) */
> >  static unsigned int num_devices = 1;
> >
> > +#ifdef CONFIG_ZPOOL
> > +/* Compressed storage zpool to use */
> > +#define ZRAM_ZPOOL_DEFAULT "zsmalloc"
> > +#endif
>
> It doesn't make sense for zram to conditionally use zpool; either it
> uses it and thus has 'select ZPOOL' in its Kconfig entry, or it
> doesn't use it at all.
>
> > +
> >  static inline void deprecated_attr_warn(const char *name)
> >  {
> >         pr_warn_once("%d (%s) Attribute %s (and others) will be
removed. %s\n",
> > @@ -228,7 +233,11 @@ static ssize_t mem_used_total_show(struct device
*dev,
> >         down_read(&zram->init_lock);
> >         if (init_done(zram)) {
> >                 struct zram_meta *meta = zram->meta;
> > +#ifdef CONFIG_ZPOOL
> > +               val = zpool_get_total_size(meta->mem_pool) >>
PAGE_SHIFT;
> > +#else
> >                 val = zs_get_total_pages(meta->mem_pool);
> > +#endif
> >         }
> >         up_read(&zram->init_lock);
> >
> > @@ -296,8 +305,14 @@ static ssize_t mem_used_max_store(struct device
*dev,
> >         down_read(&zram->init_lock);
> >         if (init_done(zram)) {
> >                 struct zram_meta *meta = zram->meta;
> > +#ifdef CONFIG_ZPOOL
> > +               atomic_long_set(&zram->stats.max_used_pages,
> > +                               zpool_get_total_size(meta->mem_pool)
> > +                               >> PAGE_SHIFT);
> > +#else
> >                 atomic_long_set(&zram->stats.max_used_pages,
> >                                 zs_get_total_pages(meta->mem_pool));
> > +#endif
> >         }
> >         up_read(&zram->init_lock);
> >
> > @@ -366,6 +381,18 @@ static ssize_t comp_algorithm_store(struct device
*dev,
> >         return len;
> >  }
> >
> > +#ifdef CONFIG_ZPOOL
> > +static void zpool_compact(void *pool)
> > +{
> > +       zs_compact(pool);
> > +}
> > +
> > +static void zpool_stats(void *pool, struct zs_pool_stats *stats)
> > +{
> > +       zs_pool_stats(pool, stats);
> > +}
> > +#endif
>
> first, no.  this obviously makes using zpool in zram completely pointless.
>
> second, did you test this?  the pool you're passing is the zpool, not
> the zs_pool.  quite bad things will happen when this code runs.  There
> is no way to get the zs_pool from the zpool object (that's the point
> of abstraction, of course).
>
> The fact zpool doesn't have these apis (currently) is one of the
> reasons against changing zram to use zpool.
>
> > +
> >  static ssize_t compact_store(struct device *dev,
> >                 struct device_attribute *attr, const char *buf, size_t
len)
> >  {
> > @@ -379,7 +406,11 @@ static ssize_t compact_store(struct device *dev,
> >         }
> >
> >         meta = zram->meta;
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_compact(meta->mem_pool);
> > +#else
> >         zs_compact(meta->mem_pool);
> > +#endif
> >         up_read(&zram->init_lock);
> >
> >         return len;
> > @@ -416,8 +447,14 @@ static ssize_t mm_stat_show(struct device *dev,
> >
> >         down_read(&zram->init_lock);
> >         if (init_done(zram)) {
> > +#ifdef CONFIG_ZPOOL
> > +               mem_used = zpool_get_total_size(zram->meta->mem_pool)
> > +                               >> PAGE_SHIFT;
> > +               zpool_stats(zram->meta->mem_pool, &pool_stats);
> > +#else
> >                 mem_used = zs_get_total_pages(zram->meta->mem_pool);
> >                 zs_pool_stats(zram->meta->mem_pool, &pool_stats);
> > +#endif
> >         }
> >
> >         orig_size = atomic64_read(&zram->stats.pages_stored);
> > @@ -490,10 +527,18 @@ static void zram_meta_free(struct zram_meta
*meta, u64 disksize)
> >                 if (!handle)
> >                         continue;
> >
> > +#ifdef CONFIG_ZPOOL
> > +               zpool_free(meta->mem_pool, handle);
> > +#else
> >                 zs_free(meta->mem_pool, handle);
> > +#endif
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_destroy_pool(meta->mem_pool);
> > +#else
> >         zs_destroy_pool(meta->mem_pool);
> > +#endif
> >         vfree(meta->table);
> >         kfree(meta);
> >  }
> > @@ -513,7 +558,17 @@ static struct zram_meta *zram_meta_alloc(char
*pool_name, u64 disksize)
> >                 goto out_error;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       if (!zpool_has_pool(ZRAM_ZPOOL_DEFAULT)) {
> > +               pr_err("zpool %s not available\n", ZRAM_ZPOOL_DEFAULT);
> > +               goto out_error;
> > +       }
> > +
> > +       meta->mem_pool = zpool_create_pool(ZRAM_ZPOOL_DEFAULT,
> > +                                       pool_name, 0, NULL);
> > +#else
> >         meta->mem_pool = zs_create_pool(pool_name);
> > +#endif
> >         if (!meta->mem_pool) {
> >                 pr_err("Error creating memory pool\n");
> >                 goto out_error;
> > @@ -549,7 +604,11 @@ static void zram_free_page(struct zram *zram,
size_t index)
> >                 return;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_free(meta->mem_pool, handle);
> > +#else
> >         zs_free(meta->mem_pool, handle);
> > +#endif
> >
> >         atomic64_sub(zram_get_obj_size(meta, index),
> >                         &zram->stats.compr_data_size);
> > @@ -577,7 +636,11 @@ static int zram_decompress_page(struct zram *zram,
char *mem, u32 index)
> >                 return 0;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_RO);
> > +#else
> >         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
> > +#endif
> >         if (size == PAGE_SIZE) {
> >                 copy_page(mem, cmem);
> >         } else {
> > @@ -586,7 +649,11 @@ static int zram_decompress_page(struct zram *zram,
char *mem, u32 index)
> >                 ret = zcomp_decompress(zstrm, cmem, size, mem);
> >                 zcomp_stream_put(zram->comp);
> >         }
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_unmap_handle(meta->mem_pool, handle);
> > +#else
> >         zs_unmap_object(meta->mem_pool, handle);
> > +#endif
> >         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> >
> >         /* Should NEVER happen. Return bio error if it does. */
> > @@ -735,20 +802,34 @@ compress_again:
> >          * from the slow path and handle has already been allocated.
> >          */
> >         if (!handle)
> > +#ifdef CONFIG_ZPOOL
> > +               ret = zpool_malloc(meta->mem_pool, clen,
> > +                               __GFP_KSWAPD_RECLAIM |
> > +                               __GFP_NOWARN |
> > +                               __GFP_HIGHMEM |
> > +                               __GFP_MOVABLE, &handle);
> > +#else
> >                 handle = zs_malloc(meta->mem_pool, clen,
> >                                 __GFP_KSWAPD_RECLAIM |
> >                                 __GFP_NOWARN |
> >                                 __GFP_HIGHMEM |
> >                                 __GFP_MOVABLE);
> > +#endif
> >         if (!handle) {
> >                 zcomp_stream_put(zram->comp);
> >                 zstrm = NULL;
> >
> >                 atomic64_inc(&zram->stats.writestall);
> >
> > +#ifdef CONFIG_ZPOOL
> > +               ret = zpool_malloc(meta->mem_pool, clen,
> > +                               GFP_NOIO | __GFP_HIGHMEM |
> > +                               __GFP_MOVABLE, &handle);
> > +#else
> >                 handle = zs_malloc(meta->mem_pool, clen,
> >                                 GFP_NOIO | __GFP_HIGHMEM |
> >                                 __GFP_MOVABLE);
> > +#endif
> >                 if (handle)
> >                         goto compress_again;
> >
> > @@ -758,16 +839,28 @@ compress_again:
> >                 goto out;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       alloced_pages = zpool_get_total_size(meta->mem_pool) >>
PAGE_SHIFT;
> > +#else
> >         alloced_pages = zs_get_total_pages(meta->mem_pool);
> > +#endif
> >         update_used_max(zram, alloced_pages);
> >
> >         if (zram->limit_pages && alloced_pages > zram->limit_pages) {
> > +#ifdef CONFIG_ZPOOL
> > +               zpool_free(meta->mem_pool, handle);
> > +#else
> >                 zs_free(meta->mem_pool, handle);
> > +#endif
> >                 ret = -ENOMEM;
> >                 goto out;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_WO);
> > +#else
> >         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
> > +#endif
> >
> >         if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
> >                 src = kmap_atomic(page);
> > @@ -779,7 +872,11 @@ compress_again:
> >
> >         zcomp_stream_put(zram->comp);
> >         zstrm = NULL;
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_unmap_handle(meta->mem_pool, handle);
> > +#else
> >         zs_unmap_object(meta->mem_pool, handle);
> > +#endif
> >
> >         /*
> >          * Free memory associated with this sector
> > diff --git a/drivers/block/zram/zram_drv.h
b/drivers/block/zram/zram_drv.h
> > index 74fcf10..68f1222 100644
> > --- a/drivers/block/zram/zram_drv.h
> > +++ b/drivers/block/zram/zram_drv.h
> > @@ -17,6 +17,7 @@
> >
> >  #include <linux/rwsem.h>
> >  #include <linux/zsmalloc.h>
> > +#include <linux/zpool.h>
> >  #include <linux/crypto.h>
> >
> >  #include "zcomp.h"
> > @@ -91,7 +92,11 @@ struct zram_stats {
> >
> >  struct zram_meta {
> >         struct zram_table_entry *table;
> > +#ifdef CONFIG_ZPOOL
> > +       struct zpool *mem_pool;
> > +#else
> >         struct zs_pool *mem_pool;
> > +#endif
> >  };
> >
> >  struct zram {
> > --
> > 1.9.1
> >

Best regards,
Vitaly

* Fwd: [PATCH] zram: add zpool support
  2016-06-13  9:11   ` Vitaly Wool
@ 2016-06-13 20:04     ` Vitaly Wool
  2016-06-15 14:42       ` Geliang Tang
  1 sibling, 0 replies; 26+ messages in thread
From: Vitaly Wool @ 2016-06-13 20:04 UTC (permalink / raw)
  To: Linux-MM

[resending to the list since it didn't work before]

On 8 June 2016 at 6:33 PM, "Dan Streetman" <ddstreet@ieee.org> wrote:
>
> On Wed, Jun 8, 2016 at 5:39 AM, Geliang Tang <geliangtang@gmail.com> wrote:
> > This patch adds zpool support for zram, it will allow us to use both
> > the zpool api and directly zsmalloc api in zram.
>
> besides the problems below, this was discussed a while ago and I
> believe Minchan is still against it, as nobody has so far shown what
> the benefit to zram would be; zram doesn't need the predictability, or
> evictability, of zbud or z3fold.

Well, I believe I have something to say here. z3fold is generally
faster than zsmalloc, which makes it a better choice for zram in some
cases, e.g. when the zram device is used for swap. Also, z3fold and
zbud do not require an MMU, so zram on top of them can be used on small
Linux-powered MMU-less IoT devices, as opposed to the traditional zram
over zsmalloc. Otherwise I do agree with Dan.

>
> >
> > Signed-off-by: Geliang Tang <geliangtang@gmail.com>
> > ---
> >  drivers/block/zram/zram_drv.c | 97 +++++++++++++++++++++++++++++++++++++++++++
> >  drivers/block/zram/zram_drv.h |  5 +++
> >  2 files changed, 102 insertions(+)
> >
> > diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> > index 9e2a83c..1f90bd0 100644
> > --- a/drivers/block/zram/zram_drv.c
> > +++ b/drivers/block/zram/zram_drv.c
> > @@ -43,6 +43,11 @@ static const char *default_compressor = "lzo";
> >  /* Module params (documentation at end) */
> >  static unsigned int num_devices = 1;
> >
> > +#ifdef CONFIG_ZPOOL
> > +/* Compressed storage zpool to use */
> > +#define ZRAM_ZPOOL_DEFAULT "zsmalloc"
> > +#endif
>
> It doesn't make sense for zram to conditionally use zpool; either it
> uses it and thus has 'select ZPOOL' in its Kconfig entry, or it
> doesn't use it at all.
>
> > +
> >  static inline void deprecated_attr_warn(const char *name)
> >  {
> >         pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
> > @@ -228,7 +233,11 @@ static ssize_t mem_used_total_show(struct device *dev,
> >         down_read(&zram->init_lock);
> >         if (init_done(zram)) {
> >                 struct zram_meta *meta = zram->meta;
> > +#ifdef CONFIG_ZPOOL
> > +               val = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
> > +#else
> >                 val = zs_get_total_pages(meta->mem_pool);
> > +#endif
> >         }
> >         up_read(&zram->init_lock);
> >
> > @@ -296,8 +305,14 @@ static ssize_t mem_used_max_store(struct device *dev,
> >         down_read(&zram->init_lock);
> >         if (init_done(zram)) {
> >                 struct zram_meta *meta = zram->meta;
> > +#ifdef CONFIG_ZPOOL
> > +               atomic_long_set(&zram->stats.max_used_pages,
> > +                               zpool_get_total_size(meta->mem_pool)
> > +                               >> PAGE_SHIFT);
> > +#else
> >                 atomic_long_set(&zram->stats.max_used_pages,
> >                                 zs_get_total_pages(meta->mem_pool));
> > +#endif
> >         }
> >         up_read(&zram->init_lock);
> >
> > @@ -366,6 +381,18 @@ static ssize_t comp_algorithm_store(struct device *dev,
> >         return len;
> >  }
> >
> > +#ifdef CONFIG_ZPOOL
> > +static void zpool_compact(void *pool)
> > +{
> > +       zs_compact(pool);
> > +}
> > +
> > +static void zpool_stats(void *pool, struct zs_pool_stats *stats)
> > +{
> > +       zs_pool_stats(pool, stats);
> > +}
> > +#endif
>
> first, no.  this obviously makes using zpool in zram completely pointless.
>
> second, did you test this?  the pool you're passing is the zpool, not
> the zs_pool.  quite bad things will happen when this code runs.  There
> is no way to get the zs_pool from the zpool object (that's the point
> of abstraction, of course).
>
> The fact zpool doesn't have these apis (currently) is one of the
> reasons against changing zram to use zpool.
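To make that point concrete, the layering looks roughly like this (an
illustrative sketch, not the exact definitions from mm/zpool.c):

	/*
	 * Rough shape of the zpool wrapper: the backend pool (here a
	 * zs_pool) is private to the zpool layer and only reachable
	 * through the driver ops.
	 */
	struct zpool {
		struct zpool_driver *driver;	/* e.g. zsmalloc's zpool glue */
		void *pool;			/* the real zs_pool, opaque to callers */
		/* ... */
	};

	/*
	 * zram only ever holds the struct zpool *, so casting that
	 * pointer and handing it to zs_compact()/zs_pool_stats(), as the
	 * zpool_compact()/zpool_stats() wrappers above do, dereferences
	 * the wrong structure entirely.
	 */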
>
> > +
> >  static ssize_t compact_store(struct device *dev,
> >                 struct device_attribute *attr, const char *buf, size_t len)
> >  {
> > @@ -379,7 +406,11 @@ static ssize_t compact_store(struct device *dev,
> >         }
> >
> >         meta = zram->meta;
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_compact(meta->mem_pool);
> > +#else
> >         zs_compact(meta->mem_pool);
> > +#endif
> >         up_read(&zram->init_lock);
> >
> >         return len;
> > @@ -416,8 +447,14 @@ static ssize_t mm_stat_show(struct device *dev,
> >
> >         down_read(&zram->init_lock);
> >         if (init_done(zram)) {
> > +#ifdef CONFIG_ZPOOL
> > +               mem_used = zpool_get_total_size(zram->meta->mem_pool)
> > +                               >> PAGE_SHIFT;
> > +               zpool_stats(zram->meta->mem_pool, &pool_stats);
> > +#else
> >                 mem_used = zs_get_total_pages(zram->meta->mem_pool);
> >                 zs_pool_stats(zram->meta->mem_pool, &pool_stats);
> > +#endif
> >         }
> >
> >         orig_size = atomic64_read(&zram->stats.pages_stored);
> > @@ -490,10 +527,18 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
> >                 if (!handle)
> >                         continue;
> >
> > +#ifdef CONFIG_ZPOOL
> > +               zpool_free(meta->mem_pool, handle);
> > +#else
> >                 zs_free(meta->mem_pool, handle);
> > +#endif
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_destroy_pool(meta->mem_pool);
> > +#else
> >         zs_destroy_pool(meta->mem_pool);
> > +#endif
> >         vfree(meta->table);
> >         kfree(meta);
> >  }
> > @@ -513,7 +558,17 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
> >                 goto out_error;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       if (!zpool_has_pool(ZRAM_ZPOOL_DEFAULT)) {
> > +               pr_err("zpool %s not available\n", ZRAM_ZPOOL_DEFAULT);
> > +               goto out_error;
> > +       }
> > +
> > +       meta->mem_pool = zpool_create_pool(ZRAM_ZPOOL_DEFAULT,
> > +                                       pool_name, 0, NULL);
> > +#else
> >         meta->mem_pool = zs_create_pool(pool_name);
> > +#endif
> >         if (!meta->mem_pool) {
> >                 pr_err("Error creating memory pool\n");
> >                 goto out_error;
> > @@ -549,7 +604,11 @@ static void zram_free_page(struct zram *zram, size_t index)
> >                 return;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_free(meta->mem_pool, handle);
> > +#else
> >         zs_free(meta->mem_pool, handle);
> > +#endif
> >
> >         atomic64_sub(zram_get_obj_size(meta, index),
> >                         &zram->stats.compr_data_size);
> > @@ -577,7 +636,11 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> >                 return 0;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_RO);
> > +#else
> >         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
> > +#endif
> >         if (size == PAGE_SIZE) {
> >                 copy_page(mem, cmem);
> >         } else {
> > @@ -586,7 +649,11 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> >                 ret = zcomp_decompress(zstrm, cmem, size, mem);
> >                 zcomp_stream_put(zram->comp);
> >         }
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_unmap_handle(meta->mem_pool, handle);
> > +#else
> >         zs_unmap_object(meta->mem_pool, handle);
> > +#endif
> >         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> >
> >         /* Should NEVER happen. Return bio error if it does. */
> > @@ -735,20 +802,34 @@ compress_again:
> >          * from the slow path and handle has already been allocated.
> >          */
> >         if (!handle)
> > +#ifdef CONFIG_ZPOOL
> > +               ret = zpool_malloc(meta->mem_pool, clen,
> > +                               __GFP_KSWAPD_RECLAIM |
> > +                               __GFP_NOWARN |
> > +                               __GFP_HIGHMEM |
> > +                               __GFP_MOVABLE, &handle);
> > +#else
> >                 handle = zs_malloc(meta->mem_pool, clen,
> >                                 __GFP_KSWAPD_RECLAIM |
> >                                 __GFP_NOWARN |
> >                                 __GFP_HIGHMEM |
> >                                 __GFP_MOVABLE);
> > +#endif
> >         if (!handle) {
> >                 zcomp_stream_put(zram->comp);
> >                 zstrm = NULL;
> >
> >                 atomic64_inc(&zram->stats.writestall);
> >
> > +#ifdef CONFIG_ZPOOL
> > +               ret = zpool_malloc(meta->mem_pool, clen,
> > +                               GFP_NOIO | __GFP_HIGHMEM |
> > +                               __GFP_MOVABLE, &handle);
> > +#else
> >                 handle = zs_malloc(meta->mem_pool, clen,
> >                                 GFP_NOIO | __GFP_HIGHMEM |
> >                                 __GFP_MOVABLE);
> > +#endif
> >                 if (handle)
> >                         goto compress_again;
> >
> > @@ -758,16 +839,28 @@ compress_again:
> >                 goto out;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       alloced_pages = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
> > +#else
> >         alloced_pages = zs_get_total_pages(meta->mem_pool);
> > +#endif
> >         update_used_max(zram, alloced_pages);
> >
> >         if (zram->limit_pages && alloced_pages > zram->limit_pages) {
> > +#ifdef CONFIG_ZPOOL
> > +               zpool_free(meta->mem_pool, handle);
> > +#else
> >                 zs_free(meta->mem_pool, handle);
> > +#endif
> >                 ret = -ENOMEM;
> >                 goto out;
> >         }
> >
> > +#ifdef CONFIG_ZPOOL
> > +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_WO);
> > +#else
> >         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
> > +#endif
> >
> >         if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
> >                 src = kmap_atomic(page);
> > @@ -779,7 +872,11 @@ compress_again:
> >
> >         zcomp_stream_put(zram->comp);
> >         zstrm = NULL;
> > +#ifdef CONFIG_ZPOOL
> > +       zpool_unmap_handle(meta->mem_pool, handle);
> > +#else
> >         zs_unmap_object(meta->mem_pool, handle);
> > +#endif
> >
> >         /*
> >          * Free memory associated with this sector
> > diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> > index 74fcf10..68f1222 100644
> > --- a/drivers/block/zram/zram_drv.h
> > +++ b/drivers/block/zram/zram_drv.h
> > @@ -17,6 +17,7 @@
> >
> >  #include <linux/rwsem.h>
> >  #include <linux/zsmalloc.h>
> > +#include <linux/zpool.h>
> >  #include <linux/crypto.h>
> >
> >  #include "zcomp.h"
> > @@ -91,7 +92,11 @@ struct zram_stats {
> >
> >  struct zram_meta {
> >         struct zram_table_entry *table;
> > +#ifdef CONFIG_ZPOOL
> > +       struct zpool *mem_pool;
> > +#else
> >         struct zs_pool *mem_pool;
> > +#endif
> >  };
> >
> >  struct zram {
> > --
> > 1.9.1
> >

Best regards,
Vitaly

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: email@kvack.org

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH] zram: add zpool support v2
  2016-06-13  9:11   ` Vitaly Wool
@ 2016-06-15 14:42       ` Geliang Tang
  2016-06-15 14:42       ` Geliang Tang
  1 sibling, 0 replies; 26+ messages in thread
From: Geliang Tang @ 2016-06-15 14:42 UTC (permalink / raw)
  To: Minchan Kim, Nitin Gupta, Sergey Senozhatsky, Dan Streetman, Vitaly Wool
  Cc: Geliang Tang, linux-kernel, linux-mm

On Mon, Jun 13, 2016 at 11:11:00AM +0200, Vitaly Wool wrote:
> On 8 June 2016 at 6:33 PM, "Dan Streetman" <ddstreet@ieee.org> wrote:
> >
> > On Wed, Jun 8, 2016 at 5:39 AM, Geliang Tang <geliangtang@gmail.com>
> wrote:
> > > This patch adds zpool support for zram, it will allow us to use both
> > > the zpool api and directly zsmalloc api in zram.
> >
> > besides the problems below, this was discussed a while ago and I
> > believe Minchan is still against it, as nobody has so far shown what
> > the benefit to zram would be; zram doesn't need the predictability, or
> > evictability, of zbud or z3fold.
> 
> > Right.
> >
> > Geliang, I cannot ack without any *detail* about what the problem of
> > zram/zsmalloc is and why we can't fix it in zsmalloc itself.
> > The zbud and zsmalloc are totally different designs aimed at different
> > goals, determinism vs. efficiency, so you can choose what you want
> > between zswap and zram rather than mixing the features.
>
> I'd also probably Cc Vitaly Wool on this
>
> Well, I believe I have something to say here. z3fold is generally faster
> than zsmalloc which makes it a better choice for zram sometimes, e.g. when
> zram device is used for swap. Also,  z3fold and zbud do not require MMU so
> zram over these can be used on small Linux powered MMU-less IoT devices, as
> opposed to the traditional zram over zsmalloc. Otherwise I do agree with
> Dan.
> 
> >
> > It doesn't make sense for zram to conditionally use zpool; either it
> > uses it and thus has 'select ZPOOL' in its Kconfig entry, or it
> > doesn't use it at all.
> >
> > > +#endif
> >
> > first, no.  this obviously makes using zpool in zram completely pointless.
> >
> > second, did you test this?  the pool you're passing is the zpool, not
> > the zs_pool.  quite bad things will happen when this code runs.  There
> > is no way to get the zs_pool from the zpool object (that's the point
> > of abstraction, of course).
> >
> > The fact zpool doesn't have these apis (currently) is one of the
> > reasons against changing zram to use zpool.
> >

Thank you all for your reply. I updated the patch and I hope this is better.

Geliang Tang (1):
  zram: update zram to use zpool

 drivers/block/zram/Kconfig    |  3 ++-
 drivers/block/zram/zram_drv.c | 59 ++++++++++++++++++++++---------------------
 drivers/block/zram/zram_drv.h |  4 +--
 mm/zsmalloc.c                 | 12 +++++----
 4 files changed, 41 insertions(+), 37 deletions(-)

-- 
2.5.5

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH] zram: update zram to use zpool
  2016-06-15 14:42       ` Geliang Tang
@ 2016-06-15 14:42         ` Geliang Tang
  -1 siblings, 0 replies; 26+ messages in thread
From: Geliang Tang @ 2016-06-15 14:42 UTC (permalink / raw)
  To: Minchan Kim, Nitin Gupta, Sergey Senozhatsky, Dan Streetman, Vitaly Wool
  Cc: Geliang Tang, linux-kernel, linux-mm

Change zram to use the zpool api instead of using zsmalloc directly.
The zpool api doesn't provide zs_compact() or zs_pool_stats(), so I did
the following two things to compensate:
1) Replace zs_compact() with zpool_shrink(), and have zpool_shrink()
   call zs_compact() inside zsmalloc (the resulting call path is
   sketched below).
2) The 'pages_compacted' attribute was shown in zram by calling
   zs_pool_stats(). To avoid calling zs_pool_stats(), I moved that
   attribute into zsmalloc's own stats output.
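For reference, the resulting compaction call path with the zsmalloc
backend looks roughly like this (sketch only):

	/*
	 * echo 1 > /sys/block/zram0/compact
	 *
	 *   compact_store()                       drivers/block/zram/zram_drv.c
	 *     -> zpool_shrink(meta->mem_pool, 0, NULL)
	 *          -> zpool driver ->shrink()     mm/zpool.c dispatch
	 *               -> zs_zpool_shrink()      mm/zsmalloc.c zpool glue
	 *                    -> zs_compact()
	 */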

Signed-off-by: Geliang Tang <geliangtang@gmail.com>
---
 drivers/block/zram/Kconfig    |  3 ++-
 drivers/block/zram/zram_drv.c | 59 ++++++++++++++++++++++---------------------
 drivers/block/zram/zram_drv.h |  4 +--
 mm/zsmalloc.c                 | 12 +++++----
 4 files changed, 41 insertions(+), 37 deletions(-)

diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index b8ecba6..6389a5a 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -1,6 +1,7 @@
 config ZRAM
 	tristate "Compressed RAM block device support"
-	depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
+	depends on BLOCK && SYSFS && ZPOOL && CRYPTO
+	select ZSMALLOC
 	select CRYPTO_LZO
 	default n
 	help
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7454cf1..7ee9050 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -39,6 +39,7 @@ static DEFINE_MUTEX(zram_index_mutex);
 
 static int zram_major;
 static const char *default_compressor = "lzo";
+static char *default_zpool_type = "zsmalloc";
 
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
@@ -228,11 +229,11 @@ static ssize_t mem_used_total_show(struct device *dev,
 	down_read(&zram->init_lock);
 	if (init_done(zram)) {
 		struct zram_meta *meta = zram->meta;
-		val = zs_get_total_pages(meta->mem_pool);
+		val = zpool_get_total_size(meta->mem_pool);
 	}
 	up_read(&zram->init_lock);
 
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
 }
 
 static ssize_t mem_limit_show(struct device *dev,
@@ -297,7 +298,7 @@ static ssize_t mem_used_max_store(struct device *dev,
 	if (init_done(zram)) {
 		struct zram_meta *meta = zram->meta;
 		atomic_long_set(&zram->stats.max_used_pages,
-				zs_get_total_pages(meta->mem_pool));
+			zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT);
 	}
 	up_read(&zram->init_lock);
 
@@ -379,7 +380,7 @@ static ssize_t compact_store(struct device *dev,
 	}
 
 	meta = zram->meta;
-	zs_compact(meta->mem_pool);
+	zpool_shrink(meta->mem_pool, 0, NULL);
 	up_read(&zram->init_lock);
 
 	return len;
@@ -407,31 +408,25 @@ static ssize_t mm_stat_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct zram *zram = dev_to_zram(dev);
-	struct zs_pool_stats pool_stats;
 	u64 orig_size, mem_used = 0;
 	long max_used;
 	ssize_t ret;
 
-	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
-
 	down_read(&zram->init_lock);
-	if (init_done(zram)) {
-		mem_used = zs_get_total_pages(zram->meta->mem_pool);
-		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
-	}
+	if (init_done(zram))
+		mem_used = zpool_get_total_size(zram->meta->mem_pool);
 
 	orig_size = atomic64_read(&zram->stats.pages_stored);
 	max_used = atomic_long_read(&zram->stats.max_used_pages);
 
 	ret = scnprintf(buf, PAGE_SIZE,
-			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
+			"%8llu %8llu %8llu %8lu %8ld %8llu\n",
 			orig_size << PAGE_SHIFT,
 			(u64)atomic64_read(&zram->stats.compr_data_size),
-			mem_used << PAGE_SHIFT,
+			mem_used,
 			zram->limit_pages << PAGE_SHIFT,
 			max_used << PAGE_SHIFT,
-			(u64)atomic64_read(&zram->stats.zero_pages),
-			pool_stats.pages_compacted);
+			(u64)atomic64_read(&zram->stats.zero_pages));
 	up_read(&zram->init_lock);
 
 	return ret;
@@ -490,10 +485,10 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 		if (!handle)
 			continue;
 
-		zs_free(meta->mem_pool, handle);
+		zpool_free(meta->mem_pool, handle);
 	}
 
-	zs_destroy_pool(meta->mem_pool);
+	zpool_destroy_pool(meta->mem_pool);
 	vfree(meta->table);
 	kfree(meta);
 }
@@ -513,7 +508,13 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 		goto out_error;
 	}
 
-	meta->mem_pool = zs_create_pool(pool_name);
+	if (!zpool_has_pool(default_zpool_type)) {
+		pr_err("zpool %s not available\n", default_zpool_type);
+		goto out_error;
+	}
+
+	meta->mem_pool = zpool_create_pool(default_zpool_type,
+					   pool_name, 0, NULL);
 	if (!meta->mem_pool) {
 		pr_err("Error creating memory pool\n");
 		goto out_error;
@@ -549,7 +550,7 @@ static void zram_free_page(struct zram *zram, size_t index)
 		return;
 	}
 
-	zs_free(meta->mem_pool, handle);
+	zpool_free(meta->mem_pool, handle);
 
 	atomic64_sub(zram_get_obj_size(meta, index),
 			&zram->stats.compr_data_size);
@@ -577,7 +578,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 		return 0;
 	}
 
-	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+	cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_RO);
 	if (size == PAGE_SIZE) {
 		copy_page(mem, cmem);
 	} else {
@@ -586,7 +587,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 		ret = zcomp_decompress(zstrm, cmem, size, mem);
 		zcomp_stream_put(zram->comp);
 	}
-	zs_unmap_object(meta->mem_pool, handle);
+	zpool_unmap_handle(meta->mem_pool, handle);
 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	/* Should NEVER happen. Return bio error if it does. */
@@ -735,20 +736,20 @@ compress_again:
 	 * from the slow path and handle has already been allocated.
 	 */
 	if (!handle)
-		handle = zs_malloc(meta->mem_pool, clen,
+		ret = zpool_malloc(meta->mem_pool, clen,
 				__GFP_KSWAPD_RECLAIM |
 				__GFP_NOWARN |
 				__GFP_HIGHMEM |
-				__GFP_MOVABLE);
+				__GFP_MOVABLE, &handle);
 	if (!handle) {
 		zcomp_stream_put(zram->comp);
 		zstrm = NULL;
 
 		atomic64_inc(&zram->stats.writestall);
 
-		handle = zs_malloc(meta->mem_pool, clen,
+		ret = zpool_malloc(meta->mem_pool, clen,
 				GFP_NOIO | __GFP_HIGHMEM |
-				__GFP_MOVABLE);
+				__GFP_MOVABLE, &handle);
 		if (handle)
 			goto compress_again;
 
@@ -758,16 +759,16 @@ compress_again:
 		goto out;
 	}
 
-	alloced_pages = zs_get_total_pages(meta->mem_pool);
+	alloced_pages = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
 	update_used_max(zram, alloced_pages);
 
 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
-		zs_free(meta->mem_pool, handle);
+		zpool_free(meta->mem_pool, handle);
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
+	cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_WO);
 
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
 		src = kmap_atomic(page);
@@ -779,7 +780,7 @@ compress_again:
 
 	zcomp_stream_put(zram->comp);
 	zstrm = NULL;
-	zs_unmap_object(meta->mem_pool, handle);
+	zpool_unmap_handle(meta->mem_pool, handle);
 
 	/*
 	 * Free memory associated with this sector
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 74fcf10..de3e013 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -16,7 +16,7 @@
 #define _ZRAM_DRV_H_
 
 #include <linux/rwsem.h>
-#include <linux/zsmalloc.h>
+#include <linux/zpool.h>
 #include <linux/crypto.h>
 
 #include "zcomp.h"
@@ -91,7 +91,7 @@ struct zram_stats {
 
 struct zram_meta {
 	struct zram_table_entry *table;
-	struct zs_pool *mem_pool;
+	struct zpool *mem_pool;
 };
 
 struct zram {
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6a58edc..56e6439 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -421,7 +421,8 @@ static void zs_zpool_free(void *pool, unsigned long handle)
 static int zs_zpool_shrink(void *pool, unsigned int pages,
 			unsigned int *reclaimed)
 {
-	return -EINVAL;
+	zs_compact(pool);
+	return 0;
 }
 
 static void *zs_zpool_map(void *pool, unsigned long handle,
@@ -609,10 +610,10 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
 	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
 	unsigned long total_freeable = 0;
 
-	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
+	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s %15s\n",
 			"class", "size", "almost_full", "almost_empty",
 			"obj_allocated", "obj_used", "pages_used",
-			"pages_per_zspage", "freeable");
+			"pages_per_zspage", "freeable", "pages_compacted");
 
 	for (i = 0; i < zs_size_classes; i++) {
 		class = pool->size_class[i];
@@ -648,10 +649,11 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
 	}
 
 	seq_puts(s, "\n");
-	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
+	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu %15lu\n",
 			"Total", "", total_class_almost_full,
 			total_class_almost_empty, total_objs,
-			total_used_objs, total_pages, "", total_freeable);
+			total_used_objs, total_pages, "", total_freeable,
+			pool->stats.pages_compacted);
 
 	return 0;
 }
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* Re: [PATCH] zram: update zram to use zpool
  2016-06-15 14:42         ` Geliang Tang
@ 2016-06-15 14:54           ` Dan Streetman
  -1 siblings, 0 replies; 26+ messages in thread
From: Dan Streetman @ 2016-06-15 14:54 UTC (permalink / raw)
  To: Geliang Tang
  Cc: Minchan Kim, Nitin Gupta, Sergey Senozhatsky, Dan Streetman,
	Vitaly Wool, linux-kernel, linux-mm

On Wed, Jun 15, 2016 at 10:42 AM, Geliang Tang <geliangtang@gmail.com> wrote:
> Change zram to use the zpool api instead of using zsmalloc directly.
> The zpool api doesn't provide zs_compact() or zs_pool_stats(), so I did
> the following two things to compensate:
> 1) Replace zs_compact() with zpool_shrink(), and have zpool_shrink()
>    call zs_compact() inside zsmalloc.
> 2) The 'pages_compacted' attribute was shown in zram by calling
>    zs_pool_stats(). To avoid calling zs_pool_stats(), I moved that
>    attribute into zsmalloc's own stats output.

I think you're going to have quite a hard time getting a patch like
this accepted without doing some convincing that it's really needed,
and I don't see any new reasons here.  You'll probably need hard data
on how it improves speed or some other metric, plus reasoning on how
using zpool is better for zram than keeping its direct link to zsmalloc
(meaning, why zsmalloc itself can't just be improved to perform on par,
on whatever metric you're comparing).

>
> Signed-off-by: Geliang Tang <geliangtang@gmail.com>
> ---
>  drivers/block/zram/Kconfig    |  3 ++-
>  drivers/block/zram/zram_drv.c | 59 ++++++++++++++++++++++---------------------
>  drivers/block/zram/zram_drv.h |  4 +--
>  mm/zsmalloc.c                 | 12 +++++----
>  4 files changed, 41 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
> index b8ecba6..6389a5a 100644
> --- a/drivers/block/zram/Kconfig
> +++ b/drivers/block/zram/Kconfig
> @@ -1,6 +1,7 @@
>  config ZRAM
>         tristate "Compressed RAM block device support"
> -       depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
> +       depends on BLOCK && SYSFS && ZPOOL && CRYPTO
> +       select ZSMALLOC
>         select CRYPTO_LZO
>         default n
>         help
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 7454cf1..7ee9050 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -39,6 +39,7 @@ static DEFINE_MUTEX(zram_index_mutex);
>
>  static int zram_major;
>  static const char *default_compressor = "lzo";
> +static char *default_zpool_type = "zsmalloc";
>
>  /* Module params (documentation at end) */
>  static unsigned int num_devices = 1;
> @@ -228,11 +229,11 @@ static ssize_t mem_used_total_show(struct device *dev,
>         down_read(&zram->init_lock);
>         if (init_done(zram)) {
>                 struct zram_meta *meta = zram->meta;
> -               val = zs_get_total_pages(meta->mem_pool);
> +               val = zpool_get_total_size(meta->mem_pool);
>         }
>         up_read(&zram->init_lock);
>
> -       return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
> +       return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
>  }
>
>  static ssize_t mem_limit_show(struct device *dev,
> @@ -297,7 +298,7 @@ static ssize_t mem_used_max_store(struct device *dev,
>         if (init_done(zram)) {
>                 struct zram_meta *meta = zram->meta;
>                 atomic_long_set(&zram->stats.max_used_pages,
> -                               zs_get_total_pages(meta->mem_pool));
> +                       zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT);
>         }
>         up_read(&zram->init_lock);
>
> @@ -379,7 +380,7 @@ static ssize_t compact_store(struct device *dev,
>         }
>
>         meta = zram->meta;
> -       zs_compact(meta->mem_pool);
> +       zpool_shrink(meta->mem_pool, 0, NULL);
>         up_read(&zram->init_lock);
>
>         return len;
> @@ -407,31 +408,25 @@ static ssize_t mm_stat_show(struct device *dev,
>                 struct device_attribute *attr, char *buf)
>  {
>         struct zram *zram = dev_to_zram(dev);
> -       struct zs_pool_stats pool_stats;
>         u64 orig_size, mem_used = 0;
>         long max_used;
>         ssize_t ret;
>
> -       memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
> -
>         down_read(&zram->init_lock);
> -       if (init_done(zram)) {
> -               mem_used = zs_get_total_pages(zram->meta->mem_pool);
> -               zs_pool_stats(zram->meta->mem_pool, &pool_stats);
> -       }
> +       if (init_done(zram))
> +               mem_used = zpool_get_total_size(zram->meta->mem_pool);
>
>         orig_size = atomic64_read(&zram->stats.pages_stored);
>         max_used = atomic_long_read(&zram->stats.max_used_pages);
>
>         ret = scnprintf(buf, PAGE_SIZE,
> -                       "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
> +                       "%8llu %8llu %8llu %8lu %8ld %8llu\n",
>                         orig_size << PAGE_SHIFT,
>                         (u64)atomic64_read(&zram->stats.compr_data_size),
> -                       mem_used << PAGE_SHIFT,
> +                       mem_used,
>                         zram->limit_pages << PAGE_SHIFT,
>                         max_used << PAGE_SHIFT,
> -                       (u64)atomic64_read(&zram->stats.zero_pages),
> -                       pool_stats.pages_compacted);
> +                       (u64)atomic64_read(&zram->stats.zero_pages));
>         up_read(&zram->init_lock);
>
>         return ret;
> @@ -490,10 +485,10 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
>                 if (!handle)
>                         continue;
>
> -               zs_free(meta->mem_pool, handle);
> +               zpool_free(meta->mem_pool, handle);
>         }
>
> -       zs_destroy_pool(meta->mem_pool);
> +       zpool_destroy_pool(meta->mem_pool);
>         vfree(meta->table);
>         kfree(meta);
>  }
> @@ -513,7 +508,13 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
>                 goto out_error;
>         }
>
> -       meta->mem_pool = zs_create_pool(pool_name);
> +       if (!zpool_has_pool(default_zpool_type)) {
> +               pr_err("zpool %s not available\n", default_zpool_type);
> +               goto out_error;
> +       }
> +
> +       meta->mem_pool = zpool_create_pool(default_zpool_type,
> +                                          pool_name, 0, NULL);
>         if (!meta->mem_pool) {
>                 pr_err("Error creating memory pool\n");
>                 goto out_error;
> @@ -549,7 +550,7 @@ static void zram_free_page(struct zram *zram, size_t index)
>                 return;
>         }
>
> -       zs_free(meta->mem_pool, handle);
> +       zpool_free(meta->mem_pool, handle);
>
>         atomic64_sub(zram_get_obj_size(meta, index),
>                         &zram->stats.compr_data_size);
> @@ -577,7 +578,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>                 return 0;
>         }
>
> -       cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
> +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_RO);
>         if (size == PAGE_SIZE) {
>                 copy_page(mem, cmem);
>         } else {
> @@ -586,7 +587,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>                 ret = zcomp_decompress(zstrm, cmem, size, mem);
>                 zcomp_stream_put(zram->comp);
>         }
> -       zs_unmap_object(meta->mem_pool, handle);
> +       zpool_unmap_handle(meta->mem_pool, handle);
>         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
>
>         /* Should NEVER happen. Return bio error if it does. */
> @@ -735,20 +736,20 @@ compress_again:
>          * from the slow path and handle has already been allocated.
>          */
>         if (!handle)
> -               handle = zs_malloc(meta->mem_pool, clen,
> +               ret = zpool_malloc(meta->mem_pool, clen,
>                                 __GFP_KSWAPD_RECLAIM |
>                                 __GFP_NOWARN |
>                                 __GFP_HIGHMEM |
> -                               __GFP_MOVABLE);
> +                               __GFP_MOVABLE, &handle);
>         if (!handle) {
>                 zcomp_stream_put(zram->comp);
>                 zstrm = NULL;
>
>                 atomic64_inc(&zram->stats.writestall);
>
> -               handle = zs_malloc(meta->mem_pool, clen,
> +               ret = zpool_malloc(meta->mem_pool, clen,
>                                 GFP_NOIO | __GFP_HIGHMEM |
> -                               __GFP_MOVABLE);
> +                               __GFP_MOVABLE, &handle);
>                 if (handle)
>                         goto compress_again;
>
> @@ -758,16 +759,16 @@ compress_again:
>                 goto out;
>         }
>
> -       alloced_pages = zs_get_total_pages(meta->mem_pool);
> +       alloced_pages = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
>         update_used_max(zram, alloced_pages);
>
>         if (zram->limit_pages && alloced_pages > zram->limit_pages) {
> -               zs_free(meta->mem_pool, handle);
> +               zpool_free(meta->mem_pool, handle);
>                 ret = -ENOMEM;
>                 goto out;
>         }
>
> -       cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
> +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_WO);
>
>         if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
>                 src = kmap_atomic(page);
> @@ -779,7 +780,7 @@ compress_again:
>
>         zcomp_stream_put(zram->comp);
>         zstrm = NULL;
> -       zs_unmap_object(meta->mem_pool, handle);
> +       zpool_unmap_handle(meta->mem_pool, handle);
>
>         /*
>          * Free memory associated with this sector
> diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> index 74fcf10..de3e013 100644
> --- a/drivers/block/zram/zram_drv.h
> +++ b/drivers/block/zram/zram_drv.h
> @@ -16,7 +16,7 @@
>  #define _ZRAM_DRV_H_
>
>  #include <linux/rwsem.h>
> -#include <linux/zsmalloc.h>
> +#include <linux/zpool.h>
>  #include <linux/crypto.h>
>
>  #include "zcomp.h"
> @@ -91,7 +91,7 @@ struct zram_stats {
>
>  struct zram_meta {
>         struct zram_table_entry *table;
> -       struct zs_pool *mem_pool;
> +       struct zpool *mem_pool;
>  };
>
>  struct zram {
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 6a58edc..56e6439 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -421,7 +421,8 @@ static void zs_zpool_free(void *pool, unsigned long handle)
>  static int zs_zpool_shrink(void *pool, unsigned int pages,
>                         unsigned int *reclaimed)
>  {
> -       return -EINVAL;
> +       zs_compact(pool);
> +       return 0;

If we're doing this, I'd like it to be implemented correctly;
zs_compact() returns the number of pages compacted, so you should set
*reclaimed to that.
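Something along these lines, roughly (an untested sketch, glossing over
the unsigned long to unsigned int narrowing):

	static int zs_zpool_shrink(void *pool, unsigned int pages,
				unsigned int *reclaimed)
	{
		/* zs_compact() returns the number of pages compacted */
		unsigned long compacted = zs_compact(pool);

		if (reclaimed)
			*reclaimed = compacted;

		return 0;
	}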

>  }
>
>  static void *zs_zpool_map(void *pool, unsigned long handle,
> @@ -609,10 +610,10 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
>         unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
>         unsigned long total_freeable = 0;
>
> -       seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
> +       seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s %15s\n",
>                         "class", "size", "almost_full", "almost_empty",
>                         "obj_allocated", "obj_used", "pages_used",
> -                       "pages_per_zspage", "freeable");
> +                       "pages_per_zspage", "freeable", "pages_compacted");
>
>         for (i = 0; i < zs_size_classes; i++) {
>                 class = pool->size_class[i];
> @@ -648,10 +649,11 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
>         }
>
>         seq_puts(s, "\n");
> -       seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
> +       seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu %15lu\n",
>                         "Total", "", total_class_almost_full,
>                         total_class_almost_empty, total_objs,
> -                       total_used_objs, total_pages, "", total_freeable);
> +                       total_used_objs, total_pages, "", total_freeable,
> +                       pool->stats.pages_compacted);
>
>         return 0;
>  }
> --
> 2.5.5
>

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] zram: update zram to use zpool
@ 2016-06-15 14:54           ` Dan Streetman
  0 siblings, 0 replies; 26+ messages in thread
From: Dan Streetman @ 2016-06-15 14:54 UTC (permalink / raw)
  To: Geliang Tang
  Cc: Minchan Kim, Nitin Gupta, Sergey Senozhatsky, Dan Streetman,
	Vitaly Wool, linux-kernel, linux-mm

On Wed, Jun 15, 2016 at 10:42 AM, Geliang Tang <geliangtang@gmail.com> wrote:
> Change zram to use the zpool api instead of directly using zsmalloc.
> The zpool api doesn't have zs_compact() and zs_pool_stats() functions.
> I did the following two things to fix it.
> 1) I replace zs_compact() with zpool_shrink(), use zpool_shrink() to
>    call zs_compact() in zsmalloc.
> 2) The 'pages_compacted' attribute is showed in zram by calling
>    zs_pool_stats(). So in order not to call zs_pool_state() I move the
>    attribute to zsmalloc.

I think you're going to have quite a hard time getting a patch like
this accepted without doing some convincing that it's really needed,
and I don't see any new reasons here.  Possibly hard data on how it
improves speed or some other metric, plus reasoning on how using zpool
is better for zram than keeping its direct link to zsmalloc (meaning,
why can't zsmalloc just be improved to perform on par with zpool, on
whatever metric you're comparing).

>
> Signed-off-by: Geliang Tang <geliangtang@gmail.com>
> ---
>  drivers/block/zram/Kconfig    |  3 ++-
>  drivers/block/zram/zram_drv.c | 59 ++++++++++++++++++++++---------------------
>  drivers/block/zram/zram_drv.h |  4 +--
>  mm/zsmalloc.c                 | 12 +++++----
>  4 files changed, 41 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
> index b8ecba6..6389a5a 100644
> --- a/drivers/block/zram/Kconfig
> +++ b/drivers/block/zram/Kconfig
> @@ -1,6 +1,7 @@
>  config ZRAM
>         tristate "Compressed RAM block device support"
> -       depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
> +       depends on BLOCK && SYSFS && ZPOOL && CRYPTO
> +       select ZSMALLOC
>         select CRYPTO_LZO
>         default n
>         help
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 7454cf1..7ee9050 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -39,6 +39,7 @@ static DEFINE_MUTEX(zram_index_mutex);
>
>  static int zram_major;
>  static const char *default_compressor = "lzo";
> +static char *default_zpool_type = "zsmalloc";
>
>  /* Module params (documentation at end) */
>  static unsigned int num_devices = 1;
> @@ -228,11 +229,11 @@ static ssize_t mem_used_total_show(struct device *dev,
>         down_read(&zram->init_lock);
>         if (init_done(zram)) {
>                 struct zram_meta *meta = zram->meta;
> -               val = zs_get_total_pages(meta->mem_pool);
> +               val = zpool_get_total_size(meta->mem_pool);
>         }
>         up_read(&zram->init_lock);
>
> -       return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
> +       return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
>  }
>
>  static ssize_t mem_limit_show(struct device *dev,
> @@ -297,7 +298,7 @@ static ssize_t mem_used_max_store(struct device *dev,
>         if (init_done(zram)) {
>                 struct zram_meta *meta = zram->meta;
>                 atomic_long_set(&zram->stats.max_used_pages,
> -                               zs_get_total_pages(meta->mem_pool));
> +                       zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT);
>         }
>         up_read(&zram->init_lock);
>
> @@ -379,7 +380,7 @@ static ssize_t compact_store(struct device *dev,
>         }
>
>         meta = zram->meta;
> -       zs_compact(meta->mem_pool);
> +       zpool_shrink(meta->mem_pool, 0, NULL);
>         up_read(&zram->init_lock);
>
>         return len;
> @@ -407,31 +408,25 @@ static ssize_t mm_stat_show(struct device *dev,
>                 struct device_attribute *attr, char *buf)
>  {
>         struct zram *zram = dev_to_zram(dev);
> -       struct zs_pool_stats pool_stats;
>         u64 orig_size, mem_used = 0;
>         long max_used;
>         ssize_t ret;
>
> -       memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
> -
>         down_read(&zram->init_lock);
> -       if (init_done(zram)) {
> -               mem_used = zs_get_total_pages(zram->meta->mem_pool);
> -               zs_pool_stats(zram->meta->mem_pool, &pool_stats);
> -       }
> +       if (init_done(zram))
> +               mem_used = zpool_get_total_size(zram->meta->mem_pool);
>
>         orig_size = atomic64_read(&zram->stats.pages_stored);
>         max_used = atomic_long_read(&zram->stats.max_used_pages);
>
>         ret = scnprintf(buf, PAGE_SIZE,
> -                       "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
> +                       "%8llu %8llu %8llu %8lu %8ld %8llu\n",
>                         orig_size << PAGE_SHIFT,
>                         (u64)atomic64_read(&zram->stats.compr_data_size),
> -                       mem_used << PAGE_SHIFT,
> +                       mem_used,
>                         zram->limit_pages << PAGE_SHIFT,
>                         max_used << PAGE_SHIFT,
> -                       (u64)atomic64_read(&zram->stats.zero_pages),
> -                       pool_stats.pages_compacted);
> +                       (u64)atomic64_read(&zram->stats.zero_pages));
>         up_read(&zram->init_lock);
>
>         return ret;
> @@ -490,10 +485,10 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
>                 if (!handle)
>                         continue;
>
> -               zs_free(meta->mem_pool, handle);
> +               zpool_free(meta->mem_pool, handle);
>         }
>
> -       zs_destroy_pool(meta->mem_pool);
> +       zpool_destroy_pool(meta->mem_pool);
>         vfree(meta->table);
>         kfree(meta);
>  }
> @@ -513,7 +508,13 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
>                 goto out_error;
>         }
>
> -       meta->mem_pool = zs_create_pool(pool_name);
> +       if (!zpool_has_pool(default_zpool_type)) {
> +               pr_err("zpool %s not available\n", default_zpool_type);
> +               goto out_error;
> +       }
> +
> +       meta->mem_pool = zpool_create_pool(default_zpool_type,
> +                                          pool_name, 0, NULL);
>         if (!meta->mem_pool) {
>                 pr_err("Error creating memory pool\n");
>                 goto out_error;
> @@ -549,7 +550,7 @@ static void zram_free_page(struct zram *zram, size_t index)
>                 return;
>         }
>
> -       zs_free(meta->mem_pool, handle);
> +       zpool_free(meta->mem_pool, handle);
>
>         atomic64_sub(zram_get_obj_size(meta, index),
>                         &zram->stats.compr_data_size);
> @@ -577,7 +578,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>                 return 0;
>         }
>
> -       cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
> +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_RO);
>         if (size == PAGE_SIZE) {
>                 copy_page(mem, cmem);
>         } else {
> @@ -586,7 +587,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>                 ret = zcomp_decompress(zstrm, cmem, size, mem);
>                 zcomp_stream_put(zram->comp);
>         }
> -       zs_unmap_object(meta->mem_pool, handle);
> +       zpool_unmap_handle(meta->mem_pool, handle);
>         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
>
>         /* Should NEVER happen. Return bio error if it does. */
> @@ -735,20 +736,20 @@ compress_again:
>          * from the slow path and handle has already been allocated.
>          */
>         if (!handle)
> -               handle = zs_malloc(meta->mem_pool, clen,
> +               ret = zpool_malloc(meta->mem_pool, clen,
>                                 __GFP_KSWAPD_RECLAIM |
>                                 __GFP_NOWARN |
>                                 __GFP_HIGHMEM |
> -                               __GFP_MOVABLE);
> +                               __GFP_MOVABLE, &handle);
>         if (!handle) {
>                 zcomp_stream_put(zram->comp);
>                 zstrm = NULL;
>
>                 atomic64_inc(&zram->stats.writestall);
>
> -               handle = zs_malloc(meta->mem_pool, clen,
> +               ret = zpool_malloc(meta->mem_pool, clen,
>                                 GFP_NOIO | __GFP_HIGHMEM |
> -                               __GFP_MOVABLE);
> +                               __GFP_MOVABLE, &handle);
>                 if (handle)
>                         goto compress_again;
>
> @@ -758,16 +759,16 @@ compress_again:
>                 goto out;
>         }
>
> -       alloced_pages = zs_get_total_pages(meta->mem_pool);
> +       alloced_pages = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
>         update_used_max(zram, alloced_pages);
>
>         if (zram->limit_pages && alloced_pages > zram->limit_pages) {
> -               zs_free(meta->mem_pool, handle);
> +               zpool_free(meta->mem_pool, handle);
>                 ret = -ENOMEM;
>                 goto out;
>         }
>
> -       cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
> +       cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_WO);
>
>         if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
>                 src = kmap_atomic(page);
> @@ -779,7 +780,7 @@ compress_again:
>
>         zcomp_stream_put(zram->comp);
>         zstrm = NULL;
> -       zs_unmap_object(meta->mem_pool, handle);
> +       zpool_unmap_handle(meta->mem_pool, handle);
>
>         /*
>          * Free memory associated with this sector
> diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> index 74fcf10..de3e013 100644
> --- a/drivers/block/zram/zram_drv.h
> +++ b/drivers/block/zram/zram_drv.h
> @@ -16,7 +16,7 @@
>  #define _ZRAM_DRV_H_
>
>  #include <linux/rwsem.h>
> -#include <linux/zsmalloc.h>
> +#include <linux/zpool.h>
>  #include <linux/crypto.h>
>
>  #include "zcomp.h"
> @@ -91,7 +91,7 @@ struct zram_stats {
>
>  struct zram_meta {
>         struct zram_table_entry *table;
> -       struct zs_pool *mem_pool;
> +       struct zpool *mem_pool;
>  };
>
>  struct zram {
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 6a58edc..56e6439 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -421,7 +421,8 @@ static void zs_zpool_free(void *pool, unsigned long handle)
>  static int zs_zpool_shrink(void *pool, unsigned int pages,
>                         unsigned int *reclaimed)
>  {
> -       return -EINVAL;
> +       zs_compact(pool);
> +       return 0;

If we're doing this, I'd like it to be implemented correctly;
zs_compact returns the number of pages compacted, so you should set
reclaimed to that.

>  }
>
>  static void *zs_zpool_map(void *pool, unsigned long handle,
> @@ -609,10 +610,10 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
>         unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
>         unsigned long total_freeable = 0;
>
> -       seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
> +       seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s %15s\n",
>                         "class", "size", "almost_full", "almost_empty",
>                         "obj_allocated", "obj_used", "pages_used",
> -                       "pages_per_zspage", "freeable");
> +                       "pages_per_zspage", "freeable", "pages_compacted");
>
>         for (i = 0; i < zs_size_classes; i++) {
>                 class = pool->size_class[i];
> @@ -648,10 +649,11 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
>         }
>
>         seq_puts(s, "\n");
> -       seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
> +       seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu %15lu\n",
>                         "Total", "", total_class_almost_full,
>                         total_class_almost_empty, total_objs,
> -                       total_used_objs, total_pages, "", total_freeable);
> +                       total_used_objs, total_pages, "", total_freeable,
> +                       pool->stats.pages_compacted);
>
>         return 0;
>  }
> --
> 2.5.5
>

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] zram: update zram to use zpool
  2016-06-15 14:42         ` Geliang Tang
@ 2016-06-15 15:33           ` kbuild test robot
  -1 siblings, 0 replies; 26+ messages in thread
From: kbuild test robot @ 2016-06-15 15:33 UTC (permalink / raw)
  To: Geliang Tang
  Cc: kbuild-all, Minchan Kim, Nitin Gupta, Sergey Senozhatsky,
	Dan Streetman, Vitaly Wool, Geliang Tang, linux-kernel, linux-mm

[-- Attachment #1: Type: text/plain, Size: 941 bytes --]

Hi,

[auto build test WARNING on next-20160615]
[cannot apply to v4.7-rc3 v4.7-rc2 v4.7-rc1]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Geliang-Tang/zram-update-zram-to-use-zpool/20160615-224513
config: sh-allyesconfig (attached as .config)
compiler: sh4-linux-gnu-gcc (Debian 5.3.1-8) 5.3.1 20160205
reproduce:
        wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=sh 

All warnings (new ones prefixed by >>):

warning: (ZRAM) selects ZSMALLOC which has unmet direct dependencies (MMU)

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/octet-stream, Size: 40515 bytes --]

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] zram: update zram to use zpool
  2016-06-15 14:42         ` Geliang Tang
@ 2016-06-15 23:17           ` Minchan Kim
  -1 siblings, 0 replies; 26+ messages in thread
From: Minchan Kim @ 2016-06-15 23:17 UTC (permalink / raw)
  To: Geliang Tang
  Cc: Nitin Gupta, Sergey Senozhatsky, Dan Streetman, Vitaly Wool,
	linux-kernel, linux-mm

On Wed, Jun 15, 2016 at 10:42:07PM +0800, Geliang Tang wrote:
> Change zram to use the zpool api instead of directly using zsmalloc.
> The zpool api doesn't have zs_compact() and zs_pool_stats() functions.
> I did the following two things to fix it.
> 1) I replaced zs_compact() with zpool_shrink(); zpool_shrink() is used to
>    call zs_compact() in zsmalloc.
> 2) The 'pages_compacted' attribute is shown in zram by calling
>    zs_pool_stats(). So, in order not to call zs_pool_stats(), I moved the
>    attribute to zsmalloc.
> 
> Signed-off-by: Geliang Tang <geliangtang@gmail.com>

NACK.

I already explained why.
http://lkml.kernel.org/r/20160609013411.GA29779@bbox

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] zram: update zram to use zpool
  2016-06-15 23:17           ` Minchan Kim
@ 2016-06-17  8:30             ` Vitaly Wool
  -1 siblings, 0 replies; 26+ messages in thread
From: Vitaly Wool @ 2016-06-17  8:30 UTC (permalink / raw)
  To: Minchan Kim
  Cc: Geliang Tang, Nitin Gupta, Sergey Senozhatsky, Dan Streetman,
	LKML, Linux-MM

Hi Minchan,

On Thu, Jun 16, 2016 at 1:17 AM, Minchan Kim <minchan@kernel.org> wrote:
> On Wed, Jun 15, 2016 at 10:42:07PM +0800, Geliang Tang wrote:
>> Change zram to use the zpool api instead of directly using zsmalloc.
>> The zpool api doesn't have zs_compact() and zs_pool_stats() functions.
>> I did the following two things to fix it.
>> 1) I replaced zs_compact() with zpool_shrink(); zpool_shrink() is used to
>>    call zs_compact() in zsmalloc.
>> 2) The 'pages_compacted' attribute is shown in zram by calling
>>    zs_pool_stats(). So, in order not to call zs_pool_stats(), I moved the
>>    attribute to zsmalloc.
>>
>> Signed-off-by: Geliang Tang <geliangtang@gmail.com>
>
> NACK.
>
> I already explained why.
> http://lkml.kernel.org/r/20160609013411.GA29779@bbox

This is a fair statement, to a certain extent. I'll let Geliang speak
for himself, but I am personally interested in this zram extension
because I want it to work on MMU-less systems. zsmalloc cannot handle
that, so I want to be able to use zram over z3fold.

Best regards,
   Vitaly

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] zram: update zram to use zpool
  2016-06-17  8:30             ` Vitaly Wool
@ 2016-06-17 12:28               ` Austin S. Hemmelgarn
  -1 siblings, 0 replies; 26+ messages in thread
From: Austin S. Hemmelgarn @ 2016-06-17 12:28 UTC (permalink / raw)
  To: Vitaly Wool, Minchan Kim
  Cc: Geliang Tang, Nitin Gupta, Sergey Senozhatsky, Dan Streetman,
	LKML, Linux-MM

On 2016-06-17 04:30, Vitaly Wool wrote:
> Hi Minchan,
>
> On Thu, Jun 16, 2016 at 1:17 AM, Minchan Kim <minchan@kernel.org> wrote:
>> On Wed, Jun 15, 2016 at 10:42:07PM +0800, Geliang Tang wrote:
>>> Change zram to use the zpool api instead of directly using zsmalloc.
>>> The zpool api doesn't have zs_compact() and zs_pool_stats() functions.
>>> I did the following two things to fix it.
>>> 1) I replaced zs_compact() with zpool_shrink(); zpool_shrink() is used to
>>>    call zs_compact() in zsmalloc.
>>> 2) The 'pages_compacted' attribute is shown in zram by calling
>>>    zs_pool_stats(). So, in order not to call zs_pool_stats(), I moved the
>>>    attribute to zsmalloc.
>>>
>>> Signed-off-by: Geliang Tang <geliangtang@gmail.com>
>>
>> NACK.
>>
>> I already explained why.
>> http://lkml.kernel.org/r/20160609013411.GA29779@bbox
>
> This is a fair statement, to a certain extent. I'll let Geliang speak
> for himself but I am personally interested in this zram extension
> because I want it to work on MMU-less systems. zsmalloc can not handle
> that, so I want to be able to use zram over z3fold.
I concur with this.

It's also worth pointing out that people can and do use zram for things
other than swap, so the assumption that zswap is a viable alternative is
not universally correct.  In my case, for example, I use it on a VM host
as temporary storage for transient SSI VMs.  Making it more deterministic
would be seriously helpful here, as it would mean I could provision
resources on this particular system more precisely and could better
account for latencies in the testing these transient VMs are used for.
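
For reference, a minimal non-swap setup along those lines looks roughly
like this (device name, size and mount point are only illustrative; the
sysfs knobs are the standard zram ones):

	# one zram device used as a compressed scratch filesystem
	modprobe zram num_devices=1
	echo lzo > /sys/block/zram0/comp_algorithm
	echo 8G > /sys/block/zram0/disksize
	mkfs.ext4 /dev/zram0
	mount /dev/zram0 /mnt/scratch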

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] zram: update zram to use zpool
  2016-06-17  8:30             ` Vitaly Wool
@ 2016-06-20  8:00               ` Minchan Kim
  -1 siblings, 0 replies; 26+ messages in thread
From: Minchan Kim @ 2016-06-20  8:00 UTC (permalink / raw)
  To: Vitaly Wool
  Cc: Geliang Tang, Nitin Gupta, Sergey Senozhatsky, Dan Streetman,
	LKML, Linux-MM

On Fri, Jun 17, 2016 at 10:30:58AM +0200, Vitaly Wool wrote:
> Hi Minchan,
> 
> On Thu, Jun 16, 2016 at 1:17 AM, Minchan Kim <minchan@kernel.org> wrote:
> > On Wed, Jun 15, 2016 at 10:42:07PM +0800, Geliang Tang wrote:
> >> Change zram to use the zpool api instead of directly using zsmalloc.
> >> The zpool api doesn't have zs_compact() and zs_pool_stats() functions.
> >> I did the following two things to fix it.
> >> 1) I replaced zs_compact() with zpool_shrink(); zpool_shrink() is used to
> >>    call zs_compact() in zsmalloc.
> >> 2) The 'pages_compacted' attribute is shown in zram by calling
> >>    zs_pool_stats(). So, in order not to call zs_pool_stats(), I moved the
> >>    attribute to zsmalloc.
> >>
> >> Signed-off-by: Geliang Tang <geliangtang@gmail.com>
> >
> > NACK.
> >
> > I already explained why.
> > http://lkml.kernel.org/r/20160609013411.GA29779@bbox
> 
> This is a fair statement, to a certain extent. I'll let Geliang speak
> for himself but I am personally interested in this zram extension
> because I want it to work on MMU-less systems. zsmalloc can not handle
> that, so I want to be able to use zram over z3fold.

Could you give me more detail? What's the use case?

> 
> Best regards,
>    Vitaly

^ permalink raw reply	[flat|nested] 26+ messages in thread

end of thread, other threads:[~2016-06-20  8:01 UTC | newest]

Thread overview: 26+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-06-08  9:39 [PATCH] zram: add zpool support Geliang Tang
2016-06-08  9:39 ` Geliang Tang
2016-06-08 14:51 ` Dan Streetman
2016-06-08 14:51   ` Dan Streetman
2016-06-09  1:34   ` Minchan Kim
2016-06-09  1:34     ` Minchan Kim
2016-06-09  1:43     ` Sergey Senozhatsky
2016-06-09  1:43       ` Sergey Senozhatsky
2016-06-13  9:11   ` Vitaly Wool
2016-06-13 20:04     ` Fwd: " Vitaly Wool
2016-06-15 14:42     ` [PATCH] zram: add zpool support v2 Geliang Tang
2016-06-15 14:42       ` Geliang Tang
2016-06-15 14:42       ` [PATCH] zram: update zram to use zpool Geliang Tang
2016-06-15 14:42         ` Geliang Tang
2016-06-15 14:54         ` Dan Streetman
2016-06-15 14:54           ` Dan Streetman
2016-06-15 15:33         ` kbuild test robot
2016-06-15 15:33           ` kbuild test robot
2016-06-15 23:17         ` Minchan Kim
2016-06-15 23:17           ` Minchan Kim
2016-06-17  8:30           ` Vitaly Wool
2016-06-17  8:30             ` Vitaly Wool
2016-06-17 12:28             ` Austin S. Hemmelgarn
2016-06-17 12:28               ` Austin S. Hemmelgarn
2016-06-20  8:00             ` Minchan Kim
2016-06-20  8:00               ` Minchan Kim
