* [PATCH] zram: remove global tb_lock by using lock-free CAS
@ 2014-05-05  4:01 ` Weijie Yang
  0 siblings, 0 replies; 30+ messages in thread
From: Weijie Yang @ 2014-05-05  4:01 UTC (permalink / raw)
  To: 'Minchan Kim'
  Cc: 'Andrew Morton', 'Nitin Gupta',
	'Sergey Senozhatsky', 'Bob Liu',
	'Dan Streetman',
	weijie.yang.kh, heesub.shin, 'linux-kernel',
	'Linux-MM'

Currently, we use a rwlock, tb_lock, to protect concurrent access to the
whole zram meta table. However, according to the actual access pattern,
there is only a small chance that upper-layer users access the same
table[index] concurrently, so the current lock granularity is too coarse.

This patch adds an atomic state to every table[index] to record whether
the entry is being accessed. A CAS operation on that state protects
concurrent access to the same table[index] while still allowing maximum
concurrency across different indexes.

On 64-bit systems, this does not increase the meta table memory overhead;
on 32-bit systems with a 4K page size, it adds about 1MB for 1GB of zram
(262,144 table entries x 4 bytes for the atomic_t). So it is cost-efficient.

Test result:
(x86-64 Intel Core2 Q8400, system memory 4GB, Ubuntu 12.04,
kernel v3.15.0-rc3, zram 1GB with 4 max_comp_streams LZO,
take the average of 5 tests)

iozone -t 4 -R -r 16K -s 200M -I +Z

      Test          base	   lock-free	ratio
------------------------------------------------------
 Initial write   1348017.60    1424141.62   +5.6%
       Rewrite   1520189.16    1652504.81   +8.7%
          Read   8294445.45   11404668.35   +37.5%
       Re-read   8134448.83   11555483.75   +42.1%
  Reverse Read   6748717.97    8394478.17   +24.4%
   Stride read   7220276.66    9372229.95   +29.8%
   Random read   7133010.06    9187221.90   +28.8%
Mixed workload   4056980.71    5843370.85   +44.0%
  Random write   1470106.17    1608947.04   +9.4%
        Pwrite   1259493.72    1311055.32   +4.1%
         Pread   4247583.17    4652056.11   +9.5%

Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
---

This patch is based on the linux-next tree, commit b5c8d48bf8f42

 drivers/block/zram/zram_drv.c |   41 ++++++++++++++++++++++++++---------------
 drivers/block/zram/zram_drv.h |    5 ++++-
 2 files changed, 30 insertions(+), 16 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 48eccb3..8b70945
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -255,7 +255,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
 		goto free_table;
 	}
 
-	rwlock_init(&meta->tb_lock);
 	return meta;
 
 free_table:
@@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	unsigned long handle;
 	u16 size;
 
-	read_lock(&meta->tb_lock);
+	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
+		cpu_relax();
+
 	handle = meta->table[index].handle;
 	size = meta->table[index].size;
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		read_unlock(&meta->tb_lock);
+		atomic_set(&meta->table[index].state, IDLE);
 		clear_page(mem);
 		return 0;
 	}
@@ -355,7 +356,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	else
 		ret = zcomp_decompress(zram->comp, cmem, size, mem);
 	zs_unmap_object(meta->mem_pool, handle);
-	read_unlock(&meta->tb_lock);
+	atomic_set(&meta->table[index].state, IDLE);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret)) {
@@ -376,14 +377,16 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;
 
-	read_lock(&meta->tb_lock);
+	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
+		cpu_relax();
+
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
-		read_unlock(&meta->tb_lock);
+		atomic_set(&meta->table[index].state, IDLE);
 		handle_zero_page(bvec);
 		return 0;
 	}
-	read_unlock(&meta->tb_lock);
+	atomic_set(&meta->table[index].state, IDLE);
 
 	if (is_partial_io(bvec))
 		/* Use  a temporary buffer to decompress the page */
@@ -461,10 +464,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	if (page_zero_filled(uncmem)) {
 		kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
-		write_lock(&zram->meta->tb_lock);
+		while(atomic_cmpxchg(&meta->table[index].state,
+				IDLE, ACCESS) != IDLE)
+			cpu_relax();
+
 		zram_free_page(zram, index);
 		zram_set_flag(meta, index, ZRAM_ZERO);
-		write_unlock(&zram->meta->tb_lock);
+		atomic_set(&meta->table[index].state, IDLE);
 
 		atomic64_inc(&zram->stats.zero_pages);
 		ret = 0;
@@ -514,12 +520,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
-	write_lock(&zram->meta->tb_lock);
+	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
+		cpu_relax();
 	zram_free_page(zram, index);
 
 	meta->table[index].handle = handle;
 	meta->table[index].size = clen;
-	write_unlock(&zram->meta->tb_lock);
+	atomic_set(&meta->table[index].state, IDLE);
 
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_data_size);
@@ -560,6 +567,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 			     int offset, struct bio *bio)
 {
 	size_t n = bio->bi_iter.bi_size;
+	struct zram_meta *meta = zram->meta;
 
 	/*
 	 * zram manages data in physical block size units. Because logical block
@@ -584,9 +592,11 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 		 * Discard request can be large so the lock hold times could be
 		 * lengthy.  So take the lock once per page.
 		 */
-		write_lock(&zram->meta->tb_lock);
+		while(atomic_cmpxchg(&meta->table[index].state,
+				IDLE, ACCESS) != IDLE)
+			cpu_relax();
 		zram_free_page(zram, index);
-		write_unlock(&zram->meta->tb_lock);
+		atomic_set(&meta->table[index].state, IDLE);
 		index++;
 		n -= PAGE_SIZE;
 	}
@@ -804,9 +814,10 @@ static void zram_slot_free_notify(struct block_device *bdev,
 	zram = bdev->bd_disk->private_data;
 	meta = zram->meta;
 
-	write_lock(&meta->tb_lock);
+	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
+		cpu_relax();
 	zram_free_page(zram, index);
-	write_unlock(&meta->tb_lock);
+	atomic_set(&meta->table[index].state, IDLE);
 	atomic64_inc(&zram->stats.notify_free);
 }
 
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 7f21c14..76b2bb5
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -61,9 +61,13 @@ enum zram_pageflags {
 
 /*-- Data structures */
 
+#define IDLE   0
+#define ACCESS 1
+
 /* Allocated for each disk page */
 struct table {
 	unsigned long handle;
+	atomic_t state;
 	u16 size;	/* object size (excluding header) */
 	u8 flags;
 } __aligned(4);
@@ -81,7 +85,6 @@ struct zram_stats {
 };
 
 struct zram_meta {
-	rwlock_t tb_lock;	/* protect table */
 	struct table *table;
 	struct zs_pool *mem_pool;
 };
-- 
1.7.10.4



* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-05  4:01 ` Weijie Yang
@ 2014-05-05 10:32   ` Sergey Senozhatsky
  -1 siblings, 0 replies; 30+ messages in thread
From: Sergey Senozhatsky @ 2014-05-05 10:32 UTC (permalink / raw)
  To: Weijie Yang
  Cc: 'Minchan Kim', 'Andrew Morton',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman',
	weijie.yang.kh, heesub.shin, 'linux-kernel',
	'Linux-MM'

Hello Weijie,

On (05/05/14 12:01), Weijie Yang wrote:
> Currently, we use a rwlock tb_lock to protect concurrent access to
> whole zram meta table. However, according to the actual access model,
> there is only a small chance for upper user access the same table[index],
> so the current lock granularity is too big.
> 
> This patch add a atomic state for every table[index] to record its access,
> by using CAS operation, protect concurrent access to the same table[index],
> meanwhile allow the maximum concurrency.
> 
> On 64-bit system, it will not increase the meta table memory overhead, and
> on 32-bit system with 4K page_size, it will increase about 1MB memory overhead
> for 1GB zram. So, it is cost-efficient.
> 

not sure if it is worth the effort (just an idea), but can we merge `u8 flags' and
`atomic_t state' into a single `atomic_t flags'? then reserve, say, 1 bit for the
IDLE/BUSY (ACCESS) state and the rest for the flags (there is only one at the moment - ZRAM_ZERO).
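
A rough sketch of how such a merged field could look (the ZRAM_FLAG_* masks and
the zram_lock_entry()/zram_unlock_entry() helpers are invented here purely for
illustration, they are not part of the posted patch):

#define ZRAM_FLAG_ACCESS	(1 << 0)	/* entry is being accessed (locked) */
#define ZRAM_FLAG_ZERO		(1 << 1)	/* page is zero filled */

/* Allocated for each disk page */
struct table {
	unsigned long handle;
	u16 size;		/* object size (excluding header) */
	atomic_t flags;		/* replaces both 'u8 flags' and 'atomic_t state' */
} __aligned(4);

static inline void zram_lock_entry(struct zram_meta *meta, u32 index)
{
	atomic_t *flags = &meta->table[index].flags;
	int old = atomic_read(flags) & ~ZRAM_FLAG_ACCESS;

	/* spin until we are the one flipping the ACCESS bit from 0 to 1 */
	while (atomic_cmpxchg(flags, old, old | ZRAM_FLAG_ACCESS) != old) {
		cpu_relax();
		old = atomic_read(flags) & ~ZRAM_FLAG_ACCESS;
	}
}

static inline void zram_unlock_entry(struct zram_meta *meta, u32 index)
{
	atomic_t *flags = &meta->table[index].flags;
	int old = atomic_read(flags);

	/* assuming the non-lock bits are only changed while ACCESS is held
	 * (as in this patch), the loop effectively runs once */
	while (atomic_cmpxchg(flags, old, old & ~ZRAM_FLAG_ACCESS) != old)
		old = atomic_read(flags);
}

zram_test_flag()/zram_set_flag()/zram_clear_flag() would then need to operate
on the atomic_t as well.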

> Test result:
> (x86-64 Intel Core2 Q8400, system memory 4GB, Ubuntu 12.04,
> kernel v3.15.0-rc3, zram 1GB with 4 max_comp_streams LZO,
> take the average of 5 tests)
> 
> iozone -t 4 -R -r 16K -s 200M -I +Z
> 
>       Test          base	   lock-free	ratio
> ------------------------------------------------------
>  Initial write   1348017.60    1424141.62   +5.6%
>        Rewrite   1520189.16    1652504.81   +8.7%
>           Read   8294445.45   11404668.35   +37.5%
>        Re-read   8134448.83   11555483.75   +42.1%
>   Reverse Read   6748717.97    8394478.17   +24.4%
>    Stride read   7220276.66    9372229.95   +29.8%
>    Random read   7133010.06    9187221.90   +28.8%
> Mixed workload   4056980.71    5843370.85   +44.0%
>   Random write   1470106.17    1608947.04   +9.4%
>         Pwrite   1259493.72    1311055.32   +4.1%
>          Pread   4247583.17    4652056.11   +9.5%
> 
> Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
> ---
> 
> This patch is based on linux-next tree, commit b5c8d48bf8f42 
> 
>  drivers/block/zram/zram_drv.c |   41 ++++++++++++++++++++++++++---------------
>  drivers/block/zram/zram_drv.h |    5 ++++-
>  2 files changed, 30 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 48eccb3..8b70945
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -255,7 +255,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
>  		goto free_table;
>  	}
>  
> -	rwlock_init(&meta->tb_lock);
>  	return meta;
>  
>  free_table:
> @@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>  	unsigned long handle;
>  	u16 size;
>  
> -	read_lock(&meta->tb_lock);
> +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> +		cpu_relax();
> +

a minor nitpick: this seems to be common to 6 places, so how about factoring
out this loop into a `static inline zram_wait_for_idle_bit(meta, index)' function
(the naming is not perfect, just an example)?
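
Something along these lines, for example (the helper names below are only a
sketch of the wrapper suggested above):

static inline void zram_table_lock(struct zram_meta *meta, u32 index)
{
	/* spin until this entry goes from IDLE to ACCESS */
	while (atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
		cpu_relax();
}

static inline void zram_table_unlock(struct zram_meta *meta, u32 index)
{
	atomic_set(&meta->table[index].state, IDLE);
}

so that every call site becomes a plain zram_table_lock(meta, index) /
zram_table_unlock(meta, index) pair.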

'while()' does not pass checkpatch's coding style check.


	-ss

>  	handle = meta->table[index].handle;
>  	size = meta->table[index].size;
>  
>  	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
> -		read_unlock(&meta->tb_lock);
> +		atomic_set(&meta->table[index].state, IDLE);
>  		clear_page(mem);
>  		return 0;
>  	}
> @@ -355,7 +356,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>  	else
>  		ret = zcomp_decompress(zram->comp, cmem, size, mem);
>  	zs_unmap_object(meta->mem_pool, handle);
> -	read_unlock(&meta->tb_lock);
> +	atomic_set(&meta->table[index].state, IDLE);
>  
>  	/* Should NEVER happen. Return bio error if it does. */
>  	if (unlikely(ret)) {
> @@ -376,14 +377,16 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
>  	struct zram_meta *meta = zram->meta;
>  	page = bvec->bv_page;
>  
> -	read_lock(&meta->tb_lock);
> +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> +		cpu_relax();
> +
>  	if (unlikely(!meta->table[index].handle) ||
>  			zram_test_flag(meta, index, ZRAM_ZERO)) {
> -		read_unlock(&meta->tb_lock);
> +		atomic_set(&meta->table[index].state, IDLE);
>  		handle_zero_page(bvec);
>  		return 0;
>  	}
> -	read_unlock(&meta->tb_lock);
> +	atomic_set(&meta->table[index].state, IDLE);
>  
>  	if (is_partial_io(bvec))
>  		/* Use  a temporary buffer to decompress the page */
> @@ -461,10 +464,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
>  	if (page_zero_filled(uncmem)) {
>  		kunmap_atomic(user_mem);
>  		/* Free memory associated with this sector now. */
> -		write_lock(&zram->meta->tb_lock);
> +		while(atomic_cmpxchg(&meta->table[index].state,
> +				IDLE, ACCESS) != IDLE)
> +			cpu_relax();
> +
>  		zram_free_page(zram, index);
>  		zram_set_flag(meta, index, ZRAM_ZERO);
> -		write_unlock(&zram->meta->tb_lock);
> +		atomic_set(&meta->table[index].state, IDLE);
>  
>  		atomic64_inc(&zram->stats.zero_pages);
>  		ret = 0;
> @@ -514,12 +520,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
>  	 * Free memory associated with this sector
>  	 * before overwriting unused sectors.
>  	 */
> -	write_lock(&zram->meta->tb_lock);
> +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> +		cpu_relax();
>  	zram_free_page(zram, index);
>  
>  	meta->table[index].handle = handle;
>  	meta->table[index].size = clen;
> -	write_unlock(&zram->meta->tb_lock);
> +	atomic_set(&meta->table[index].state, IDLE);
>  
>  	/* Update stats */
>  	atomic64_add(clen, &zram->stats.compr_data_size);
> @@ -560,6 +567,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
>  			     int offset, struct bio *bio)
>  {
>  	size_t n = bio->bi_iter.bi_size;
> +	struct zram_meta *meta = zram->meta;
>  
>  	/*
>  	 * zram manages data in physical block size units. Because logical block
> @@ -584,9 +592,11 @@ static void zram_bio_discard(struct zram *zram, u32 index,
>  		 * Discard request can be large so the lock hold times could be
>  		 * lengthy.  So take the lock once per page.
>  		 */
> -		write_lock(&zram->meta->tb_lock);
> +		while(atomic_cmpxchg(&meta->table[index].state,
> +				IDLE, ACCESS) != IDLE)
> +			cpu_relax();
>  		zram_free_page(zram, index);
> -		write_unlock(&zram->meta->tb_lock);
> +		atomic_set(&meta->table[index].state, IDLE);
>  		index++;
>  		n -= PAGE_SIZE;
>  	}
> @@ -804,9 +814,10 @@ static void zram_slot_free_notify(struct block_device *bdev,
>  	zram = bdev->bd_disk->private_data;
>  	meta = zram->meta;
>  
> -	write_lock(&meta->tb_lock);
> +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> +		cpu_relax();
>  	zram_free_page(zram, index);
> -	write_unlock(&meta->tb_lock);
> +	atomic_set(&meta->table[index].state, IDLE);
>  	atomic64_inc(&zram->stats.notify_free);
>  }
>  
> diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> index 7f21c14..76b2bb5
> --- a/drivers/block/zram/zram_drv.h
> +++ b/drivers/block/zram/zram_drv.h
> @@ -61,9 +61,13 @@ enum zram_pageflags {
>  
>  /*-- Data structures */
>  
> +#define IDLE   0
> +#define ACCESS 1
> +
>  /* Allocated for each disk page */
>  struct table {
>  	unsigned long handle;
> +	atomic_t state;
>  	u16 size;	/* object size (excluding header) */
>  	u8 flags;
>  } __aligned(4);
> @@ -81,7 +85,6 @@ struct zram_stats {
>  };
>  
>  struct zram_meta {
> -	rwlock_t tb_lock;	/* protect table */
>  	struct table *table;
>  	struct zs_pool *mem_pool;
>  };
> -- 
> 1.7.10.4
> 
> 

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-05  4:01 ` Weijie Yang
@ 2014-05-05 15:20   ` Seth Jennings
  -1 siblings, 0 replies; 30+ messages in thread
From: Seth Jennings @ 2014-05-05 15:20 UTC (permalink / raw)
  To: Weijie Yang
  Cc: 'Minchan Kim', 'Andrew Morton',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman',
	weijie.yang.kh, heesub.shin, 'linux-kernel',
	'Linux-MM'

On Mon, May 05, 2014 at 12:01:21PM +0800, Weijie Yang wrote:
> Currently, we use a rwlock tb_lock to protect concurrent access to
> whole zram meta table. However, according to the actual access model,
> there is only a small chance for upper user access the same table[index],
> so the current lock granularity is too big.
> 
> This patch add a atomic state for every table[index] to record its access,
> by using CAS operation, protect concurrent access to the same table[index],
> meanwhile allow the maximum concurrency.
> 
> On 64-bit system, it will not increase the meta table memory overhead, and
> on 32-bit system with 4K page_size, it will increase about 1MB memory overhead
> for 1GB zram. So, it is cost-efficient.
> 
> Test result:
> (x86-64 Intel Core2 Q8400, system memory 4GB, Ubuntu 12.04,
> kernel v3.15.0-rc3, zram 1GB with 4 max_comp_streams LZO,
> take the average of 5 tests)
> 
> iozone -t 4 -R -r 16K -s 200M -I +Z
> 
>       Test          base	   lock-free	ratio
> ------------------------------------------------------
>  Initial write   1348017.60    1424141.62   +5.6%
>        Rewrite   1520189.16    1652504.81   +8.7%
>           Read   8294445.45   11404668.35   +37.5%
>        Re-read   8134448.83   11555483.75   +42.1%
>   Reverse Read   6748717.97    8394478.17   +24.4%
>    Stride read   7220276.66    9372229.95   +29.8%
>    Random read   7133010.06    9187221.90   +28.8%
> Mixed workload   4056980.71    5843370.85   +44.0%
>   Random write   1470106.17    1608947.04   +9.4%
>         Pwrite   1259493.72    1311055.32   +4.1%
>          Pread   4247583.17    4652056.11   +9.5%
> 
> Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
> ---
> 
> This patch is based on linux-next tree, commit b5c8d48bf8f42 
> 
>  drivers/block/zram/zram_drv.c |   41 ++++++++++++++++++++++++++---------------
>  drivers/block/zram/zram_drv.h |    5 ++++-
>  2 files changed, 30 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 48eccb3..8b70945
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -255,7 +255,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
>  		goto free_table;
>  	}
>  
> -	rwlock_init(&meta->tb_lock);
>  	return meta;
>  
>  free_table:
> @@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>  	unsigned long handle;
>  	u16 size;
>  
> -	read_lock(&meta->tb_lock);
> +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> +		cpu_relax();
> +

So... this might be a dumb question, but this looks like a spinlock
implementation.

What advantage does this have over a standard spinlock?
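
For comparison, the same thing with a stock per-entry lock would look roughly
like this sketch (the 'lock' field is hypothetical, it is not in the patch):

struct table {
	unsigned long handle;
	u16 size;	/* object size (excluding header) */
	u8 flags;
	spinlock_t lock;	/* per-entry lock, spin_lock_init() at alloc time */
} __aligned(4);

/* e.g. in zram_slot_free_notify() */
spin_lock(&meta->table[index].lock);
zram_free_page(zram, index);
spin_unlock(&meta->table[index].lock);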

Seth

>  	handle = meta->table[index].handle;
>  	size = meta->table[index].size;
>  
>  	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
> -		read_unlock(&meta->tb_lock);
> +		atomic_set(&meta->table[index].state, IDLE);
>  		clear_page(mem);
>  		return 0;
>  	}
> @@ -355,7 +356,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>  	else
>  		ret = zcomp_decompress(zram->comp, cmem, size, mem);
>  	zs_unmap_object(meta->mem_pool, handle);
> -	read_unlock(&meta->tb_lock);
> +	atomic_set(&meta->table[index].state, IDLE);
>  
>  	/* Should NEVER happen. Return bio error if it does. */
>  	if (unlikely(ret)) {
> @@ -376,14 +377,16 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
>  	struct zram_meta *meta = zram->meta;
>  	page = bvec->bv_page;
>  
> -	read_lock(&meta->tb_lock);
> +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> +		cpu_relax();
> +
>  	if (unlikely(!meta->table[index].handle) ||
>  			zram_test_flag(meta, index, ZRAM_ZERO)) {
> -		read_unlock(&meta->tb_lock);
> +		atomic_set(&meta->table[index].state, IDLE);
>  		handle_zero_page(bvec);
>  		return 0;
>  	}
> -	read_unlock(&meta->tb_lock);
> +	atomic_set(&meta->table[index].state, IDLE);
>  
>  	if (is_partial_io(bvec))
>  		/* Use  a temporary buffer to decompress the page */
> @@ -461,10 +464,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
>  	if (page_zero_filled(uncmem)) {
>  		kunmap_atomic(user_mem);
>  		/* Free memory associated with this sector now. */
> -		write_lock(&zram->meta->tb_lock);
> +		while(atomic_cmpxchg(&meta->table[index].state,
> +				IDLE, ACCESS) != IDLE)
> +			cpu_relax();
> +
>  		zram_free_page(zram, index);
>  		zram_set_flag(meta, index, ZRAM_ZERO);
> -		write_unlock(&zram->meta->tb_lock);
> +		atomic_set(&meta->table[index].state, IDLE);
>  
>  		atomic64_inc(&zram->stats.zero_pages);
>  		ret = 0;
> @@ -514,12 +520,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
>  	 * Free memory associated with this sector
>  	 * before overwriting unused sectors.
>  	 */
> -	write_lock(&zram->meta->tb_lock);
> +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> +		cpu_relax();
>  	zram_free_page(zram, index);
>  
>  	meta->table[index].handle = handle;
>  	meta->table[index].size = clen;
> -	write_unlock(&zram->meta->tb_lock);
> +	atomic_set(&meta->table[index].state, IDLE);
>  
>  	/* Update stats */
>  	atomic64_add(clen, &zram->stats.compr_data_size);
> @@ -560,6 +567,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
>  			     int offset, struct bio *bio)
>  {
>  	size_t n = bio->bi_iter.bi_size;
> +	struct zram_meta *meta = zram->meta;
>  
>  	/*
>  	 * zram manages data in physical block size units. Because logical block
> @@ -584,9 +592,11 @@ static void zram_bio_discard(struct zram *zram, u32 index,
>  		 * Discard request can be large so the lock hold times could be
>  		 * lengthy.  So take the lock once per page.
>  		 */
> -		write_lock(&zram->meta->tb_lock);
> +		while(atomic_cmpxchg(&meta->table[index].state,
> +				IDLE, ACCESS) != IDLE)
> +			cpu_relax();
>  		zram_free_page(zram, index);
> -		write_unlock(&zram->meta->tb_lock);
> +		atomic_set(&meta->table[index].state, IDLE);
>  		index++;
>  		n -= PAGE_SIZE;
>  	}
> @@ -804,9 +814,10 @@ static void zram_slot_free_notify(struct block_device *bdev,
>  	zram = bdev->bd_disk->private_data;
>  	meta = zram->meta;
>  
> -	write_lock(&meta->tb_lock);
> +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> +		cpu_relax();
>  	zram_free_page(zram, index);
> -	write_unlock(&meta->tb_lock);
> +	atomic_set(&meta->table[index].state, IDLE);
>  	atomic64_inc(&zram->stats.notify_free);
>  }
>  
> diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> index 7f21c14..76b2bb5
> --- a/drivers/block/zram/zram_drv.h
> +++ b/drivers/block/zram/zram_drv.h
> @@ -61,9 +61,13 @@ enum zram_pageflags {
>  
>  /*-- Data structures */
>  
> +#define IDLE   0
> +#define ACCESS 1
> +
>  /* Allocated for each disk page */
>  struct table {
>  	unsigned long handle;
> +	atomic_t state;
>  	u16 size;	/* object size (excluding header) */
>  	u8 flags;
>  } __aligned(4);
> @@ -81,7 +85,6 @@ struct zram_stats {
>  };
>  
>  struct zram_meta {
> -	rwlock_t tb_lock;	/* protect table */
>  	struct table *table;
>  	struct zs_pool *mem_pool;
>  };
> -- 
> 1.7.10.4
> 
> 

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-05 15:20   ` Seth Jennings
@ 2014-05-05 18:00     ` Davidlohr Bueso
  -1 siblings, 0 replies; 30+ messages in thread
From: Davidlohr Bueso @ 2014-05-05 18:00 UTC (permalink / raw)
  To: Seth Jennings
  Cc: Weijie Yang, 'Minchan Kim', 'Andrew Morton',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman',
	weijie.yang.kh, heesub.shin, 'linux-kernel',
	'Linux-MM'

On Mon, 2014-05-05 at 10:20 -0500, Seth Jennings wrote:
> On Mon, May 05, 2014 at 12:01:21PM +0800, Weijie Yang wrote:
> > Currently, we use a rwlock tb_lock to protect concurrent access to
> > whole zram meta table. However, according to the actual access model,
> > there is only a small chance for upper user access the same table[index],
> > so the current lock granularity is too big.
> > 
> > This patch add a atomic state for every table[index] to record its access,
> > by using CAS operation, protect concurrent access to the same table[index],
> > meanwhile allow the maximum concurrency.
> > 
> > On 64-bit system, it will not increase the meta table memory overhead, and
> > on 32-bit system with 4K page_size, it will increase about 1MB memory overhead
> > for 1GB zram. So, it is cost-efficient.
> > 
> > Test result:
> > (x86-64 Intel Core2 Q8400, system memory 4GB, Ubuntu 12.04,
> > kernel v3.15.0-rc3, zram 1GB with 4 max_comp_streams LZO,
> > take the average of 5 tests)
> > 
> > iozone -t 4 -R -r 16K -s 200M -I +Z
> > 
> >       Test          base	   lock-free	ratio
> > ------------------------------------------------------
> >  Initial write   1348017.60    1424141.62   +5.6%
> >        Rewrite   1520189.16    1652504.81   +8.7%
> >           Read   8294445.45   11404668.35   +37.5%
> >        Re-read   8134448.83   11555483.75   +42.1%
> >   Reverse Read   6748717.97    8394478.17   +24.4%
> >    Stride read   7220276.66    9372229.95   +29.8%
> >    Random read   7133010.06    9187221.90   +28.8%
> > Mixed workload   4056980.71    5843370.85   +44.0%
> >   Random write   1470106.17    1608947.04   +9.4%
> >         Pwrite   1259493.72    1311055.32   +4.1%
> >          Pread   4247583.17    4652056.11   +9.5%
> > 
> > Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
> > ---
> > 
> > This patch is based on linux-next tree, commit b5c8d48bf8f42 
> > 
> >  drivers/block/zram/zram_drv.c |   41 ++++++++++++++++++++++++++---------------
> >  drivers/block/zram/zram_drv.h |    5 ++++-
> >  2 files changed, 30 insertions(+), 16 deletions(-)
> > 
> > diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> > index 48eccb3..8b70945
> > --- a/drivers/block/zram/zram_drv.c
> > +++ b/drivers/block/zram/zram_drv.c
> > @@ -255,7 +255,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
> >  		goto free_table;
> >  	}
> >  
> > -	rwlock_init(&meta->tb_lock);
> >  	return meta;
> >  
> >  free_table:
> > @@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> >  	unsigned long handle;
> >  	u16 size;
> >  
> > -	read_lock(&meta->tb_lock);
> > +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> > +		cpu_relax();
> > +
> 
> So... this might be dumb question, but this looks like a spinlock
> implementation.
> 
> What advantage does this have over a standard spinlock?

I was wondering the same thing. Furthermore, by doing this you'll lose
the benefits of sharing the lock... though your numbers do indicate that it is
for the better. Also, note that hopefully rwlock_t will soon be updated
to be fair and perform up to par with spinlocks, something which is long
overdue. So you could reduce the critical region by implementing the same
granularity, just don't implement your own locking scheme like this.

> Seth
> 
> >  	handle = meta->table[index].handle;
> >  	size = meta->table[index].size;
> >  
> >  	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
> > -		read_unlock(&meta->tb_lock);
> > +		atomic_set(&meta->table[index].state, IDLE);
> >  		clear_page(mem);
> >  		return 0;
> >  	}
> > @@ -355,7 +356,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> >  	else
> >  		ret = zcomp_decompress(zram->comp, cmem, size, mem);
> >  	zs_unmap_object(meta->mem_pool, handle);
> > -	read_unlock(&meta->tb_lock);
> > +	atomic_set(&meta->table[index].state, IDLE);
> >  
> >  	/* Should NEVER happen. Return bio error if it does. */
> >  	if (unlikely(ret)) {
> > @@ -376,14 +377,16 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> >  	struct zram_meta *meta = zram->meta;
> >  	page = bvec->bv_page;
> >  
> > -	read_lock(&meta->tb_lock);
> > +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> > +		cpu_relax();
> > +

So here you could reduce the number of atomic ops and cacheline bouncing
by doing a read before the CAS. It works well for our mutexes and
rwsems. Something like:

while (true) {
	if (atomic_read(&meta->table[index].state) == IDLE &&
	    atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) == IDLE)
		break;	/* yay! lock acquired */
	cpu_relax();
}

But then again, that's kind of implementing your own locking scheme...
use a standard one instead ;)

Also, instead of cpu_relax() you probably want arch_mutex_cpu_relax()
for the sake of z systems.

Thanks,
Davidlohr


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
@ 2014-05-05 18:00     ` Davidlohr Bueso
  0 siblings, 0 replies; 30+ messages in thread
From: Davidlohr Bueso @ 2014-05-05 18:00 UTC (permalink / raw)
  To: Seth Jennings
  Cc: Weijie Yang, 'Minchan Kim', 'Andrew Morton',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman',
	weijie.yang.kh, heesub.shin, 'linux-kernel',
	'Linux-MM'

On Mon, 2014-05-05 at 10:20 -0500, Seth Jennings wrote:
> On Mon, May 05, 2014 at 12:01:21PM +0800, Weijie Yang wrote:
> > Currently, we use a rwlock tb_lock to protect concurrent access to
> > whole zram meta table. However, according to the actual access model,
> > there is only a small chance for upper user access the same table[index],
> > so the current lock granularity is too big.
> > 
> > This patch add a atomic state for every table[index] to record its access,
> > by using CAS operation, protect concurrent access to the same table[index],
> > meanwhile allow the maximum concurrency.
> > 
> > On 64-bit system, it will not increase the meta table memory overhead, and
> > on 32-bit system with 4K page_size, it will increase about 1MB memory overhead
> > for 1GB zram. So, it is cost-efficient.
> > 
> > Test result:
> > (x86-64 Intel Core2 Q8400, system memory 4GB, Ubuntu 12.04,
> > kernel v3.15.0-rc3, zram 1GB with 4 max_comp_streams LZO,
> > take the average of 5 tests)
> > 
> > iozone -t 4 -R -r 16K -s 200M -I +Z
> > 
> >       Test          base	   lock-free	ratio
> > ------------------------------------------------------
> >  Initial write   1348017.60    1424141.62   +5.6%
> >        Rewrite   1520189.16    1652504.81   +8.7%
> >           Read   8294445.45   11404668.35   +37.5%
> >        Re-read   8134448.83   11555483.75   +42.1%
> >   Reverse Read   6748717.97    8394478.17   +24.4%
> >    Stride read   7220276.66    9372229.95   +29.8%
> >    Random read   7133010.06    9187221.90   +28.8%
> > Mixed workload   4056980.71    5843370.85   +44.0%
> >   Random write   1470106.17    1608947.04   +9.4%
> >         Pwrite   1259493.72    1311055.32   +4.1%
> >          Pread   4247583.17    4652056.11   +9.5%
> > 
> > Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
> > ---
> > 
> > This patch is based on linux-next tree, commit b5c8d48bf8f42 
> > 
> >  drivers/block/zram/zram_drv.c |   41 ++++++++++++++++++++++++++---------------
> >  drivers/block/zram/zram_drv.h |    5 ++++-
> >  2 files changed, 30 insertions(+), 16 deletions(-)
> > 
> > diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> > index 48eccb3..8b70945
> > --- a/drivers/block/zram/zram_drv.c
> > +++ b/drivers/block/zram/zram_drv.c
> > @@ -255,7 +255,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
> >  		goto free_table;
> >  	}
> >  
> > -	rwlock_init(&meta->tb_lock);
> >  	return meta;
> >  
> >  free_table:
> > @@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> >  	unsigned long handle;
> >  	u16 size;
> >  
> > -	read_lock(&meta->tb_lock);
> > +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> > +		cpu_relax();
> > +
> 
> So... this might be dumb question, but this looks like a spinlock
> implementation.
> 
> What advantage does this have over a standard spinlock?

I was wondering the same thing. Furthermore by doing this you'll loose
the benefits of sharing the lock... your numbers do indicate that it is
for the better. Also, note that hopefully rwlock_t will soon be updated
to be fair and perform up to par with spinlocks, something which is long
overdue. So you could reduce the critical region by implementing the
same granularity, just don't implement your own locking schemes, like
this.

> Seth
> 
> >  	handle = meta->table[index].handle;
> >  	size = meta->table[index].size;
> >  
> >  	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
> > -		read_unlock(&meta->tb_lock);
> > +		atomic_set(&meta->table[index].state, IDLE);
> >  		clear_page(mem);
> >  		return 0;
> >  	}
> > @@ -355,7 +356,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> >  	else
> >  		ret = zcomp_decompress(zram->comp, cmem, size, mem);
> >  	zs_unmap_object(meta->mem_pool, handle);
> > -	read_unlock(&meta->tb_lock);
> > +	atomic_set(&meta->table[index].state, IDLE);
> >  
> >  	/* Should NEVER happen. Return bio error if it does. */
> >  	if (unlikely(ret)) {
> > @@ -376,14 +377,16 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> >  	struct zram_meta *meta = zram->meta;
> >  	page = bvec->bv_page;
> >  
> > -	read_lock(&meta->tb_lock);
> > +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> > +		cpu_relax();
> > +

So here you could reduce the number of atomic ops and cacheline bouncing
by doing a plain read before the CAS. It works well for our mutexes and
rwsems. Something like:

while (true) {
	if (atomic_read(&meta->table[index].state) == IDLE &&
	    atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) == IDLE)
		break;	/* lock acquired */
	cpu_relax();
}

But then again, that's kind of implementing your own locking scheme...
use a standard one instead ;)

Also, instead of cpu_relax() you probably want arch_mutex_cpu_relax()
for the sake of z systems.

Thanks,
Davidlohr

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-05 18:00     ` Davidlohr Bueso
@ 2014-05-05 20:46       ` Andrew Morton
  -1 siblings, 0 replies; 30+ messages in thread
From: Andrew Morton @ 2014-05-05 20:46 UTC (permalink / raw)
  To: Davidlohr Bueso
  Cc: Seth Jennings, Weijie Yang, 'Minchan Kim',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman',
	weijie.yang.kh, heesub.shin, 'linux-kernel',
	'Linux-MM'

On Mon, 05 May 2014 11:00:44 -0700 Davidlohr Bueso <davidlohr@hp.com> wrote:

> > > @@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> > >  	unsigned long handle;
> > >  	u16 size;
> > >  
> > > -	read_lock(&meta->tb_lock);
> > > +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> > > +		cpu_relax();
> > > +
> > 
> > So... this might be dumb question, but this looks like a spinlock
> > implementation.
> > 
> > What advantage does this have over a standard spinlock?
> 
> I was wondering the same thing. Furthermore by doing this you'll loose
> the benefits of sharing the lock... your numbers do indicate that it is
> for the better. Also, note that hopefully rwlock_t will soon be updated
> to be fair and perform up to par with spinlocks, something which is long
> overdue. So you could reduce the critical region by implementing the
> same granularity, just don't implement your own locking schemes, like
> this.

It sounds like seqlocks will match this access pattern pretty well?

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-05 20:46       ` Andrew Morton
@ 2014-05-05 22:22         ` Davidlohr Bueso
  -1 siblings, 0 replies; 30+ messages in thread
From: Davidlohr Bueso @ 2014-05-05 22:22 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Seth Jennings, Weijie Yang, 'Minchan Kim',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman',
	weijie.yang.kh, heesub.shin, 'linux-kernel',
	'Linux-MM'

On Mon, 2014-05-05 at 13:46 -0700, Andrew Morton wrote:
> On Mon, 05 May 2014 11:00:44 -0700 Davidlohr Bueso <davidlohr@hp.com> wrote:
> 
> > > > @@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> > > >  	unsigned long handle;
> > > >  	u16 size;
> > > >  
> > > > -	read_lock(&meta->tb_lock);
> > > > +	while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> > > > +		cpu_relax();
> > > > +
> > > 
> > > So... this might be dumb question, but this looks like a spinlock
> > > implementation.
> > > 
> > > What advantage does this have over a standard spinlock?
> > 
> > I was wondering the same thing. Furthermore by doing this you'll loose
> > the benefits of sharing the lock... your numbers do indicate that it is
> > for the better. Also, note that hopefully rwlock_t will soon be updated
> > to be fair and perform up to par with spinlocks, something which is long
> > overdue. So you could reduce the critical region by implementing the
> > same granularity, just don't implement your own locking schemes, like
> > this.
> 
> It sounds like seqlocks will match this access pattern pretty well?

Indeed. And after a closer look, except for zram_slot_free_notify(),
that lock is always taken shared. So, unless making it fine-grained implies
taking the lock exclusively, as in this patch (if so, that needs to be
explicitly documented in the changelog), we would ideally continue to
share it. That _should_ provide nicer performance numbers when using the
correct lock.




^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-05 22:22         ` Davidlohr Bueso
@ 2014-05-07  7:51           ` Weijie Yang
  -1 siblings, 0 replies; 30+ messages in thread
From: Weijie Yang @ 2014-05-07  7:51 UTC (permalink / raw)
  To: 'Davidlohr Bueso', 'Andrew Morton'
  Cc: 'Seth Jennings', 'Minchan Kim',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman',
	weijie.yang.kh, heesub.shin, 'linux-kernel',
	'Linux-MM'

On Tue, May 6, 2014 at 6:22 AM, Davidlohr Bueso <davidlohr@hp.com> wrote:
> On Mon, 2014-05-05 at 13:46 -0700, Andrew Morton wrote:
>> On Mon, 05 May 2014 11:00:44 -0700 Davidlohr Bueso <davidlohr@hp.com> wrote:
>>
>> > > > @@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>> > > >         unsigned long handle;
>> > > >         u16 size;
>> > > >
>> > > > -       read_lock(&meta->tb_lock);
>> > > > +       while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
>> > > > +               cpu_relax();
>> > > > +
>> > >
>> > > So... this might be dumb question, but this looks like a spinlock
>> > > implementation.
>> > >
>> > > What advantage does this have over a standard spinlock?
>> >
>> > I was wondering the same thing. Furthermore by doing this you'll loose
>> > the benefits of sharing the lock... your numbers do indicate that it is
>> > for the better. Also, note that hopefully rwlock_t will soon be updated
>> > to be fair and perform up to par with spinlocks, something which is long
>> > overdue. So you could reduce the critical region by implementing the
>> > same granularity, just don't implement your own locking schemes, like
>> > this.

Actually, the main reason I use a CAS rather than a standard lock here is
that I want to minimize the meta table memory overhead. A minor reason is
my fuzzy recollection that CAS is more efficient than a spinlock (please
correct me if I am wrong).

Anyway, I changed the CAS to spinlock and rwlock, re-test them:

      Test       lock-free	   spinlock     rwlock
------------------------------------------------------
 Initial write   1424141.62   1426372.84   1423019.21
       Rewrite   1652504.81   1623307.14   1653682.04
          Read  11404668.35  11242885.05  10938125.00
       Re-read  11555483.75   11253906.6  10837773.50
  Reverse Read   8394478.17   8277250.34   7768057.39
   Stride read   9372229.95   9010498.53   8692871.77
   Random read   9187221.90   8988080.55   8661184.60
Mixed workload   5843370.85   5414729.54   5451055.03
  Random write   1608947.04   1572276.64   1588866.51
        Pwrite   1311055.32   1302463.04   1302001.06
         Pread   4652056.11   4555802.18   4469672.34

And I can't say which one is the best; they have similar performance.

Wait, iozone creates a temporary file for every test thread, so there is no
possibility that these threads access the same table[index] concurrently.
So, I used fio to test the raw zram block device.
To increase the chance of concurrent access to the same table[index], I set up
zram with a small disksize (10M) and let each thread run with a large loop count.

On the same test machine, the fio test command is:
fio --bs=32k --randrepeat=1 --randseed=100 --refill_buffers
--scramble_buffers=1 --direct=1 --loops=3000 --numjobs=4
--filename=/dev/zram0 --name=seq-write --rw=write --stonewall
--name=seq-read --rw=read --stonewall --name=seq-readwrite
--rw=rw --stonewall --name=rand-readwrite --rw=randrw --stonewall

    Test      base    lock-free   spinlock   rwlock
------------------------------------------------------
seq-write   935109.2   999580.5   998134.8   994384.6
 seq-read  5598064.6  6444011.5  6243184.6  6197514.2
   seq-rw  1403963.0  1635673.0  1633823.0  1635972.2
  rand-rw  1389864.4  1612520.4  1613403.6  1612129.8

This result (KB/s, average of 5 tests) shows the performance improvement
over the base version; however, I can't say which method is the best.

>>
>> It sounds like seqlocks will match this access pattern pretty well?
>
> Indeed. And after a closer look, except for zram_slot_free_notify(),
> that lock is always shared. So, unless fine graining it implies taking
> the lock exclusively like in this patch (if so, that needs to be
> explicitly documented in the changelog), we would ideally continue to
> share it. That _should_ provide nicer performance numbers when using the
> correct lock.
>

Andrew mentioned seqlocks; however, after rechecking the code, I think it is
hard to use seqlocks here, whether as a global meta lock or as a per-table[index]
lock. The main reason is that the writer frees the handle rather than just
changing some value (see the sketch below).
If I misunderstand you, please let me know.
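
To make the concern concrete, a per-entry seqlock reader would look roughly
like the sketch below (the seqlock field and this code are hypothetical, not
from the patch); the handle can be freed by the writer while the reader is
still using it, before read_seqretry() ever reports the conflict:

	unsigned int seq;
	unsigned long handle;

	do {
		seq = read_seqbegin(&meta->table[index].seqlock); /* hypothetical field */
		handle = meta->table[index].handle;
		/*
		 * Dereferencing handle here (zs_map_object() etc.) may touch
		 * memory the writer has already freed with zs_free(); retrying
		 * after read_seqretry() detects the change is too late.
		 */
	} while (read_seqretry(&meta->table[index].seqlock, seq));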

Now, I am in a dilemma. To minimize the memory overhead, I would like to use
CAS. However, it is not a standard approach.

Any complaints or suggestions are welcome.

Regards,

>
>



^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-07  7:51           ` Weijie Yang
@ 2014-05-07  8:57             ` Minchan Kim
  -1 siblings, 0 replies; 30+ messages in thread
From: Minchan Kim @ 2014-05-07  8:57 UTC (permalink / raw)
  To: Weijie Yang
  Cc: 'Davidlohr Bueso', 'Andrew Morton',
	'Seth Jennings', 'Nitin Gupta',
	'Sergey Senozhatsky', 'Bob Liu',
	'Dan Streetman',
	weijie.yang.kh, heesub.shin, 'linux-kernel',
	'Linux-MM'

On Wed, May 07, 2014 at 03:51:35PM +0800, Weijie Yang wrote:
> On Tue, May 6, 2014 at 6:22 AM, Davidlohr Bueso <davidlohr@hp.com> wrote:
> > On Mon, 2014-05-05 at 13:46 -0700, Andrew Morton wrote:
> >> On Mon, 05 May 2014 11:00:44 -0700 Davidlohr Bueso <davidlohr@hp.com> wrote:
> >>
> >> > > > @@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> >> > > >         unsigned long handle;
> >> > > >         u16 size;
> >> > > >
> >> > > > -       read_lock(&meta->tb_lock);
> >> > > > +       while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
> >> > > > +               cpu_relax();
> >> > > > +
> >> > >
> >> > > So... this might be dumb question, but this looks like a spinlock
> >> > > implementation.
> >> > >
> >> > > What advantage does this have over a standard spinlock?
> >> >
> >> > I was wondering the same thing. Furthermore by doing this you'll loose
> >> > the benefits of sharing the lock... your numbers do indicate that it is
> >> > for the better. Also, note that hopefully rwlock_t will soon be updated
> >> > to be fair and perform up to par with spinlocks, something which is long
> >> > overdue. So you could reduce the critical region by implementing the
> >> > same granularity, just don't implement your own locking schemes, like
> >> > this.
> 
> Actually, the main reason I use a CAS rather than a standard lock here is
> that I want to minimize the meta table memory overhead. A tiny reason is
> my fuzzy memory that CAS is more efficient than spinlock (please correct me
> if I am wrong).
> 
> Anyway, I changed the CAS to spinlock and rwlock, re-test them:
> 
>       Test       lock-free	   spinlock     rwlock
> ------------------------------------------------------
>  Initial write   1424141.62   1426372.84   1423019.21
>        Rewrite   1652504.81   1623307.14   1653682.04
>           Read  11404668.35  11242885.05  10938125.00
>        Re-read  11555483.75   11253906.6  10837773.50
>   Reverse Read   8394478.17   8277250.34   7768057.39
>    Stride read   9372229.95   9010498.53   8692871.77
>    Random read   9187221.90   8988080.55   8661184.60
> Mixed workload   5843370.85   5414729.54   5451055.03
>   Random write   1608947.04   1572276.64   1588866.51
>         Pwrite   1311055.32   1302463.04   1302001.06
>          Pread   4652056.11   4555802.18   4469672.34

I'd like to clear this up.
Are the spinlock and rwlock you mentioned per-meta-entry locks, like the
state you added, or a global lock for the meta table? If it's the latter,
does rwlock mean the base?

> 
> And I cann't say which one is the best, they have the similar performance.

The most popular use of zram is in-memory swap for small embedded systems,
so I don't want to increase the memory footprint without good reason, even
if it helps a synthetic benchmark. Although it's 1MB per 1GB, that isn't
small once we consider the compression ratio and the real free memory after
boot. (The data I am interested in is the mixed-workload enhancement. It
would matter for latency under heavy memory pressure, so it attracts me a
lot. Anyway, I need numbers to back up the justification with a real swap
use case rather than zram-blk. Recently I have been considering per-process
reclaim based on zram, so maybe I will run a test for that.)
That said, I have recently received private mail from some server folks who
use zram-blk rather than zram-swap, and for that case such an enhancement
would be desirable. So my point is that I'm not saying we should drop the
patch; let's find a proper solution that meets both needs and gather more data.

> 
> Wait, iozone will create temporary files for every test thread, so there is no
> possibility that these threads access the same table[index] concurrenctly.
> So, I use fio to test the raw zram block device.
> To enhance the possibility of access the same table[index] conflictly, I set zram
> with a small disksize(10M) and let thread run with large loop count.
> 
> On the same test machine, the fio test command is:
> fio --bs=32k --randrepeat=1 --randseed=100 --refill_buffers
> --scramble_buffers=1 --direct=1 --loops=3000 --numjobs=4
> --filename=/dev/zram0 --name=seq-write --rw=write --stonewall
> --name=seq-read --rw=read --stonewall --name=seq-readwrite
> --rw=rw --stonewall --name=rand-readwrite --rw=randrw --stonewall
> 
>     Test      base    lock-free   spinlock   rwlock
> ------------------------------------------------------
> seq-write   935109.2   999580.5   998134.8   994384.6
>  seq-read  5598064.6  6444011.5  6243184.6  6197514.2
>    seq-rw  1403963.0  1635673.0  1633823.0  1635972.2
>   rand-rw  1389864.4  1612520.4  1613403.6  1612129.8

What's the difference between base and rwlock?
Does base mean the global rwlock while rwlock means a per-meta-entry rwlock?


> 
> This result(KB/s, average of 5 tests) shows the performance improvement
> on base version, however, I cann't say which method is the best.
> 
> >>
> >> It sounds like seqlocks will match this access pattern pretty well?
> >
> > Indeed. And after a closer look, except for zram_slot_free_notify(),
> > that lock is always shared. So, unless fine graining it implies taking
> > the lock exclusively like in this patch (if so, that needs to be
> > explicitly documented in the changelog), we would ideally continue to
> > share it. That _should_ provide nicer performance numbers when using the
> > correct lock.
> >
> 
> Andrew mentioned seqlocks, however, I think it is hard the use seqlocks here
> after I recheck the codes. No matter use it as a meta global lock or a
> table[index] lock. The main reason is the writer will free the handle rather than
> just change some value.
> If I misunderstand you, please let me know.
> 
> Now, I am in a delimma. For minimizing the memory overhead, I like to use CAS.
> However, it is not a standard way.
> 
> Any complaint or suggestions are welcomed.
> 
> Regards,
> 
> >
> >
> 
> 

-- 
Kind regards,
Minchan Kim

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-07  8:57             ` Minchan Kim
@ 2014-05-07  9:16               ` Weijie Yang
  -1 siblings, 0 replies; 30+ messages in thread
From: Weijie Yang @ 2014-05-07  9:16 UTC (permalink / raw)
  To: Minchan Kim
  Cc: Weijie Yang, Davidlohr Bueso, Andrew Morton, Seth Jennings,
	Nitin Gupta, Sergey Senozhatsky, Bob Liu, Dan Streetman,
	Heesub Shin, linux-kernel, Linux-MM

On Wed, May 7, 2014 at 4:57 PM, Minchan Kim <minchan@kernel.org> wrote:
> On Wed, May 07, 2014 at 03:51:35PM +0800, Weijie Yang wrote:
>> On Tue, May 6, 2014 at 6:22 AM, Davidlohr Bueso <davidlohr@hp.com> wrote:
>> > On Mon, 2014-05-05 at 13:46 -0700, Andrew Morton wrote:
>> >> On Mon, 05 May 2014 11:00:44 -0700 Davidlohr Bueso <davidlohr@hp.com> wrote:
>> >>
>> >> > > > @@ -339,12 +338,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>> >> > > >         unsigned long handle;
>> >> > > >         u16 size;
>> >> > > >
>> >> > > > -       read_lock(&meta->tb_lock);
>> >> > > > +       while(atomic_cmpxchg(&meta->table[index].state, IDLE, ACCESS) != IDLE)
>> >> > > > +               cpu_relax();
>> >> > > > +
>> >> > >
>> >> > > So... this might be dumb question, but this looks like a spinlock
>> >> > > implementation.
>> >> > >
>> >> > > What advantage does this have over a standard spinlock?
>> >> >
>> >> > I was wondering the same thing. Furthermore by doing this you'll loose
>> >> > the benefits of sharing the lock... your numbers do indicate that it is
>> >> > for the better. Also, note that hopefully rwlock_t will soon be updated
>> >> > to be fair and perform up to par with spinlocks, something which is long
>> >> > overdue. So you could reduce the critical region by implementing the
>> >> > same granularity, just don't implement your own locking schemes, like
>> >> > this.
>>
>> Actually, the main reason I use a CAS rather than a standard lock here is
>> that I want to minimize the meta table memory overhead. A tiny reason is
>> my fuzzy memory that CAS is more efficient than spinlock (please correct me
>> if I am wrong).
>>
>> Anyway, I changed the CAS to spinlock and rwlock, re-test them:
>>
>>       Test       lock-free       spinlock     rwlock
>> ------------------------------------------------------
>>  Initial write   1424141.62   1426372.84   1423019.21
>>        Rewrite   1652504.81   1623307.14   1653682.04
>>           Read  11404668.35  11242885.05  10938125.00
>>        Re-read  11555483.75   11253906.6  10837773.50
>>   Reverse Read   8394478.17   8277250.34   7768057.39
>>    Stride read   9372229.95   9010498.53   8692871.77
>>    Random read   9187221.90   8988080.55   8661184.60
>> Mixed workload   5843370.85   5414729.54   5451055.03
>>   Random write   1608947.04   1572276.64   1588866.51
>>         Pwrite   1311055.32   1302463.04   1302001.06
>>          Pread   4652056.11   4555802.18   4469672.34
>
> I'd like to clear it out.
> The spinlock and rwlock you mentioned is per-meta entry lock like state
> you added or global lock for meta? If it's latter, rwlock means base?

The spinlock and rwlock are per-meta-entry locks, like the state field.
Because the base data was already included in the first mail, I didn't
repeat it here.

>>
>> And I cann't say which one is the best, they have the similar performance.
>
> Most popular use of zram is the in-memory swap for small embedded system
> so I don't want to increase memory footprint without good reason although
> it makes synthetic benchmark. Alhought it's 1M for 1G, it isn't small if we
> consider compression ratio and real free memory after boot(But data I have
> an interest is mixed workload enhancement. It would be important for heavy
> memory pressure for latency so it attractives me a lot. Anyway, I need number
> for back up the justification with real swap usecase rather than zram-blk.
> Recently, I have considered per-process reclaim  based on zram so maybe I will
> have a test for that).
> But recently, I have received private mail from some server folks to use
> zram-blk, not zram-swap so in case of that, such enhancement would be
> desirable so my point is I'm not saying the drop of the patch and let's
> find proper solution to meet both and gather more data.
>
>>
>> Wait, iozone will create temporary files for every test thread, so there is no
>> possibility that these threads access the same table[index] concurrenctly.
>> So, I use fio to test the raw zram block device.
>> To enhance the possibility of access the same table[index] conflictly, I set zram
>> with a small disksize(10M) and let thread run with large loop count.
>>
>> On the same test machine, the fio test command is:
>> fio --bs=32k --randrepeat=1 --randseed=100 --refill_buffers
>> --scramble_buffers=1 --direct=1 --loops=3000 --numjobs=4
>> --filename=/dev/zram0 --name=seq-write --rw=write --stonewall
>> --name=seq-read --rw=read --stonewall --name=seq-readwrite
>> --rw=rw --stonewall --name=rand-readwrite --rw=randrw --stonewall
>>
>>     Test      base    lock-free   spinlock   rwlock
>> ------------------------------------------------------
>> seq-write   935109.2   999580.5   998134.8   994384.6
>>  seq-read  5598064.6  6444011.5  6243184.6  6197514.2
>>    seq-rw  1403963.0  1635673.0  1633823.0  1635972.2
>>   rand-rw  1389864.4  1612520.4  1613403.6  1612129.8
>
> What's the difference between base and rwlock?
> Base means global rwlock while rwlock means per-meta entry rwlock?
>

Base means the global rwlock; it is the 3.15.0-rc3 code.
rwlock means the per-meta-entry rwlock.

Sorry for the confusion.

>>
>> This result(KB/s, average of 5 tests) shows the performance improvement
>> on base version, however, I cann't say which method is the best.
>>
>> >>
>> >> It sounds like seqlocks will match this access pattern pretty well?
>> >
>> > Indeed. And after a closer look, except for zram_slot_free_notify(),
>> > that lock is always shared. So, unless fine graining it implies taking
>> > the lock exclusively like in this patch (if so, that needs to be
>> > explicitly documented in the changelog), we would ideally continue to
>> > share it. That _should_ provide nicer performance numbers when using the
>> > correct lock.
>> >
>>
>> Andrew mentioned seqlocks, however, I think it is hard the use seqlocks here
>> after I recheck the codes. No matter use it as a meta global lock or a
>> table[index] lock. The main reason is the writer will free the handle rather than
>> just change some value.
>> If I misunderstand you, please let me know.
>>
>> Now, I am in a delimma. For minimizing the memory overhead, I like to use CAS.
>> However, it is not a standard way.
>>
>> Any complaint or suggestions are welcomed.
>>
>> Regards,
>>
>> >
>> >
>>
>>
>
> --
> Kind regards,
> Minchan Kim

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-07  9:16               ` Weijie Yang
@ 2014-05-07 14:52                 ` Joonsoo Kim
  -1 siblings, 0 replies; 30+ messages in thread
From: Joonsoo Kim @ 2014-05-07 14:52 UTC (permalink / raw)
  To: Weijie Yang
  Cc: Minchan Kim, Weijie Yang, Davidlohr Bueso, Andrew Morton,
	Seth Jennings, Nitin Gupta, Sergey Senozhatsky, Bob Liu,
	Dan Streetman, Heesub Shin, linux-kernel, Linux-MM

>> Most popular use of zram is the in-memory swap for small embedded system
>> so I don't want to increase memory footprint without good reason although
>> it makes synthetic benchmark. Alhought it's 1M for 1G, it isn't small if we
>> consider compression ratio and real free memory after boot

We can use a bit spinlock, and this would not increase the memory footprint
on 32-bit platforms.

Thanks.

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-07 14:52                 ` Joonsoo Kim
@ 2014-05-08  6:24                   ` Minchan Kim
  -1 siblings, 0 replies; 30+ messages in thread
From: Minchan Kim @ 2014-05-08  6:24 UTC (permalink / raw)
  To: Joonsoo Kim
  Cc: Weijie Yang, Weijie Yang, Davidlohr Bueso, Andrew Morton,
	Seth Jennings, Nitin Gupta, Sergey Senozhatsky, Bob Liu,
	Dan Streetman, Heesub Shin, linux-kernel, Linux-MM

On Wed, May 07, 2014 at 11:52:59PM +0900, Joonsoo Kim wrote:
> >> Most popular use of zram is the in-memory swap for small embedded system
> >> so I don't want to increase memory footprint without good reason although
> >> it makes synthetic benchmark. Alhought it's 1M for 1G, it isn't small if we
> >> consider compression ratio and real free memory after boot
> 
> We can use bit spin lock and this would not increase memory footprint for 32 bit
> platform.

Sounds like a good idea.
Weijie, do you mind testing with a bit spinlock?

> 
> Thanks.
> 

-- 
Kind regards,
Minchan Kim

^ permalink raw reply	[flat|nested] 30+ messages in thread

* RE: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-08  6:24                   ` Minchan Kim
@ 2014-05-10  6:10                     ` Weijie Yang
  -1 siblings, 0 replies; 30+ messages in thread
From: Weijie Yang @ 2014-05-10  6:10 UTC (permalink / raw)
  To: 'Minchan Kim', 'Joonsoo Kim'
  Cc: 'Weijie Yang', 'Davidlohr Bueso',
	'Andrew Morton', 'Seth Jennings',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman', 'Heesub Shin',
	'linux-kernel', 'Linux-MM'

On Thu, May 8, 2014 at 2:24 PM, Minchan Kim <minchan@kernel.org> wrote:
> On Wed, May 07, 2014 at 11:52:59PM +0900, Joonsoo Kim wrote:
>> >> Most popular use of zram is the in-memory swap for small embedded system
>> >> so I don't want to increase memory footprint without good reason although
>> >> it makes synthetic benchmark. Alhought it's 1M for 1G, it isn't small if we
>> >> consider compression ratio and real free memory after boot
>>
>> We can use bit spin lock and this would not increase memory footprint for 32 bit
>> platform.
>
> Sounds like a idea.
> Weijie, Do you mind testing with bit spin lock?

Yes, I re-tested them.
This time, I ran each case 10 times and took the average (KB/s).
(The test machine and method are the same as in my previous mail.)

Iozone test result:

      Test       BASE     CAS   spinlock   rwlock  bit_spinlock
--------------------------------------------------------------
 Initial write  1381094   1425435   1422860   1423075   1421521
       Rewrite  1529479   1641199   1668762   1672855   1654910
          Read  8468009  11324979  11305569  11117273  10997202
       Re-read  8467476  11260914  11248059  11145336  10906486
  Reverse Read  6821393   8106334   8282174   8279195   8109186
   Stride read  7191093   8994306   9153982   8961224   9004434
   Random read  7156353   8957932   9167098   8980465   8940476
Mixed workload  4172747   5680814   5927825   5489578   5972253
  Random write  1483044   1605588   1594329   1600453   1596010
        Pwrite  1276644   1303108   1311612   1314228   1300960
         Pread  4324337   4632869   4618386   4457870   4500166

Fio test result:

    Test     base     CAS    spinlock    rwlock  bit_spinlock
-------------------------------------------------------------
seq-write   933789   999357   1003298    995961   1001958
 seq-read  5634130  6577930   6380861   6243912   6230006
   seq-rw  1405687  1638117   1640256   1633903   1634459
  rand-rw  1386119  1614664   1617211   1609267   1612471


The base is v3.15.0-rc3; the others use a per-meta-entry lock.
Every optimization method shows higher performance than the base; however,
it is hard to say which method is the most appropriate.

For bit_spinlock, the modified code is mainly as follows:

+#define ZRAM_FLAG_SHIFT 16
+
enum zram_pageflags {
 	/* Page consists entirely of zeros */
-	ZRAM_ZERO,
+	ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
+	ZRAM_ACCESS,
 
 	__NR_ZRAM_PAGEFLAGS,
 };
 
 /* Allocated for each disk page */
 struct table {
 	unsigned long handle;
-	u16 size;	/* object size (excluding header) */
-	u8 flags;
+	unsigned long value;
 } __aligned(4);

The lower ZRAM_FLAG_SHIFT bits of table.value hold the size, and the higher
bits hold the zram_pageflags. This way, it doesn't increase the memory
overhead on either 32-bit or 64-bit systems.
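
A rough sketch of how the per-entry lock can then be taken on a bit of
table.value with the standard bit_spin_lock() helpers (the helper names and
the call site are illustrative, not necessarily the final code):

#include <linux/bit_spinlock.h>

static void zram_lock_table(struct table *t)
{
	bit_spin_lock(ZRAM_ACCESS, &t->value);
}

static void zram_unlock_table(struct table *t)
{
	bit_spin_unlock(ZRAM_ACCESS, &t->value);
}

	/* e.g. in the read path, replacing the CAS loop: */
	zram_lock_table(&meta->table[index]);
	handle = meta->table[index].handle;
	size = meta->table[index].value & ((1 << ZRAM_FLAG_SHIFT) - 1);
	/* ... use handle/size under the lock ... */
	zram_unlock_table(&meta->table[index]);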

Any complaints or suggestions are welcome.

>>
>> Thanks.
>>
>
> --
> Kind regards,
> Minchan Kim



^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-10  6:10                     ` Weijie Yang
@ 2014-05-12  5:15                       ` Minchan Kim
  -1 siblings, 0 replies; 30+ messages in thread
From: Minchan Kim @ 2014-05-12  5:15 UTC (permalink / raw)
  To: Weijie Yang
  Cc: 'Joonsoo Kim', 'Weijie Yang',
	'Davidlohr Bueso', 'Andrew Morton',
	'Seth Jennings', 'Nitin Gupta',
	'Sergey Senozhatsky', 'Bob Liu',
	'Dan Streetman', 'Heesub Shin',
	'linux-kernel', 'Linux-MM'

On Sat, May 10, 2014 at 02:10:08PM +0800, Weijie Yang wrote:
> On Thu, May 8, 2014 at 2:24 PM, Minchan Kim <minchan@kernel.org> wrote:
> > On Wed, May 07, 2014 at 11:52:59PM +0900, Joonsoo Kim wrote:
> >> >> Most popular use of zram is the in-memory swap for small embedded system
> >> >> so I don't want to increase memory footprint without good reason although
> >> >> it makes synthetic benchmark. Alhought it's 1M for 1G, it isn't small if we
> >> >> consider compression ratio and real free memory after boot
> >>
> >> We can use bit spin lock and this would not increase memory footprint for 32 bit
> >> platform.
> >
> > Sounds like a idea.
> > Weijie, Do you mind testing with bit spin lock?
> 
> Yes, I re-test them.
> This time, I test each case 10 times, and take the average(KS/s).
> (the test machine and method are same like previous mail's)
> 
> Iozone test result:
> 
>       Test       BASE     CAS   spinlock   rwlock  bit_spinlock
> --------------------------------------------------------------
>  Initial write  1381094   1425435   1422860   1423075   1421521
>        Rewrite  1529479   1641199   1668762   1672855   1654910
>           Read  8468009  11324979  11305569  11117273  10997202
>        Re-read  8467476  11260914  11248059  11145336  10906486
>   Reverse Read  6821393   8106334   8282174   8279195   8109186
>    Stride read  7191093   8994306   9153982   8961224   9004434
>    Random read  7156353   8957932   9167098   8980465   8940476
> Mixed workload  4172747   5680814   5927825   5489578   5972253
>   Random write  1483044   1605588   1594329   1600453   1596010
>         Pwrite  1276644   1303108   1311612   1314228   1300960
>          Pread  4324337   4632869   4618386   4457870   4500166
> 
> Fio test result:
> 
>     Test     base     CAS    spinlock    rwlock  bit_spinlock
> -------------------------------------------------------------
> seq-write   933789   999357   1003298    995961   1001958
>  seq-read  5634130  6577930   6380861   6243912   6230006
>    seq-rw  1405687  1638117   1640256   1633903   1634459
>   rand-rw  1386119  1614664   1617211   1609267   1612471
> 
> 
> The base is v3.15.0-rc3, the others are per-meta entry lock.
> Every optimization method shows higher performance than the base, however,
> it is hard to say which method is the most appropriate.

The difference between CAS and bit_spinlock isn't big, so I prefer the more general method.

> 
> To bit_spinlock, the modified code is mainly like this:
> 
> +#define ZRAM_FLAG_SHIFT 16
> +
> enum zram_pageflags {
>  	/* Page consists entirely of zeros */
> -	ZRAM_ZERO,
> +	ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
> +	ZRAM_ACCESS,
>  
>  	__NR_ZRAM_PAGEFLAGS,
>  };
>  
>  /* Allocated for each disk page */
>  struct table {
>  	unsigned long handle;
> -	u16 size;	/* object size (excluding header) */
> -	u8 flags;
> +	unsigned long value;

Why do we need to change flags and size into a single "unsigned long value"?
Couldn't we keep the existing flags and just add a new ZRAM_TABLE_LOCK?


>  } __aligned(4);
> 
> The lower ZRAM_FLAG_SHIFT bits of table.value is size, the higher bits
> is for zram_pageflags. By this means, it doesn't increase any memory
> overhead on both 32-bit and 64-bit system.
> 
> Any complaint or suggestions are welcomed.

Anyway, I'd like to go this way.
Please resend a formal patch with a version number.

Thanks!

> 
> >>
> >> Thanks.
> >>
> >
> > --
> > Kind regards,
> > Minchan Kim
> 
> 

-- 
Kind regards,
Minchan Kim

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-12  5:15                       ` Minchan Kim
@ 2014-05-12 14:49                         ` Davidlohr Bueso
  -1 siblings, 0 replies; 30+ messages in thread
From: Davidlohr Bueso @ 2014-05-12 14:49 UTC (permalink / raw)
  To: Minchan Kim
  Cc: Weijie Yang, 'Joonsoo Kim', 'Weijie Yang',
	'Andrew Morton', 'Seth Jennings',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman', 'Heesub Shin',
	'linux-kernel', 'Linux-MM'

On Mon, 2014-05-12 at 14:15 +0900, Minchan Kim wrote:
> On Sat, May 10, 2014 at 02:10:08PM +0800, Weijie Yang wrote:
> > On Thu, May 8, 2014 at 2:24 PM, Minchan Kim <minchan@kernel.org> wrote:
> > > On Wed, May 07, 2014 at 11:52:59PM +0900, Joonsoo Kim wrote:
> > >> >> Most popular use of zram is the in-memory swap for small embedded system
> > >> >> so I don't want to increase memory footprint without good reason although
> > >> >> it makes synthetic benchmark. Alhought it's 1M for 1G, it isn't small if we
> > >> >> consider compression ratio and real free memory after boot
> > >>
> > >> We can use bit spin lock and this would not increase memory footprint for 32 bit
> > >> platform.
> > >
> > > Sounds like a idea.
> > > Weijie, Do you mind testing with bit spin lock?
> > 
> > Yes, I re-test them.
> > This time, I test each case 10 times, and take the average(KS/s).
> > (the test machine and method are same like previous mail's)
> > 
> > Iozone test result:
> > 
> >       Test       BASE     CAS   spinlock   rwlock  bit_spinlock
> > --------------------------------------------------------------
> >  Initial write  1381094   1425435   1422860   1423075   1421521
> >        Rewrite  1529479   1641199   1668762   1672855   1654910
> >           Read  8468009  11324979  11305569  11117273  10997202
> >        Re-read  8467476  11260914  11248059  11145336  10906486
> >   Reverse Read  6821393   8106334   8282174   8279195   8109186
> >    Stride read  7191093   8994306   9153982   8961224   9004434
> >    Random read  7156353   8957932   9167098   8980465   8940476
> > Mixed workload  4172747   5680814   5927825   5489578   5972253
> >   Random write  1483044   1605588   1594329   1600453   1596010
> >         Pwrite  1276644   1303108   1311612   1314228   1300960
> >          Pread  4324337   4632869   4618386   4457870   4500166
> > 
> > Fio test result:
> > 
> >     Test     base     CAS    spinlock    rwlock  bit_spinlock
> > -------------------------------------------------------------
> > seq-write   933789   999357   1003298    995961   1001958
> >  seq-read  5634130  6577930   6380861   6243912   6230006
> >    seq-rw  1405687  1638117   1640256   1633903   1634459
> >   rand-rw  1386119  1614664   1617211   1609267   1612471
> > 
> > 
> > The base is v3.15.0-rc3, the others are per-meta entry lock.
> > Every optimization method shows higher performance than the base, however,
> > it is hard to say which method is the most appropriate.
> 
> It's not too big between CAS and bit_spinlock so I prefer general method.

Well, I imagine that's because the test system is small enough that the
lock is not stressed very hard. Bit spinlocks are considerably slower than
the other types. I'm not sure we really care in the case of zram, but in
general I really dislike this lock. It suffers from just about everything
our regular spinlocks try to optimize for, especially unfairness in who
gets the lock when contended (there is no ticketing).
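
To make the unfairness point concrete, this is roughly what bit_spin_lock()
boils down to (a simplified sketch; the real kernel helper also handles
preemption): every waiter keeps retrying a test-and-set on the same bit, so
there is no FIFO ordering of the kind ticket spinlocks provide.

static inline void bit_spin_lock_sketch(int bitnum, unsigned long *addr)
{
	/*
	 * No queue, no ticket: whichever CPU wins the next test-and-set
	 * gets the lock, regardless of how long the others have waited.
	 */
	while (test_and_set_bit_lock(bitnum, addr)) {
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
	}
}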

Thanks,
Davidlohr


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH] zram: remove global tb_lock by using lock-free CAS
  2014-05-12 14:49                         ` Davidlohr Bueso
@ 2014-05-13  0:03                           ` Minchan Kim
  -1 siblings, 0 replies; 30+ messages in thread
From: Minchan Kim @ 2014-05-13  0:03 UTC (permalink / raw)
  To: Davidlohr Bueso
  Cc: Weijie Yang, 'Joonsoo Kim', 'Weijie Yang',
	'Andrew Morton', 'Seth Jennings',
	'Nitin Gupta', 'Sergey Senozhatsky',
	'Bob Liu', 'Dan Streetman', 'Heesub Shin',
	'linux-kernel', 'Linux-MM'

Hello David,

On Mon, May 12, 2014 at 07:49:18AM -0700, Davidlohr Bueso wrote:
> On Mon, 2014-05-12 at 14:15 +0900, Minchan Kim wrote:
> > On Sat, May 10, 2014 at 02:10:08PM +0800, Weijie Yang wrote:
> > > On Thu, May 8, 2014 at 2:24 PM, Minchan Kim <minchan@kernel.org> wrote:
> > > > On Wed, May 07, 2014 at 11:52:59PM +0900, Joonsoo Kim wrote:
> > > >> >> Most popular use of zram is the in-memory swap for small embedded system
> > > >> >> so I don't want to increase memory footprint without good reason although
> > > >> >> it makes synthetic benchmark. Alhought it's 1M for 1G, it isn't small if we
> > > >> >> consider compression ratio and real free memory after boot
> > > >>
> > > >> We can use bit spin lock and this would not increase memory footprint for 32 bit
> > > >> platform.
> > > >
> > > > Sounds like a idea.
> > > > Weijie, Do you mind testing with bit spin lock?
> > > 
> > > Yes, I re-test them.
> > > This time, I test each case 10 times, and take the average(KS/s).
> > > (the test machine and method are same like previous mail's)
> > > 
> > > Iozone test result:
> > > 
> > >       Test       BASE     CAS   spinlock   rwlock  bit_spinlock
> > > --------------------------------------------------------------
> > >  Initial write  1381094   1425435   1422860   1423075   1421521
> > >        Rewrite  1529479   1641199   1668762   1672855   1654910
> > >           Read  8468009  11324979  11305569  11117273  10997202
> > >        Re-read  8467476  11260914  11248059  11145336  10906486
> > >   Reverse Read  6821393   8106334   8282174   8279195   8109186
> > >    Stride read  7191093   8994306   9153982   8961224   9004434
> > >    Random read  7156353   8957932   9167098   8980465   8940476
> > > Mixed workload  4172747   5680814   5927825   5489578   5972253
> > >   Random write  1483044   1605588   1594329   1600453   1596010
> > >         Pwrite  1276644   1303108   1311612   1314228   1300960
> > >          Pread  4324337   4632869   4618386   4457870   4500166
> > > 
> > > Fio test result:
> > > 
> > >     Test     base     CAS    spinlock    rwlock  bit_spinlock
> > > -------------------------------------------------------------
> > > seq-write   933789   999357   1003298    995961   1001958
> > >  seq-read  5634130  6577930   6380861   6243912   6230006
> > >    seq-rw  1405687  1638117   1640256   1633903   1634459
> > >   rand-rw  1386119  1614664   1617211   1609267   1612471
> > > 
> > > 
> > > The base is v3.15.0-rc3, the others are per-meta entry lock.
> > > Every optimization method shows higher performance than the base, however,
> > > it is hard to say which method is the most appropriate.
> > 
> > It's not too big between CAS and bit_spinlock so I prefer general method.
> 
> Well, I imagine that's because the test system is small enough that the
> lock is not stressed enough. Bit spinlocks are considerably slower than
> other types. I'm not sure if we really care for the case of zram, but in
> general I really dislike this lock. It suffers from just about
> everything our regular spinlocks try to optimize, specially unfairness
> in who gets the lock when contended (ticketing).

As you said, that's true in general, but it isn't the case for zram.
The most popular zram use case is in-memory swap for small embedded systems
(at most 4 CPUs, and they are not always online), so I believe lock contention
(concurrent swap-out or swap-in of the same slot?) is extremely rare
(i.e., in practice the upper layer's locking already prevents it).

The other use case is zram-blk; these days some people have started using zram
as a plain block device, but it would be the same as zram-swap: the upper layer
(e.g., a file system) already holds a lock that prevents concurrent access to
the same block, so contention would be rare there, too.

I don't want to bloat zram's memory footprint for a minor use case, especially
without a real report with numbers. We have a reasonable rationale to use
bit_spin_lock, as described above.

> 
> Thanks,
> Davidlohr
> 

-- 
Kind regards,
Minchan Kim

^ permalink raw reply	[flat|nested] 30+ messages in thread

end of thread, other threads:[~2014-05-13  0:00 UTC | newest]

Thread overview: 30+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-05-05  4:01 [PATCH] zram: remove global tb_lock by using lock-free CAS Weijie Yang
2014-05-05  4:01 ` Weijie Yang
2014-05-05 10:32 ` Sergey Senozhatsky
2014-05-05 10:32   ` Sergey Senozhatsky
2014-05-05 15:20 ` Seth Jennings
2014-05-05 15:20   ` Seth Jennings
2014-05-05 18:00   ` Davidlohr Bueso
2014-05-05 18:00     ` Davidlohr Bueso
2014-05-05 20:46     ` Andrew Morton
2014-05-05 20:46       ` Andrew Morton
2014-05-05 22:22       ` Davidlohr Bueso
2014-05-05 22:22         ` Davidlohr Bueso
2014-05-07  7:51         ` Weijie Yang
2014-05-07  7:51           ` Weijie Yang
2014-05-07  8:57           ` Minchan Kim
2014-05-07  8:57             ` Minchan Kim
2014-05-07  9:16             ` Weijie Yang
2014-05-07  9:16               ` Weijie Yang
2014-05-07 14:52               ` Joonsoo Kim
2014-05-07 14:52                 ` Joonsoo Kim
2014-05-08  6:24                 ` Minchan Kim
2014-05-08  6:24                   ` Minchan Kim
2014-05-10  6:10                   ` Weijie Yang
2014-05-10  6:10                     ` Weijie Yang
2014-05-12  5:15                     ` Minchan Kim
2014-05-12  5:15                       ` Minchan Kim
2014-05-12 14:49                       ` Davidlohr Bueso
2014-05-12 14:49                         ` Davidlohr Bueso
2014-05-13  0:03                         ` Minchan Kim
2014-05-13  0:03                           ` Minchan Kim
