* [PATCH 1/2] zram: factor-out zram_decompress_page() function
@ 2012-10-27 16:00 Sergey Senozhatsky
  2012-10-29 17:14 ` Nitin Gupta
  0 siblings, 1 reply; 12+ messages in thread
From: Sergey Senozhatsky @ 2012-10-27 16:00 UTC (permalink / raw)
  To: Nitin Gupta; +Cc: Greg Kroah-Hartman, linux-kernel

  zram: factor-out zram_decompress_page() function

  zram_bvec_read() shares decompression functionality with the zram_read_before_write()
  function. Factor the common code out into a zram_decompress_page() function, which also
  simplifies error handling in zram_bvec_read().
    
  Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>

---

 drivers/staging/zram/zram_drv.c | 115 +++++++++++++++++-----------------------
 1 file changed, 50 insertions(+), 65 deletions(-)

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 6edefde..7585467 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
 	return bvec->bv_len != PAGE_SIZE;
 }
 
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-			  u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-	int ret;
-	size_t clen;
-	struct page *page;
-	unsigned char *user_mem, *cmem, *uncmem = NULL;
-
-	page = bvec->bv_page;
-
-	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-		handle_zero_page(bvec);
-		return 0;
-	}
+	int ret = LZO_E_OK;
+	size_t clen = PAGE_SIZE;
+	unsigned char *cmem;
+	unsigned long handle = zram->table[index].handle;
 
-	/* Requested page is not present in compressed area */
-	if (unlikely(!zram->table[index].handle)) {
-		pr_debug("Read before write: sector=%lu, size=%u",
-			 (ulong)(bio->bi_sector), bio->bi_size);
-		handle_zero_page(bvec);
+	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
-	if (is_partial_io(bvec)) {
-		/* Use  a temporary buffer to decompress the page */
-		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!uncmem) {
-			pr_info("Error allocating temp memory!\n");
-			return -ENOMEM;
-		}
-	}
-
-	user_mem = kmap_atomic(page);
-	if (!is_partial_io(bvec))
-		uncmem = user_mem;
-	clen = PAGE_SIZE;
-
-	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
-				ZS_MM_RO);
-
-	if (zram->table[index].size == PAGE_SIZE) {
-		memcpy(uncmem, cmem, PAGE_SIZE);
-		ret = LZO_E_OK;
-	} else {
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	if (zram->table[index].size == PAGE_SIZE)
+		memcpy(mem, cmem, PAGE_SIZE);
+	else
 		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    uncmem, &clen);
-	}
-
-	if (is_partial_io(bvec)) {
-		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-		       bvec->bv_len);
-		kfree(uncmem);
-	}
-
-	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem);
+						mem, &clen);
+	zs_unmap_object(zram->mem_pool, handle);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -247,36 +210,58 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		return ret;
 	}
 
-	flush_dcache_page(page);
-
 	return 0;
 }
 
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, int offset, struct bio *bio)
 {
 	int ret;
-	size_t clen = PAGE_SIZE;
-	unsigned char *cmem;
-	unsigned long handle = zram->table[index].handle;
+	struct page *page;
+	unsigned char *user_mem, *uncmem = NULL;
 
-	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
-		memset(mem, 0, PAGE_SIZE);
+	page = bvec->bv_page;
+
+	if (unlikely(!zram->table[index].handle) ||
+			zram_test_flag(zram, index, ZRAM_ZERO)) {
+		pr_debug("Read before write: sector=%lu, size=%u",
+			 (ulong)(bio->bi_sector), bio->bi_size);
+		handle_zero_page(bvec);
 		return 0;
 	}
 
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
-	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    mem, &clen);
-	zs_unmap_object(zram->mem_pool, handle);
+	user_mem = kmap_atomic(page);
+	if (is_partial_io(bvec))
+		/* Use  a temporary buffer to decompress the page */
+		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	else
+		uncmem = user_mem;
 
+	if (!uncmem) {
+		pr_info("Unable to allocate temp memory\n");
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+
+	ret = zram_decompress_page(zram, uncmem, index);
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 		zram_stat64_inc(zram, &zram->stats.failed_reads);
-		return ret;
+		goto out_cleanup;
 	}
 
-	return 0;
+	if (is_partial_io(bvec))
+		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+				bvec->bv_len);
+
+	flush_dcache_page(page);
+	ret = 0;
+out_cleanup:
+	kunmap_atomic(user_mem);
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+	return ret;
 }
 
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
@@ -302,7 +287,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			ret = -ENOMEM;
 			goto out;
 		}
-		ret = zram_read_before_write(zram, uncmem, index);
+		ret = zram_decompress_page(zram, uncmem, index);
 		if (ret) {
 			kfree(uncmem);
 			goto out;




* Re: [PATCH 1/2] zram: factor-out zram_decompress_page() function
  2012-10-27 16:00 [PATCH 1/2] zram: factor-out zram_decompress_page() function Sergey Senozhatsky
@ 2012-10-29 17:14 ` Nitin Gupta
  2012-10-29 17:33   ` Sergey Senozhatsky
                     ` (2 more replies)
  0 siblings, 3 replies; 12+ messages in thread
From: Nitin Gupta @ 2012-10-29 17:14 UTC (permalink / raw)
  To: Sergey Senozhatsky; +Cc: Greg Kroah-Hartman, Fengguang Wu, linux-kernel

On 10/27/2012 09:00 AM, Sergey Senozhatsky wrote:
>    zram: factor-out zram_decompress_page() function
>
>    zram_bvec_read() shared decompress functionality with zram_read_before_write() function.
>    Factor-out and make commonly used zram_decompress_page() function, which also simplified
>    error handling in zram_bvec_read().
>
>    Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
>
> ---
>
>   drivers/staging/zram/zram_drv.c | 115 +++++++++++++++++-----------------------
>   1 file changed, 50 insertions(+), 65 deletions(-)
>
> diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
> index 6edefde..7585467 100644
> --- a/drivers/staging/zram/zram_drv.c
> +++ b/drivers/staging/zram/zram_drv.c
> @@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
>   	return bvec->bv_len != PAGE_SIZE;
>   }
>
> -static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> -			  u32 index, int offset, struct bio *bio)
> +static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>   {
> -	int ret;
> -	size_t clen;
> -	struct page *page;
> -	unsigned char *user_mem, *cmem, *uncmem = NULL;
> -
> -	page = bvec->bv_page;
> -
> -	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
> -		handle_zero_page(bvec);
> -		return 0;
> -	}
> +	int ret = LZO_E_OK;
> +	size_t clen = PAGE_SIZE;
> +	unsigned char *cmem;
> +	unsigned long handle = zram->table[index].handle;
>
> -	/* Requested page is not present in compressed area */
> -	if (unlikely(!zram->table[index].handle)) {
> -		pr_debug("Read before write: sector=%lu, size=%u",
> -			 (ulong)(bio->bi_sector), bio->bi_size);
> -		handle_zero_page(bvec);
> +	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
> +		memset(mem, 0, PAGE_SIZE);
>   		return 0;
>   	}
>
> -	if (is_partial_io(bvec)) {
> -		/* Use  a temporary buffer to decompress the page */
> -		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
> -		if (!uncmem) {
> -			pr_info("Error allocating temp memory!\n");
> -			return -ENOMEM;
> -		}
> -	}
> -
> -	user_mem = kmap_atomic(page);
> -	if (!is_partial_io(bvec))
> -		uncmem = user_mem;
> -	clen = PAGE_SIZE;
> -
> -	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
> -				ZS_MM_RO);
> -
> -	if (zram->table[index].size == PAGE_SIZE) {
> -		memcpy(uncmem, cmem, PAGE_SIZE);
> -		ret = LZO_E_OK;
> -	} else {
> +	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
> +	if (zram->table[index].size == PAGE_SIZE)
> +		memcpy(mem, cmem, PAGE_SIZE);
> +	else
>   		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
> -				    uncmem, &clen);
> -	}
> -
> -	if (is_partial_io(bvec)) {
> -		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
> -		       bvec->bv_len);
> -		kfree(uncmem);
> -	}
> -
> -	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
> -	kunmap_atomic(user_mem);
> +						mem, &clen);
> +	zs_unmap_object(zram->mem_pool, handle);
>
>   	/* Should NEVER happen. Return bio error if it does. */
>   	if (unlikely(ret != LZO_E_OK)) {
> @@ -247,36 +210,58 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
>   		return ret;
>   	}
>
> -	flush_dcache_page(page);
> -
>   	return 0;
>   }
>
> -static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
> +static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> +			  u32 index, int offset, struct bio *bio)
>   {
>   	int ret;
> -	size_t clen = PAGE_SIZE;
> -	unsigned char *cmem;
> -	unsigned long handle = zram->table[index].handle;
> +	struct page *page;
> +	unsigned char *user_mem, *uncmem = NULL;
>
> -	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
> -		memset(mem, 0, PAGE_SIZE);
> +	page = bvec->bv_page;
> +
> +	if (unlikely(!zram->table[index].handle) ||
> +			zram_test_flag(zram, index, ZRAM_ZERO)) {
> +		pr_debug("Read before write: sector=%lu, size=%u",
> +			 (ulong)(bio->bi_sector), bio->bi_size);


"Read before write" message is not valid in case ZRAM_ZERO flag is set. 
Its true only in !handle case.

Otherwise, the patch looks good to me.

On a side note, zram still contains a known use-after-free bug, reported
by Fengguang Wu (CC'ed), which happens in the "partial I/O" (i.e. non
PAGE_SIZE'ed I/O) case and is fixed by the following patch.

Please let me know if you can include the following patch when you
resend this patch series; otherwise I can send it myself, or wait for this
to be merged and then send it later.

======
zram: Fix use-after-free in partial I/O case

When the compressed size of a page exceeds a threshold, the page is
stored as-is, i.e. in uncompressed form. In the partial I/O (i.e.
non-PAGE_SIZE'ed I/O) case, however, the uncompressed memory was being
freed before it could be copied into the zsmalloc pool, resulting in a
use-after-free bug.

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
---

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 7585467..635736b 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -288,10 +288,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
  			goto out;
  		}
  		ret = zram_decompress_page(zram, uncmem, index);
-		if (ret) {
-			kfree(uncmem);
+		if (ret)
  			goto out;
-		}
  	}

  	/*
@@ -312,8 +310,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,

  	if (page_zero_filled(uncmem)) {
  		kunmap_atomic(user_mem);
-		if (is_partial_io(bvec))
-			kfree(uncmem);
  		zram_stat_inc(&zram->stats.pages_zero);
  		zram_set_flag(zram, index, ZRAM_ZERO);
  		ret = 0;
@@ -324,8 +320,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
  			       zram->compress_workmem);

  	kunmap_atomic(user_mem);
-	if (is_partial_io(bvec))
-			kfree(uncmem);

  	if (unlikely(ret != LZO_E_OK)) {
  		pr_err("Compression failed! err=%d\n", ret);
@@ -360,11 +354,15 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
  	if (clen <= PAGE_SIZE / 2)
  		zram_stat_inc(&zram->stats.good_compress);

-	return 0;
+	ret = 0;

  out:
  	if (ret)
  		zram_stat64_inc(zram, &zram->stats.failed_writes);
+
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+
  	return ret;
  }


BTW, I could not trigger this partial I/O case, so please let me know if 
you hit any issue during your testing.
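
One way that might exercise it (untested; it assumes /dev/zram0 exists with
512-byte logical sectors, neither of which is verified here) is a 512-byte
O_DIRECT read, so the bio carries a bvec with bv_len != PAGE_SIZE:

/* Untested sketch: a sub-page direct read against the zram device, so that
 * is_partial_io() should evaluate true in zram_bvec_read().
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/dev/zram0", O_RDONLY | O_DIRECT);

	if (fd < 0 || posix_memalign(&buf, 512, 512))
		return 1;
	/* 512 bytes != PAGE_SIZE, so this should hit the partial I/O path */
	if (pread(fd, buf, 512, 0) != 512)
		perror("pread");
	free(buf);
	close(fd);
	return 0;
}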

There is another sparse warning to be fixed: zram_reset_device should be 
static.

Thanks,
Nitin



* Re: [PATCH 1/2] zram: factor-out zram_decompress_page() function
  2012-10-29 17:14 ` Nitin Gupta
@ 2012-10-29 17:33   ` Sergey Senozhatsky
  2012-10-29 18:05   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function (v2) Sergey Senozhatsky
  2012-10-30 21:04   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function Sergey Senozhatsky
  2 siblings, 0 replies; 12+ messages in thread
From: Sergey Senozhatsky @ 2012-10-29 17:33 UTC (permalink / raw)
  To: Nitin Gupta; +Cc: Greg Kroah-Hartman, Fengguang Wu, linux-kernel

On (10/29/12 10:14), Nitin Gupta wrote:
> 
> "Read before write" message is not valid in case ZRAM_ZERO flag is
> set. Its true only in !handle case.
> 

Do we actually need this message?


> Otherwise, the patch looks good to me.
> 
> On a side note, zram still contains a known use-after-free bug
> reported by Fengguang Wu (CC'ed) which happens in the "partial I/O"
> i.e. non PAGE_SIZE'ed I/O case which is fixed by the following patch.
> 
> Please let me know if you can include the following patch when you
> resend this patch series, or I can do the same or will wait for this
> to be merged and then send it later.
>

Nitin, let's deal with one change at a time. I'll try to resend my patch
shortly; then we can continue with your fix (I didn't hit that problem, though
I'll be happy to help with testing).

	-ss

 
> ======
> zram: Fix use-after-free in partial I/O case
> 
> When the compressed size of a page exceeds a threshold, the page is
> stored as-is i.e. in uncompressed form. In the partial I/O i.e.
> non-PAGE_SIZE'ed I/O case, however, the uncompressed memory was being
> freed before it could be copied into the zsmalloc pool resulting in
> use-after-free bug.
> 
> Signed-off-by: Nitin Gupta <ngupta@vflare.org>
> ---
> 
> diff --git a/drivers/staging/zram/zram_drv.c
> b/drivers/staging/zram/zram_drv.c
> index 7585467..635736b 100644
> --- a/drivers/staging/zram/zram_drv.c
> +++ b/drivers/staging/zram/zram_drv.c
> @@ -288,10 +288,8 @@ static int zram_bvec_write(struct zram *zram,
> struct bio_vec *bvec, u32 index,
>  			goto out;
>  		}
>  		ret = zram_decompress_page(zram, uncmem, index);
> -		if (ret) {
> -			kfree(uncmem);
> +		if (ret)
>  			goto out;
> -		}
>  	}
> 
>  	/*
> @@ -312,8 +310,6 @@ static int zram_bvec_write(struct zram *zram,
> struct bio_vec *bvec, u32 index,
> 
>  	if (page_zero_filled(uncmem)) {
>  		kunmap_atomic(user_mem);
> -		if (is_partial_io(bvec))
> -			kfree(uncmem);
>  		zram_stat_inc(&zram->stats.pages_zero);
>  		zram_set_flag(zram, index, ZRAM_ZERO);
>  		ret = 0;
> @@ -324,8 +320,6 @@ static int zram_bvec_write(struct zram *zram,
> struct bio_vec *bvec, u32 index,
>  			       zram->compress_workmem);
> 
>  	kunmap_atomic(user_mem);
> -	if (is_partial_io(bvec))
> -			kfree(uncmem);
> 
>  	if (unlikely(ret != LZO_E_OK)) {
>  		pr_err("Compression failed! err=%d\n", ret);
> @@ -360,11 +354,15 @@ static int zram_bvec_write(struct zram *zram,
> struct bio_vec *bvec, u32 index,
>  	if (clen <= PAGE_SIZE / 2)
>  		zram_stat_inc(&zram->stats.good_compress);
> 
> -	return 0;
> +	ret = 0;
> 
>  out:
>  	if (ret)
>  		zram_stat64_inc(zram, &zram->stats.failed_writes);
> +
> +	if (is_partial_io(bvec))
> +		kfree(uncmem);
> +
>  	return ret;
>  }
> 
> 
> BTW, I could not trigger this partial I/O case, so please let me know
> if you hit any issue during your testing.
> 
> There is another sparse warning to be fixed: zram_reset_device should
> be static.
> 
> Thanks,
> Nitin
> 


* [PATCH 1/2] zram: factor-out zram_decompress_page() function (v2)
  2012-10-29 17:14 ` Nitin Gupta
  2012-10-29 17:33   ` Sergey Senozhatsky
@ 2012-10-29 18:05   ` Sergey Senozhatsky
  2012-10-29 18:32     ` Nitin Gupta
  2012-10-30 21:04   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function Sergey Senozhatsky
  2 siblings, 1 reply; 12+ messages in thread
From: Sergey Senozhatsky @ 2012-10-29 18:05 UTC (permalink / raw)
  To: Nitin Gupta; +Cc: Greg Kroah-Hartman, linux-kernel

  zram: factor-out zram_decompress_page() function

  zram_bvec_read() shares decompression functionality with the zram_read_before_write()
  function. Factor the common code out into a zram_decompress_page() function, which also
  simplifies error handling in zram_bvec_read().

  V2: changed debug message for handle_zero_page() case to a more general one

  Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>

---

 drivers/staging/zram/zram_drv.c | 115 +++++++++++++++++-----------------------
 1 file changed, 50 insertions(+), 65 deletions(-)

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 6edefde..bd5efdf 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
 	return bvec->bv_len != PAGE_SIZE;
 }
 
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-			  u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-	int ret;
-	size_t clen;
-	struct page *page;
-	unsigned char *user_mem, *cmem, *uncmem = NULL;
-
-	page = bvec->bv_page;
-
-	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-		handle_zero_page(bvec);
-		return 0;
-	}
+	int ret = LZO_E_OK;
+	size_t clen = PAGE_SIZE;
+	unsigned char *cmem;
+	unsigned long handle = zram->table[index].handle;
 
-	/* Requested page is not present in compressed area */
-	if (unlikely(!zram->table[index].handle)) {
-		pr_debug("Read before write: sector=%lu, size=%u",
-			 (ulong)(bio->bi_sector), bio->bi_size);
-		handle_zero_page(bvec);
+	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
-	if (is_partial_io(bvec)) {
-		/* Use  a temporary buffer to decompress the page */
-		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!uncmem) {
-			pr_info("Error allocating temp memory!\n");
-			return -ENOMEM;
-		}
-	}
-
-	user_mem = kmap_atomic(page);
-	if (!is_partial_io(bvec))
-		uncmem = user_mem;
-	clen = PAGE_SIZE;
-
-	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
-				ZS_MM_RO);
-
-	if (zram->table[index].size == PAGE_SIZE) {
-		memcpy(uncmem, cmem, PAGE_SIZE);
-		ret = LZO_E_OK;
-	} else {
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	if (zram->table[index].size == PAGE_SIZE)
+		memcpy(mem, cmem, PAGE_SIZE);
+	else
 		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    uncmem, &clen);
-	}
-
-	if (is_partial_io(bvec)) {
-		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-		       bvec->bv_len);
-		kfree(uncmem);
-	}
-
-	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem);
+						mem, &clen);
+	zs_unmap_object(zram->mem_pool, handle);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -247,36 +210,58 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		return ret;
 	}
 
-	flush_dcache_page(page);
-
 	return 0;
 }
 
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, int offset, struct bio *bio)
 {
 	int ret;
-	size_t clen = PAGE_SIZE;
-	unsigned char *cmem;
-	unsigned long handle = zram->table[index].handle;
+	struct page *page;
+	unsigned char *user_mem, *uncmem = NULL;
 
-	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
-		memset(mem, 0, PAGE_SIZE);
+	page = bvec->bv_page;
+
+	if (unlikely(!zram->table[index].handle) ||
+			zram_test_flag(zram, index, ZRAM_ZERO)) {
+		pr_debug("Handle zero page: sector=%lu, size=%u",
+			 (ulong)(bio->bi_sector), bio->bi_size);
+		handle_zero_page(bvec);
 		return 0;
 	}
 
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
-	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    mem, &clen);
-	zs_unmap_object(zram->mem_pool, handle);
+	user_mem = kmap_atomic(page);
+	if (is_partial_io(bvec))
+		/* Use  a temporary buffer to decompress the page */
+		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	else
+		uncmem = user_mem;
 
+	if (!uncmem) {
+		pr_info("Unable to allocate temp memory\n");
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+
+	ret = zram_decompress_page(zram, uncmem, index);
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 		zram_stat64_inc(zram, &zram->stats.failed_reads);
-		return ret;
+		goto out_cleanup;
 	}
 
-	return 0;
+	if (is_partial_io(bvec))
+		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+				bvec->bv_len);
+
+	flush_dcache_page(page);
+	ret = 0;
+out_cleanup:
+	kunmap_atomic(user_mem);
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+	return ret;
 }
 
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
@@ -302,7 +287,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			ret = -ENOMEM;
 			goto out;
 		}
-		ret = zram_read_before_write(zram, uncmem, index);
+		ret = zram_decompress_page(zram, uncmem, index);
 		if (ret) {
 			kfree(uncmem);
 			goto out;



* Re: [PATCH 1/2] zram: factor-out zram_decompress_page() function (v2)
  2012-10-29 18:05   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function (v2) Sergey Senozhatsky
@ 2012-10-29 18:32     ` Nitin Gupta
  2012-10-29 18:57       ` Sergey Senozhatsky
  2012-10-29 19:00       ` [PATCH 1/2] zram: factor-out zram_decompress_page() function (v3) Sergey Senozhatsky
  0 siblings, 2 replies; 12+ messages in thread
From: Nitin Gupta @ 2012-10-29 18:32 UTC (permalink / raw)
  To: Sergey Senozhatsky; +Cc: Greg Kroah-Hartman, linux-kernel

On Mon, Oct 29, 2012 at 11:05 AM, Sergey Senozhatsky
<sergey.senozhatsky@gmail.com> wrote:
>   zram: factor-out zram_decompress_page() function
>
>   zram_bvec_read() shared decompress functionality with zram_read_before_write() function.
>   Factor-out and make commonly used zram_decompress_page() function, which also simplified
>   error handling in zram_bvec_read().
>
>   V2: changed debug message for handle_zero_page() case to a more general one
>
>   Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
>
> ---
>
>  drivers/staging/zram/zram_drv.c | 115 +++++++++++++++++-----------------------
>  1 file changed, 50 insertions(+), 65 deletions(-)
>
> diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
> index 6edefde..bd5efdf 100644
> --- a/drivers/staging/zram/zram_drv.c
> +++ b/drivers/staging/zram/zram_drv.c
> @@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
>         return bvec->bv_len != PAGE_SIZE;
>  }
>
> -static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> -                         u32 index, int offset, struct bio *bio)
> +static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>  {
> -       int ret;
> -       size_t clen;
> -       struct page *page;
> -       unsigned char *user_mem, *cmem, *uncmem = NULL;
> -
> -       page = bvec->bv_page;
> -
> -       if (zram_test_flag(zram, index, ZRAM_ZERO)) {
> -               handle_zero_page(bvec);
> -               return 0;
> -       }
> +       int ret = LZO_E_OK;
> +       size_t clen = PAGE_SIZE;
> +       unsigned char *cmem;
> +       unsigned long handle = zram->table[index].handle;
>
> -       /* Requested page is not present in compressed area */
> -       if (unlikely(!zram->table[index].handle)) {
> -               pr_debug("Read before write: sector=%lu, size=%u",
> -                        (ulong)(bio->bi_sector), bio->bi_size);
> -               handle_zero_page(bvec);
> +       if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
> +               memset(mem, 0, PAGE_SIZE);
>                 return 0;
>         }
>
> -       if (is_partial_io(bvec)) {
> -               /* Use  a temporary buffer to decompress the page */
> -               uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
> -               if (!uncmem) {
> -                       pr_info("Error allocating temp memory!\n");
> -                       return -ENOMEM;
> -               }
> -       }
> -
> -       user_mem = kmap_atomic(page);
> -       if (!is_partial_io(bvec))
> -               uncmem = user_mem;
> -       clen = PAGE_SIZE;
> -
> -       cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
> -                               ZS_MM_RO);
> -
> -       if (zram->table[index].size == PAGE_SIZE) {
> -               memcpy(uncmem, cmem, PAGE_SIZE);
> -               ret = LZO_E_OK;
> -       } else {
> +       cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
> +       if (zram->table[index].size == PAGE_SIZE)
> +               memcpy(mem, cmem, PAGE_SIZE);
> +       else
>                 ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
> -                                   uncmem, &clen);
> -       }
> -
> -       if (is_partial_io(bvec)) {
> -               memcpy(user_mem + bvec->bv_offset, uncmem + offset,
> -                      bvec->bv_len);
> -               kfree(uncmem);
> -       }
> -
> -       zs_unmap_object(zram->mem_pool, zram->table[index].handle);
> -       kunmap_atomic(user_mem);
> +                                               mem, &clen);
> +       zs_unmap_object(zram->mem_pool, handle);
>
>         /* Should NEVER happen. Return bio error if it does. */
>         if (unlikely(ret != LZO_E_OK)) {
> @@ -247,36 +210,58 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
>                 return ret;
>         }
>
> -       flush_dcache_page(page);
> -
>         return 0;
>  }
>
> -static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
> +static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> +                         u32 index, int offset, struct bio *bio)
>  {
>         int ret;
> -       size_t clen = PAGE_SIZE;
> -       unsigned char *cmem;
> -       unsigned long handle = zram->table[index].handle;
> +       struct page *page;
> +       unsigned char *user_mem, *uncmem = NULL;
>
> -       if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
> -               memset(mem, 0, PAGE_SIZE);
> +       page = bvec->bv_page;
> +
> +       if (unlikely(!zram->table[index].handle) ||
> +                       zram_test_flag(zram, index, ZRAM_ZERO)) {
> +               pr_debug("Handle zero page: sector=%lu, size=%u",
> +                        (ulong)(bio->bi_sector), bio->bi_size);
> +               handle_zero_page(bvec);

Nothing should be printed (even a debug-only message) for
the ZRAM_ZERO case. This case can be quite common for certain
kinds of data and would cause a huge log spew. Also, the (!handle) case
is not the same as the zero-filled page case, so this message would
be misleading.

So we should either get rid of this warning entirely or only do
pr_debug("Read before write ....") for the (!handle) case and log nothing
for the ZRAM_ZERO case.
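
i.e. something like this, purely as an illustration of the second option
(the v3 patch below ends up removing the message instead):

	if (unlikely(!zram->table[index].handle) ||
			zram_test_flag(zram, index, ZRAM_ZERO)) {
		/* log only the read-before-write case, never ZRAM_ZERO */
		if (!zram->table[index].handle)
			pr_debug("Read before write: sector=%lu, size=%u",
				 (ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}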

Nitin


* Re: [PATCH 1/2] zram: factor-out zram_decompress_page() function (v2)
  2012-10-29 18:32     ` Nitin Gupta
@ 2012-10-29 18:57       ` Sergey Senozhatsky
  2012-10-29 19:00       ` [PATCH 1/2] zram: factor-out zram_decompress_page() function (v3) Sergey Senozhatsky
  1 sibling, 0 replies; 12+ messages in thread
From: Sergey Senozhatsky @ 2012-10-29 18:57 UTC (permalink / raw)
  To: Nitin Gupta; +Cc: Greg Kroah-Hartman, Fengguang Wu, linux-kernel

On (10/29/12 11:32), Nitin Gupta wrote:
> 
> Nothing should be printed (even a debug only message) for
> the ZRAM_ZERO case. This case can be quite common for certain
> kinds of data and would cause a huge log spew.  Also (!handle) case
> is not the same as zero-filled page case, so this message would
> be misleading.
> 
> So, we should either get rid of this warning entirely or only do
> pr_debug("Read before write ....") for (!handle) case and log nothing
> for ZRAM_ZERO case.
> 

I'd rather remove this message. Will resend.



By the way, about the use-after-free: I'm afraid your fix does not cover 100% of
the cases.

The problem is with this case:
[..]
334 
335         if (unlikely(clen > max_zpage_size)) {
336                 zram_stat_inc(&zram->stats.bad_compress);
337                 src = uncmem;
338                 clen = PAGE_SIZE;
339         }
340 
[..]

where uncmem could be:
-- kmap'ed page
-- kmalloc'ed page

both of which were unmap'ed/kfree'd before this point. You moved kfree() to the
end of the function, but kunmap_atomic(user_mem) still happens before the
src = uncmem / memcpy(cmem, src, clen) pair.
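
A condensed sketch of that ordering with your patch applied (paraphrased, not
a literal excerpt):

	user_mem = kmap_atomic(page);
	/* uncmem is either a kmalloc'ed buffer (partial I/O) or user_mem itself */
	...
	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       zram->compress_workmem);
	kunmap_atomic(user_mem);	/* the mapping is gone from here on */

	if (unlikely(clen > max_zpage_size)) {
		src = uncmem;		/* but uncmem may be user_mem itself */
		clen = PAGE_SIZE;
	}
	...
	memcpy(cmem, src, clen);	/* use-after-kunmap in the full-page case */

	if (is_partial_io(bvec))
		kfree(uncmem);		/* kfree moved to the end by your patch */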


	-ss


* Re: [PATCH 1/2] zram: factor-out zram_decompress_page() function (v3)
  2012-10-29 18:32     ` Nitin Gupta
  2012-10-29 18:57       ` Sergey Senozhatsky
@ 2012-10-29 19:00       ` Sergey Senozhatsky
  1 sibling, 0 replies; 12+ messages in thread
From: Sergey Senozhatsky @ 2012-10-29 19:00 UTC (permalink / raw)
  To: Nitin Gupta; +Cc: Greg Kroah-Hartman, linux-kernel

  zram: factor-out zram_decompress_page() function

  zram_bvec_read() shares decompression functionality with the zram_read_before_write()
  function. Factor the common code out into a zram_decompress_page() function, which also
  simplifies error handling in zram_bvec_read().

  v3: remove debug message for handle_zero_page() case
  v2: changed debug message for handle_zero_page() case to a more general one

  Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>

---

 drivers/staging/zram/zram_drv.c | 113 +++++++++++++++++-----------------------
 1 file changed, 48 insertions(+), 65 deletions(-)

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 6edefde..fb4a7c9 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
 	return bvec->bv_len != PAGE_SIZE;
 }
 
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-			  u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-	int ret;
-	size_t clen;
-	struct page *page;
-	unsigned char *user_mem, *cmem, *uncmem = NULL;
-
-	page = bvec->bv_page;
-
-	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-		handle_zero_page(bvec);
-		return 0;
-	}
+	int ret = LZO_E_OK;
+	size_t clen = PAGE_SIZE;
+	unsigned char *cmem;
+	unsigned long handle = zram->table[index].handle;
 
-	/* Requested page is not present in compressed area */
-	if (unlikely(!zram->table[index].handle)) {
-		pr_debug("Read before write: sector=%lu, size=%u",
-			 (ulong)(bio->bi_sector), bio->bi_size);
-		handle_zero_page(bvec);
+	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
-	if (is_partial_io(bvec)) {
-		/* Use  a temporary buffer to decompress the page */
-		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!uncmem) {
-			pr_info("Error allocating temp memory!\n");
-			return -ENOMEM;
-		}
-	}
-
-	user_mem = kmap_atomic(page);
-	if (!is_partial_io(bvec))
-		uncmem = user_mem;
-	clen = PAGE_SIZE;
-
-	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
-				ZS_MM_RO);
-
-	if (zram->table[index].size == PAGE_SIZE) {
-		memcpy(uncmem, cmem, PAGE_SIZE);
-		ret = LZO_E_OK;
-	} else {
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	if (zram->table[index].size == PAGE_SIZE)
+		memcpy(mem, cmem, PAGE_SIZE);
+	else
 		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    uncmem, &clen);
-	}
-
-	if (is_partial_io(bvec)) {
-		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-		       bvec->bv_len);
-		kfree(uncmem);
-	}
-
-	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem);
+						mem, &clen);
+	zs_unmap_object(zram->mem_pool, handle);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -247,36 +210,56 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		return ret;
 	}
 
-	flush_dcache_page(page);
-
 	return 0;
 }
 
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, int offset, struct bio *bio)
 {
 	int ret;
-	size_t clen = PAGE_SIZE;
-	unsigned char *cmem;
-	unsigned long handle = zram->table[index].handle;
+	struct page *page;
+	unsigned char *user_mem, *uncmem = NULL;
 
-	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
-		memset(mem, 0, PAGE_SIZE);
+	page = bvec->bv_page;
+
+	if (unlikely(!zram->table[index].handle) ||
+			zram_test_flag(zram, index, ZRAM_ZERO)) {
+		handle_zero_page(bvec);
 		return 0;
 	}
 
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
-	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    mem, &clen);
-	zs_unmap_object(zram->mem_pool, handle);
+	user_mem = kmap_atomic(page);
+	if (is_partial_io(bvec))
+		/* Use  a temporary buffer to decompress the page */
+		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	else
+		uncmem = user_mem;
 
+	if (!uncmem) {
+		pr_info("Unable to allocate temp memory\n");
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+
+	ret = zram_decompress_page(zram, uncmem, index);
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 		zram_stat64_inc(zram, &zram->stats.failed_reads);
-		return ret;
+		goto out_cleanup;
 	}
 
-	return 0;
+	if (is_partial_io(bvec))
+		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+				bvec->bv_len);
+
+	flush_dcache_page(page);
+	ret = 0;
+out_cleanup:
+	kunmap_atomic(user_mem);
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+	return ret;
 }
 
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
@@ -302,7 +285,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			ret = -ENOMEM;
 			goto out;
 		}
-		ret = zram_read_before_write(zram, uncmem, index);
+		ret = zram_decompress_page(zram, uncmem, index);
 		if (ret) {
 			kfree(uncmem);
 			goto out;



* Re: [PATCH 1/2] zram: factor-out zram_decompress_page() function
  2012-10-29 17:14 ` Nitin Gupta
  2012-10-29 17:33   ` Sergey Senozhatsky
  2012-10-29 18:05   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function (v2) Sergey Senozhatsky
@ 2012-10-30 21:04   ` Sergey Senozhatsky
  2012-10-31  3:55     ` Nitin Gupta
  2 siblings, 1 reply; 12+ messages in thread
From: Sergey Senozhatsky @ 2012-10-30 21:04 UTC (permalink / raw)
  To: Nitin Gupta; +Cc: linux-kernel

On (10/29/12 10:14), Nitin Gupta wrote:
> ======
> zram: Fix use-after-free in partial I/O case
> 
> When the compressed size of a page exceeds a threshold, the page is
> stored as-is i.e. in uncompressed form. In the partial I/O i.e.
> non-PAGE_SIZE'ed I/O case, however, the uncompressed memory was being
> freed before it could be copied into the zsmalloc pool resulting in
> use-after-free bug.
> 

Hello Nitin,
hope you are doing well.

How about the following one? I moved some of the code into zram_compress_page()
(very similar to zram_decompress_page()), so errors are easier to handle in
zram_bvec_write(). Now we handle both the use-after-kfree (as you did in your
patch) and the use-after-kunmap.

Please review.

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>

---

 drivers/staging/zram/zram_drv.c | 91 +++++++++++++++++++++--------------------
 1 file changed, 46 insertions(+), 45 deletions(-)

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 47f2e3a..5f37be1 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -213,6 +213,44 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	return 0;
 }
 
+static int zram_compress_page(struct zram *zram, char *uncmem, u32 index)
+{
+	int ret;
+	size_t clen;
+	unsigned long handle;
+	unsigned char *cmem, *src;
+
+	src = zram->compress_buffer;
+	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
+			       zram->compress_workmem);
+	if (unlikely(ret != LZO_E_OK)) {
+		pr_err("Page compression failed: err=%d\n", ret);
+		return ret;
+	}
+
+	if (unlikely(clen > max_zpage_size)) {
+		zram_stat_inc(&zram->stats.bad_compress);
+		src = uncmem;
+		clen = PAGE_SIZE;
+	}
+
+	handle = zs_malloc(zram->mem_pool, clen);
+	if (!handle) {
+		pr_info("Error allocating memory for compressed "
+			"page: %u, size=%zu\n", index, clen);
+		return -ENOMEM;
+	}
+
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+	memcpy(cmem, src, clen);
+	zs_unmap_object(zram->mem_pool, handle);
+
+	zram->table[index].handle = handle;
+	zram->table[index].size = clen;
+
+	return 0;
+}
+
 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 			  u32 index, int offset, struct bio *bio)
 {
@@ -267,13 +305,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 {
 	int ret;
 	size_t clen;
-	unsigned long handle;
 	struct page *page;
-	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
+	unsigned char *user_mem, *uncmem = NULL;
 
 	page = bvec->bv_page;
-	src = zram->compress_buffer;
-
 	if (is_partial_io(bvec)) {
 		/*
 		 * This is a partial IO. We need to read the full page
@@ -286,10 +321,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			goto out;
 		}
 		ret = zram_decompress_page(zram, uncmem, index);
-		if (ret) {
-			kfree(uncmem);
+		if (ret)
 			goto out;
-		}
 	}
 
 	/*
@@ -309,58 +342,26 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		uncmem = user_mem;
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem);
-		if (is_partial_io(bvec))
-			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
 		zram_set_flag(zram, index, ZRAM_ZERO);
 		ret = 0;
 		goto out;
 	}
 
-	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
-			       zram->compress_workmem);
-
-	kunmap_atomic(user_mem);
-	if (is_partial_io(bvec))
-			kfree(uncmem);
-
-	if (unlikely(ret != LZO_E_OK)) {
-		pr_err("Compression failed! err=%d\n", ret);
-		goto out;
-	}
-
-	if (unlikely(clen > max_zpage_size)) {
-		zram_stat_inc(&zram->stats.bad_compress);
-		src = uncmem;
-		clen = PAGE_SIZE;
-	}
-
-	handle = zs_malloc(zram->mem_pool, clen);
-	if (!handle) {
-		pr_info("Error allocating memory for compressed "
-			"page: %u, size=%zu\n", index, clen);
-		ret = -ENOMEM;
+	ret = zram_compress_page(zram, uncmem, index);
+	if (ret)
 		goto out;
-	}
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
-
-	memcpy(cmem, src, clen);
-
-	zs_unmap_object(zram->mem_pool, handle);
-
-	zram->table[index].handle = handle;
-	zram->table[index].size = clen;
 
+	clen = zram->table[index].size;
 	/* Update stats */
 	zram_stat64_add(zram, &zram->stats.compr_size, clen);
 	zram_stat_inc(&zram->stats.pages_stored);
 	if (clen <= PAGE_SIZE / 2)
 		zram_stat_inc(&zram->stats.good_compress);
-
-	return 0;
-
 out:
+	kunmap_atomic(user_mem);
+	if (is_partial_io(bvec))
+		kfree(uncmem);
 	if (ret)
 		zram_stat64_inc(zram, &zram->stats.failed_writes);
 	return ret;



* Re: [PATCH 1/2] zram: factor-out zram_decompress_page() function
  2012-10-30 21:04   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function Sergey Senozhatsky
@ 2012-10-31  3:55     ` Nitin Gupta
  2012-10-31  7:05       ` zram: use after free Sergey Senozhatsky
  0 siblings, 1 reply; 12+ messages in thread
From: Nitin Gupta @ 2012-10-31  3:55 UTC (permalink / raw)
  To: Sergey Senozhatsky; +Cc: linux-kernel

On 10/30/2012 02:04 PM, Sergey Senozhatsky wrote:
> On (10/29/12 10:14), Nitin Gupta wrote:
>> ======
>> zram: Fix use-after-free in partial I/O case
>>
>> When the compressed size of a page exceeds a threshold, the page is
>> stored as-is i.e. in uncompressed form. In the partial I/O i.e.
>> non-PAGE_SIZE'ed I/O case, however, the uncompressed memory was being
>> freed before it could be copied into the zsmalloc pool resulting in
>> use-after-free bug.
>>
>
> Hello Nitin,
> hope you are fine.
>
> how about the following one? I moved some of the code to zram_compress_page()
> (very similar to zram_decompress_page()), so errors are easier to care in
> zram_bvec_write(). now we handle both use after-kfree (as you did in your patch),
> and use after-kunmap.
>
> please review.
>
> Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
>
> ---
>
>   drivers/staging/zram/zram_drv.c | 91 +++++++++++++++++++++--------------------
>   1 file changed, 46 insertions(+), 45 deletions(-)
>
> diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
> index 47f2e3a..5f37be1 100644
> --- a/drivers/staging/zram/zram_drv.c
> +++ b/drivers/staging/zram/zram_drv.c
> @@ -213,6 +213,44 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
>   	return 0;
>   }
>
> +static int zram_compress_page(struct zram *zram, char *uncmem, u32 index)
> +{
> +	int ret;
> +	size_t clen;
> +	unsigned long handle;
> +	unsigned char *cmem, *src;
> +
> +	src = zram->compress_buffer;
> +	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
> +			       zram->compress_workmem);
> +	if (unlikely(ret != LZO_E_OK)) {
> +		pr_err("Page compression failed: err=%d\n", ret);
> +		return ret;
> +	}
> +
> +	if (unlikely(clen > max_zpage_size)) {
> +		zram_stat_inc(&zram->stats.bad_compress);
> +		src = uncmem;
> +		clen = PAGE_SIZE;
> +	}
> +
> +	handle = zs_malloc(zram->mem_pool, clen);
> +	if (!handle) {
> +		pr_info("Error allocating memory for compressed "
> +			"page: %u, size=%zu\n", index, clen);
> +		return -ENOMEM;
> +	}
> +
> +	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
> +	memcpy(cmem, src, clen);
> +	zs_unmap_object(zram->mem_pool, handle);
> +
> +	zram->table[index].handle = handle;
> +	zram->table[index].size = clen;
> +
> +	return 0;
> +}
> +
>   static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
>   			  u32 index, int offset, struct bio *bio)
>   {
> @@ -267,13 +305,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
>   {
>   	int ret;
>   	size_t clen;
> -	unsigned long handle;
>   	struct page *page;
> -	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
> +	unsigned char *user_mem, *uncmem = NULL;
>
>   	page = bvec->bv_page;
> -	src = zram->compress_buffer;
> -
>   	if (is_partial_io(bvec)) {
>   		/*
>   		 * This is a partial IO. We need to read the full page
> @@ -286,10 +321,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
>   			goto out;
>   		}
>   		ret = zram_decompress_page(zram, uncmem, index);
> -		if (ret) {
> -			kfree(uncmem);
> +		if (ret)
>   			goto out;
> -		}
>   	}
>
>   	/*
> @@ -309,58 +342,26 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
>   		uncmem = user_mem;
>
>   	if (page_zero_filled(uncmem)) {
> -		kunmap_atomic(user_mem);
> -		if (is_partial_io(bvec))
> -			kfree(uncmem);
>   		zram_stat_inc(&zram->stats.pages_zero);
>   		zram_set_flag(zram, index, ZRAM_ZERO);
>   		ret = 0;
>   		goto out;
>   	}
>
> -	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
> -			       zram->compress_workmem);
> -
> -	kunmap_atomic(user_mem);
> -	if (is_partial_io(bvec))
> -			kfree(uncmem);
> -
> -	if (unlikely(ret != LZO_E_OK)) {
> -		pr_err("Compression failed! err=%d\n", ret);
> -		goto out;
> -	}
> -
> -	if (unlikely(clen > max_zpage_size)) {
> -		zram_stat_inc(&zram->stats.bad_compress);
> -		src = uncmem;
> -		clen = PAGE_SIZE;
> -	}
> -
> -	handle = zs_malloc(zram->mem_pool, clen);
> -	if (!handle) {
> -		pr_info("Error allocating memory for compressed "
> -			"page: %u, size=%zu\n", index, clen);
> -		ret = -ENOMEM;
> +	ret = zram_compress_page(zram, uncmem, index);
> +	if (ret)
>   		goto out;
> -	}
> -	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
> -
> -	memcpy(cmem, src, clen);
> -
> -	zs_unmap_object(zram->mem_pool, handle);
> -
> -	zram->table[index].handle = handle;
> -	zram->table[index].size = clen;
>
> +	clen = zram->table[index].size;
>   	/* Update stats */
>   	zram_stat64_add(zram, &zram->stats.compr_size, clen);
>   	zram_stat_inc(&zram->stats.pages_stored);
>   	if (clen <= PAGE_SIZE / 2)
>   		zram_stat_inc(&zram->stats.good_compress);
> -
> -	return 0;
> -
>   out:
> +	kunmap_atomic(user_mem);

We are doing zs_malloc + zs_map/unmap + memcpy while keeping a page
kmap'ed. We really must release it as soon as possible, so
zram_compress_page() should just do the compression and return the results,
or we should keep the direct call to lzo_compress in zram_bvec_write(), since
we are not gaining anything from this refactoring, unlike the decompress case.
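
That is, roughly (illustrative only; error handling and the
clen > max_zpage_size case are left out):

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, zram->compress_buffer, &clen,
			       zram->compress_workmem);
	kunmap_atomic(user_mem);	/* release the atomic mapping early */

	/* allocation and copy no longer run with the page kmap'ed */
	handle = zs_malloc(zram->mem_pool, clen);
	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
	memcpy(cmem, zram->compress_buffer, clen);
	zs_unmap_object(zram->mem_pool, handle);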

Do we have a way to generate these partial I/Os so we can exercise these 
code paths?

Thanks,
Nitin



* Re: zram: use after free
  2012-10-31  3:55     ` Nitin Gupta
@ 2012-10-31  7:05       ` Sergey Senozhatsky
  0 siblings, 0 replies; 12+ messages in thread
From: Sergey Senozhatsky @ 2012-10-31  7:05 UTC (permalink / raw)
  To: Nitin Gupta; +Cc: linux-kernel

I've renamed the topic.


On (10/30/12 20:55), Nitin Gupta wrote:
> >>======
> >>zram: Fix use-after-free in partial I/O case
> >>
> >>When the compressed size of a page exceeds a threshold, the page is
> >>stored as-is i.e. in uncompressed form. In the partial I/O i.e.
> >>non-PAGE_SIZE'ed I/O case, however, the uncompressed memory was being
> >>freed before it could be copied into the zsmalloc pool resulting in
> >>use-after-free bug.
> >>
> >
> >Hello Nitin,
> >hope you are fine.
> >
> >how about the following one? I moved some of the code to zram_compress_page()
> >(very similar to zram_decompress_page()), so errors are easier to care in
> >zram_bvec_write(). now we handle both use after-kfree (as you did in your patch),
> >and use after-kunmap.
> >
> >  drivers/staging/zram/zram_drv.c | 91 +++++++++++++++++++++--------------------
> >  1 file changed, 46 insertions(+), 45 deletions(-)
> >
> >diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
> >index 47f2e3a..5f37be1 100644
> >--- a/drivers/staging/zram/zram_drv.c
> >+++ b/drivers/staging/zram/zram_drv.c
> >@@ -213,6 +213,44 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> >  	return 0;
> >  }
> >
> >+static int zram_compress_page(struct zram *zram, char *uncmem, u32 index)
> >+{
> >+	int ret;
> >+	size_t clen;
> >+	unsigned long handle;
> >+	unsigned char *cmem, *src;
> >+
> >+	src = zram->compress_buffer;
> >+	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
> >+			       zram->compress_workmem);
> >+	if (unlikely(ret != LZO_E_OK)) {
> >+		pr_err("Page compression failed: err=%d\n", ret);
> >+		return ret;
> >+	}
> >+
> >+	if (unlikely(clen > max_zpage_size)) {
> >+		zram_stat_inc(&zram->stats.bad_compress);
> >+		src = uncmem;
> >+		clen = PAGE_SIZE;
> >+	}
> >+
> >+	handle = zs_malloc(zram->mem_pool, clen);
> >+	if (!handle) {
> >+		pr_info("Error allocating memory for compressed "
> >+			"page: %u, size=%zu\n", index, clen);
> >+		return -ENOMEM;
> >+	}
> >+
> >+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
> >+	memcpy(cmem, src, clen);
> >+	zs_unmap_object(zram->mem_pool, handle);
> >+
> >+	zram->table[index].handle = handle;
> >+	zram->table[index].size = clen;
> >+
> >+	return 0;
> >+}
> >+
> >  static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> >  			  u32 index, int offset, struct bio *bio)
> >  {
> >@@ -267,13 +305,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
> >  {
> >  	int ret;
> >  	size_t clen;
> >-	unsigned long handle;
> >  	struct page *page;
> >-	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
> >+	unsigned char *user_mem, *uncmem = NULL;
> >
> >  	page = bvec->bv_page;
> >-	src = zram->compress_buffer;
> >-
> >  	if (is_partial_io(bvec)) {
> >  		/*
> >  		 * This is a partial IO. We need to read the full page
> >@@ -286,10 +321,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
> >  			goto out;
> >  		}
> >  		ret = zram_decompress_page(zram, uncmem, index);
> >-		if (ret) {
> >-			kfree(uncmem);
> >+		if (ret)
> >  			goto out;
> >-		}
> >  	}
> >
> >  	/*
> >@@ -309,58 +342,26 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
> >  		uncmem = user_mem;
> >
> >  	if (page_zero_filled(uncmem)) {
> >-		kunmap_atomic(user_mem);
> >-		if (is_partial_io(bvec))
> >-			kfree(uncmem);
> >  		zram_stat_inc(&zram->stats.pages_zero);
> >  		zram_set_flag(zram, index, ZRAM_ZERO);
> >  		ret = 0;
> >  		goto out;
> >  	}
> >
> >-	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
> >-			       zram->compress_workmem);
> >-
> >-	kunmap_atomic(user_mem);
> >-	if (is_partial_io(bvec))
> >-			kfree(uncmem);
> >-
> >-	if (unlikely(ret != LZO_E_OK)) {
> >-		pr_err("Compression failed! err=%d\n", ret);
> >-		goto out;
> >-	}
> >-
> >-	if (unlikely(clen > max_zpage_size)) {
> >-		zram_stat_inc(&zram->stats.bad_compress);
> >-		src = uncmem;
> >-		clen = PAGE_SIZE;
> >-	}
> >-
> >-	handle = zs_malloc(zram->mem_pool, clen);
> >-	if (!handle) {
> >-		pr_info("Error allocating memory for compressed "
> >-			"page: %u, size=%zu\n", index, clen);
> >-		ret = -ENOMEM;
> >+	ret = zram_compress_page(zram, uncmem, index);
> >+	if (ret)
> >  		goto out;
> >-	}
> >-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
> >-
> >-	memcpy(cmem, src, clen);
> >-
> >-	zs_unmap_object(zram->mem_pool, handle);
> >-
> >-	zram->table[index].handle = handle;
> >-	zram->table[index].size = clen;
> >
> >+	clen = zram->table[index].size;
> >  	/* Update stats */
> >  	zram_stat64_add(zram, &zram->stats.compr_size, clen);
> >  	zram_stat_inc(&zram->stats.pages_stored);
> >  	if (clen <= PAGE_SIZE / 2)
> >  		zram_stat_inc(&zram->stats.good_compress);
> >-
> >-	return 0;
> >-
> >  out:
> >+	kunmap_atomic(user_mem);
> 
> We are doing zs_malloc + zs_map/unmap + memcpy while keeping a page
> kmap'ed. We must really release it as soon as possible, so
> zs_compress_page() should just do compression and return with
> results, or just keep direct can to lzo_compress in bvec_write()
> since we are not gaining anything by this refactoring, unlike the
> decompress case.
>

Yeah, we can unmap and then map again as a slow path for
	`clen > max_zpage_size'

In that case zram_compress_page() is not helping. The main reason behind this
function was to make zram_bvec_write() simpler; if we count on our fingers
everything zram_bvec_write() is responsible for, it turns out to be a pretty
mighty function. I'll think about this.


> Do we have a way to generate these partial I/Os so we can exercise
> these code paths?
> 

I'll try.

	-ss

> Thanks,
> Nitin
> 


* Re: [PATCH 1/2] zram: factor-out zram_decompress_page() function
  2012-10-30 18:58   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function Sergey Senozhatsky
@ 2012-10-30 19:18     ` Greg Kroah-Hartman
  0 siblings, 0 replies; 12+ messages in thread
From: Greg Kroah-Hartman @ 2012-10-30 19:18 UTC (permalink / raw)
  To: Sergey Senozhatsky; +Cc: Nitin Gupta, linux-kernel

On Tue, Oct 30, 2012 at 09:58:42PM +0300, Sergey Senozhatsky wrote:
>   zram: factor-out zram_decompress_page() function

What's with the indentation?

And including the Subject: again here?

> 
>   zram_bvec_read() shared decompress functionality with zram_read_before_write() function.
>   Factor-out and make commonly used zram_decompress_page() function, which also simplified
>   error handling in zram_bvec_read().
> 
>   Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>

Where did Nitin's ack go?

I would have to edit these by hand to apply them, which isn't ok.
Please fix this up, add Nitin's acks, and resend properly.

thanks,

greg k-h


* [PATCH 1/2] zram: factor-out zram_decompress_page() function
  2012-10-30 18:04 ` Greg Kroah-Hartman
@ 2012-10-30 18:58   ` Sergey Senozhatsky
  2012-10-30 19:18     ` Greg Kroah-Hartman
  0 siblings, 1 reply; 12+ messages in thread
From: Sergey Senozhatsky @ 2012-10-30 18:58 UTC (permalink / raw)
  To: Greg Kroah-Hartman; +Cc: Nitin Gupta, linux-kernel

  zram: factor-out zram_decompress_page() function

  zram_bvec_read() shares decompression functionality with the zram_read_before_write()
  function. Factor the common code out into a zram_decompress_page() function, which also
  simplifies error handling in zram_bvec_read().

  Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>

---

 drivers/staging/zram/zram_drv.c | 113 +++++++++++++++++-----------------------
 1 file changed, 48 insertions(+), 65 deletions(-)

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 6edefde..fb4a7c9 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
 	return bvec->bv_len != PAGE_SIZE;
 }
 
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-			  u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-	int ret;
-	size_t clen;
-	struct page *page;
-	unsigned char *user_mem, *cmem, *uncmem = NULL;
-
-	page = bvec->bv_page;
-
-	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-		handle_zero_page(bvec);
-		return 0;
-	}
+	int ret = LZO_E_OK;
+	size_t clen = PAGE_SIZE;
+	unsigned char *cmem;
+	unsigned long handle = zram->table[index].handle;
 
-	/* Requested page is not present in compressed area */
-	if (unlikely(!zram->table[index].handle)) {
-		pr_debug("Read before write: sector=%lu, size=%u",
-			 (ulong)(bio->bi_sector), bio->bi_size);
-		handle_zero_page(bvec);
+	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
-	if (is_partial_io(bvec)) {
-		/* Use  a temporary buffer to decompress the page */
-		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!uncmem) {
-			pr_info("Error allocating temp memory!\n");
-			return -ENOMEM;
-		}
-	}
-
-	user_mem = kmap_atomic(page);
-	if (!is_partial_io(bvec))
-		uncmem = user_mem;
-	clen = PAGE_SIZE;
-
-	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
-				ZS_MM_RO);
-
-	if (zram->table[index].size == PAGE_SIZE) {
-		memcpy(uncmem, cmem, PAGE_SIZE);
-		ret = LZO_E_OK;
-	} else {
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	if (zram->table[index].size == PAGE_SIZE)
+		memcpy(mem, cmem, PAGE_SIZE);
+	else
 		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    uncmem, &clen);
-	}
-
-	if (is_partial_io(bvec)) {
-		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-		       bvec->bv_len);
-		kfree(uncmem);
-	}
-
-	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem);
+						mem, &clen);
+	zs_unmap_object(zram->mem_pool, handle);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -247,36 +210,56 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		return ret;
 	}
 
-	flush_dcache_page(page);
-
 	return 0;
 }
 
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, int offset, struct bio *bio)
 {
 	int ret;
-	size_t clen = PAGE_SIZE;
-	unsigned char *cmem;
-	unsigned long handle = zram->table[index].handle;
+	struct page *page;
+	unsigned char *user_mem, *uncmem = NULL;
 
-	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
-		memset(mem, 0, PAGE_SIZE);
+	page = bvec->bv_page;
+
+	if (unlikely(!zram->table[index].handle) ||
+			zram_test_flag(zram, index, ZRAM_ZERO)) {
+		handle_zero_page(bvec);
 		return 0;
 	}
 
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
-	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    mem, &clen);
-	zs_unmap_object(zram->mem_pool, handle);
+	user_mem = kmap_atomic(page);
+	if (is_partial_io(bvec))
+		/* Use  a temporary buffer to decompress the page */
+		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	else
+		uncmem = user_mem;
 
+	if (!uncmem) {
+		pr_info("Unable to allocate temp memory\n");
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+
+	ret = zram_decompress_page(zram, uncmem, index);
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 		zram_stat64_inc(zram, &zram->stats.failed_reads);
-		return ret;
+		goto out_cleanup;
 	}
 
-	return 0;
+	if (is_partial_io(bvec))
+		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+				bvec->bv_len);
+
+	flush_dcache_page(page);
+	ret = 0;
+out_cleanup:
+	kunmap_atomic(user_mem);
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+	return ret;
 }
 
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
@@ -302,7 +285,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			ret = -ENOMEM;
 			goto out;
 		}
-		ret = zram_read_before_write(zram, uncmem, index);
+		ret = zram_decompress_page(zram, uncmem, index);
 		if (ret) {
 			kfree(uncmem);
 			goto out;



Thread overview: 12+ messages
2012-10-27 16:00 [PATCH 1/2] zram: factor-out zram_decompress_page() function Sergey Senozhatsky
2012-10-29 17:14 ` Nitin Gupta
2012-10-29 17:33   ` Sergey Senozhatsky
2012-10-29 18:05   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function (v2) Sergey Senozhatsky
2012-10-29 18:32     ` Nitin Gupta
2012-10-29 18:57       ` Sergey Senozhatsky
2012-10-29 19:00       ` [PATCH 1/2] zram: factor-out zram_decompress_page() function (v3) Sergey Senozhatsky
2012-10-30 21:04   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function Sergey Senozhatsky
2012-10-31  3:55     ` Nitin Gupta
2012-10-31  7:05       ` zram: use after free Sergey Senozhatsky
2012-10-30  9:03 [PATCH 2/2] zram: permit sleeping while in pool zs_malloc() Sergey Senozhatsky
2012-10-30 18:04 ` Greg Kroah-Hartman
2012-10-30 18:58   ` [PATCH 1/2] zram: factor-out zram_decompress_page() function Sergey Senozhatsky
2012-10-30 19:18     ` Greg Kroah-Hartman
