* [PATCH] z3fold: extend compaction function
@ 2016-11-03 21:04 ` Vitaly Wool
  0 siblings, 0 replies; 12+ messages in thread
From: Vitaly Wool @ 2016-11-03 21:04 UTC (permalink / raw)
  To: Linux-MM, linux-kernel; +Cc: Dan Streetman, Andrew Morton

z3fold_compact_page() currently only handles the situation when
there's a single middle chunk within the z3fold page. However it
may be worth it to move middle chunk closer to either first or
last chunk, whichever is there, if the gap between them is big
enough.

This patch adds the relevant code, using BIG_CHUNK_GAP define as
a threshold for middle chunk to be worth moving.

Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
---
 mm/z3fold.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 47 insertions(+), 13 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 4d02280..fea6791 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -250,26 +250,60 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
 	kfree(pool);
 }
 
+static inline void *mchunk_memmove(struct z3fold_header *zhdr,
+				unsigned short dst_chunk)
+{
+	void *beg = zhdr;
+	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
+		       beg + (zhdr->start_middle << CHUNK_SHIFT),
+		       zhdr->middle_chunks << CHUNK_SHIFT);
+}
+
+#define BIG_CHUNK_GAP	3
 /* Has to be called with lock held */
 static int z3fold_compact_page(struct z3fold_header *zhdr)
 {
 	struct page *page = virt_to_page(zhdr);
-	void *beg = zhdr;
+	int ret = 0;
+
+	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
+		goto out;
 
+	if (zhdr->middle_chunks != 0) {
+		if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+			mchunk_memmove(zhdr, 1); /* move to the beginning */
+			zhdr->first_chunks = zhdr->middle_chunks;
+			zhdr->middle_chunks = 0;
+			zhdr->start_middle = 0;
+			zhdr->first_num++;
+			ret = 1;
+			goto out;
+		}
 
-	if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
-	    zhdr->middle_chunks != 0 &&
-	    zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
-		memmove(beg + ZHDR_SIZE_ALIGNED,
-			beg + (zhdr->start_middle << CHUNK_SHIFT),
-			zhdr->middle_chunks << CHUNK_SHIFT);
-		zhdr->first_chunks = zhdr->middle_chunks;
-		zhdr->middle_chunks = 0;
-		zhdr->start_middle = 0;
-		zhdr->first_num++;
-		return 1;
+		/*
+		 * moving data is expensive, so let's only do that if
+		 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
+		 */
+		if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
+		    zhdr->start_middle > zhdr->first_chunks + BIG_CHUNK_GAP) {
+			mchunk_memmove(zhdr, zhdr->first_chunks + 1);
+			zhdr->start_middle = zhdr->first_chunks + 1;
+			ret = 1;
+			goto out;
+		}
+		if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
+		    zhdr->middle_chunks + zhdr->last_chunks <=
+		    NCHUNKS - zhdr->start_middle - BIG_CHUNK_GAP) {
+			unsigned short new_start = NCHUNKS - zhdr->last_chunks -
+				zhdr->middle_chunks;
+			mchunk_memmove(zhdr, new_start);
+			zhdr->start_middle = new_start;
+			ret = 1;
+			goto out;
+		}
 	}
-	return 0;
+out:
+	return ret;
 }
 
 /**
-- 
2.4.2

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH] z3fold: extend compaction function
  2016-11-03 21:04 ` Vitaly Wool
@ 2016-11-03 21:16   ` Andrew Morton
  -1 siblings, 0 replies; 12+ messages in thread
From: Andrew Morton @ 2016-11-03 21:16 UTC (permalink / raw)
  To: Vitaly Wool; +Cc: Linux-MM, linux-kernel, Dan Streetman

On Thu, 3 Nov 2016 22:04:28 +0100 Vitaly Wool <vitalywool@gmail.com> wrote:

> z3fold_compact_page() currently only handles the situation when
> there's a single middle chunk within the z3fold page. However it
> may be worth it to move middle chunk closer to either first or
> last chunk, whichever is there, if the gap between them is big
> enough.

"may be worth it" is vague.  Does the patch improve the driver or does
it not?  If it *does* improve the driver then in what way?  *Why* is is
"worth it"?

> This patch adds the relevant code, using BIG_CHUNK_GAP define as
> a threshold for middle chunk to be worth moving.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] z3fold: extend compaction function
  2016-11-03 21:16   ` Andrew Morton
@ 2016-11-03 21:35     ` Vitaly Wool
  -1 siblings, 0 replies; 12+ messages in thread
From: Vitaly Wool @ 2016-11-03 21:35 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Linux-MM, LKML, Dan Streetman

On Thu, Nov 3, 2016 at 10:16 PM, Andrew Morton
<akpm@linux-foundation.org> wrote:
> On Thu, 3 Nov 2016 22:04:28 +0100 Vitaly Wool <vitalywool@gmail.com> wrote:
>
>> z3fold_compact_page() currently only handles the situation when
>> there's a single middle chunk within the z3fold page. However it
>> may be worth it to move middle chunk closer to either first or
>> last chunk, whichever is there, if the gap between them is big
>> enough.
>
> "may be worth it" is vague.  Does the patch improve the driver or does
> it not?  If it *does* improve the driver then in what way?  *Why* is is
> "worth it"?

Yep, I must admit I wasn't clear enough here. Compression-ratio-wise,
it always makes sense to move the middle chunk as close as possible to
another in-page z3fold object, because the third object can then use
all the remaining space. However, moving a big object by just one
chunk hurts performance without gaining much in compression ratio. So
the gap between the middle object and the edge object should be big
enough to justify the move.
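
To put rough numbers on it: assuming 4K pages and the default
NCHUNKS_ORDER of 6, a chunk is 64 bytes, so the
"start_middle > first_chunks + BIG_CHUNK_GAP" check means we only
memmove() the middle object when that reclaims at least BIG_CHUNK_GAP
(i.e. 3) chunks, or 192 contiguous bytes, rather than shuffling up to
a page worth of data to win a single 64-byte chunk.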

So:
this patch improves compression ratio, because in-page compaction
becomes more comprehensive;
this patch (which came as a surprise) also increases performance in
fio randrw tests (I am not 100% sure why, but probably due to fewer
actual page allocations on the hot path, thanks to denser in-page
allocation).

~vitaly

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] z3fold: extend compaction function
  2016-11-03 21:04 ` Vitaly Wool
@ 2016-11-25 14:43   ` Dan Streetman
  -1 siblings, 0 replies; 12+ messages in thread
From: Dan Streetman @ 2016-11-25 14:43 UTC (permalink / raw)
  To: Vitaly Wool; +Cc: Linux-MM, linux-kernel, Andrew Morton

On Thu, Nov 3, 2016 at 5:04 PM, Vitaly Wool <vitalywool@gmail.com> wrote:
> z3fold_compact_page() currently only handles the situation when
> there's a single middle chunk within the z3fold page. However it
> may be worth it to move middle chunk closer to either first or
> last chunk, whichever is there, if the gap between them is big
> enough.
>
> This patch adds the relevant code, using BIG_CHUNK_GAP define as
> a threshold for middle chunk to be worth moving.
>
> Signed-off-by: Vitaly Wool <vitalywool@gmail.com>

with the bikeshedding comments below, looks good.

Acked-by: Dan Streetman <ddstreet@ieee.org>

> ---
>  mm/z3fold.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++-------------
>  1 file changed, 47 insertions(+), 13 deletions(-)
>
> diff --git a/mm/z3fold.c b/mm/z3fold.c
> index 4d02280..fea6791 100644
> --- a/mm/z3fold.c
> +++ b/mm/z3fold.c
> @@ -250,26 +250,60 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
>         kfree(pool);
>  }
>
> +static inline void *mchunk_memmove(struct z3fold_header *zhdr,
> +                               unsigned short dst_chunk)
> +{
> +       void *beg = zhdr;
> +       return memmove(beg + (dst_chunk << CHUNK_SHIFT),
> +                      beg + (zhdr->start_middle << CHUNK_SHIFT),
> +                      zhdr->middle_chunks << CHUNK_SHIFT);
> +}
> +
> +#define BIG_CHUNK_GAP  3
>  /* Has to be called with lock held */
>  static int z3fold_compact_page(struct z3fold_header *zhdr)
>  {
>         struct page *page = virt_to_page(zhdr);
> -       void *beg = zhdr;
> +       int ret = 0;
> +
> +       if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
> +               goto out;
>
> +       if (zhdr->middle_chunks != 0) {

bikeshed: this check could be moved up too: if there's no middle
chunk there is no compacting to do and we can just return 0.  saves a
tab in all the code below.
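
e.g. something like this at the top of the function (untested):

	if (zhdr->middle_chunks == 0)
		return 0; /* no middle chunk, nothing to compact */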

> +               if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
> +                       mchunk_memmove(zhdr, 1); /* move to the beginning */
> +                       zhdr->first_chunks = zhdr->middle_chunks;
> +                       zhdr->middle_chunks = 0;
> +                       zhdr->start_middle = 0;
> +                       zhdr->first_num++;
> +                       ret = 1;
> +                       goto out;
> +               }
>
> -       if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
> -           zhdr->middle_chunks != 0 &&
> -           zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
> -               memmove(beg + ZHDR_SIZE_ALIGNED,
> -                       beg + (zhdr->start_middle << CHUNK_SHIFT),
> -                       zhdr->middle_chunks << CHUNK_SHIFT);
> -               zhdr->first_chunks = zhdr->middle_chunks;
> -               zhdr->middle_chunks = 0;
> -               zhdr->start_middle = 0;
> -               zhdr->first_num++;
> -               return 1;
> +               /*
> +                * moving data is expensive, so let's only do that if
> +                * there's substantial gain (at least BIG_CHUNK_GAP chunks)
> +                */
> +               if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
> +                   zhdr->start_middle > zhdr->first_chunks + BIG_CHUNK_GAP) {
> +                       mchunk_memmove(zhdr, zhdr->first_chunks + 1);
> +                       zhdr->start_middle = zhdr->first_chunks + 1;
> +                       ret = 1;
> +                       goto out;
> +               }
> +               if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
> +                   zhdr->middle_chunks + zhdr->last_chunks <=
> +                   NCHUNKS - zhdr->start_middle - BIG_CHUNK_GAP) {
> +                       unsigned short new_start = NCHUNKS - zhdr->last_chunks -
> +                               zhdr->middle_chunks;
> +                       mchunk_memmove(zhdr, new_start);
> +                       zhdr->start_middle = new_start;
> +                       ret = 1;
> +                       goto out;
> +               }
>         }
> -       return 0;
> +out:
> +       return ret;

bikeshed: do we need all the goto out?  why not just return 0/1
appropriately above?
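
For illustration, the whole function with both bikesheds applied could
look like this (untested sketch on top of this patch, still using
NCHUNKS as the patch does):

static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0;
	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, 1);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle > zhdr->first_chunks + BIG_CHUNK_GAP) {
		/* shift the middle object up next to the first one */
		mchunk_memmove(zhdr, zhdr->first_chunks + 1);
		zhdr->start_middle = zhdr->first_chunks + 1;
		return 1;
	}
	if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
	    zhdr->middle_chunks + zhdr->last_chunks <=
	    NCHUNKS - zhdr->start_middle - BIG_CHUNK_GAP) {
		/* shift the middle object down next to the last one */
		unsigned short new_start = NCHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;

		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}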

>  }
>
>  /**
> --
> 2.4.2

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] z3fold: extend compaction function
  2016-11-25 14:43   ` Dan Streetman
@ 2016-11-25 21:17     ` Dan Streetman
  -1 siblings, 0 replies; 12+ messages in thread
From: Dan Streetman @ 2016-11-25 21:17 UTC (permalink / raw)
  To: Vitaly Wool; +Cc: Linux-MM, linux-kernel, Andrew Morton

On Fri, Nov 25, 2016 at 9:43 AM, Dan Streetman <ddstreet@ieee.org> wrote:
> On Thu, Nov 3, 2016 at 5:04 PM, Vitaly Wool <vitalywool@gmail.com> wrote:
>> z3fold_compact_page() currently only handles the situation when
>> there's a single middle chunk within the z3fold page. However it
>> may be worth it to move middle chunk closer to either first or
>> last chunk, whichever is there, if the gap between them is big
>> enough.
>>
>> This patch adds the relevant code, using BIG_CHUNK_GAP define as
>> a threshold for middle chunk to be worth moving.
>>
>> Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
>
> with the bikeshedding comments below, looks good.
>
> Acked-by: Dan Streetman <ddstreet@ieee.org>
>
>> ---
>>  mm/z3fold.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++-------------
>>  1 file changed, 47 insertions(+), 13 deletions(-)
>>
>> diff --git a/mm/z3fold.c b/mm/z3fold.c
>> index 4d02280..fea6791 100644
>> --- a/mm/z3fold.c
>> +++ b/mm/z3fold.c
>> @@ -250,26 +250,60 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
>>         kfree(pool);
>>  }
>>
>> +static inline void *mchunk_memmove(struct z3fold_header *zhdr,
>> +                               unsigned short dst_chunk)
>> +{
>> +       void *beg = zhdr;
>> +       return memmove(beg + (dst_chunk << CHUNK_SHIFT),
>> +                      beg + (zhdr->start_middle << CHUNK_SHIFT),
>> +                      zhdr->middle_chunks << CHUNK_SHIFT);
>> +}
>> +
>> +#define BIG_CHUNK_GAP  3
>>  /* Has to be called with lock held */
>>  static int z3fold_compact_page(struct z3fold_header *zhdr)
>>  {
>>         struct page *page = virt_to_page(zhdr);
>> -       void *beg = zhdr;
>> +       int ret = 0;
>> +
>> +       if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
>> +               goto out;
>>
>> +       if (zhdr->middle_chunks != 0) {
>
> bikeshed: this check could be moved up too: if there's no middle
> chunk there is no compacting to do and we can just return 0.  saves a
> tab in all the code below.
>
>> +               if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
>> +                       mchunk_memmove(zhdr, 1); /* move to the beginning */
>> +                       zhdr->first_chunks = zhdr->middle_chunks;
>> +                       zhdr->middle_chunks = 0;
>> +                       zhdr->start_middle = 0;
>> +                       zhdr->first_num++;
>> +                       ret = 1;
>> +                       goto out;
>> +               }
>>
>> -       if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
>> -           zhdr->middle_chunks != 0 &&
>> -           zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
>> -               memmove(beg + ZHDR_SIZE_ALIGNED,
>> -                       beg + (zhdr->start_middle << CHUNK_SHIFT),
>> -                       zhdr->middle_chunks << CHUNK_SHIFT);
>> -               zhdr->first_chunks = zhdr->middle_chunks;
>> -               zhdr->middle_chunks = 0;
>> -               zhdr->start_middle = 0;
>> -               zhdr->first_num++;
>> -               return 1;
>> +               /*
>> +                * moving data is expensive, so let's only do that if
>> +                * there's substantial gain (at least BIG_CHUNK_GAP chunks)
>> +                */
>> +               if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
>> +                   zhdr->start_middle > zhdr->first_chunks + BIG_CHUNK_GAP) {
>> +                       mchunk_memmove(zhdr, zhdr->first_chunks + 1);
>> +                       zhdr->start_middle = zhdr->first_chunks + 1;
>> +                       ret = 1;
>> +                       goto out;
>> +               }
>> +               if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
>> +                   zhdr->middle_chunks + zhdr->last_chunks <=
>> +                   NCHUNKS - zhdr->start_middle - BIG_CHUNK_GAP) {
>> +                       unsigned short new_start = NCHUNKS - zhdr->last_chunks -
>> +                               zhdr->middle_chunks;

after closer review, I see that this is wrong.  NCHUNKS isn't the
total number of page chunks, it's the total number of chunks minus the
header chunk(s).  So the calculation of the new start above is wrong:
it should use the total number of page chunks, not NCHUNKS, because
start_middle already accounts for the header chunk(s).  Probably a new
macro would help.

Also, the num_free_chunks() function makes the same mistake:

int nfree_after = zhdr->last_chunks ?
  0 : NCHUNKS - zhdr->start_middle - zhdr->middle_chunks;

that's wrong, it should be something like:

#define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
...
int nfree_after = zhdr->last_chunks ?
  0 : TOTAL_CHUNKS - zhdr->start_middle - zhdr->middle_chunks;
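
and, with that macro, the last-chunk compaction branch in this patch
would become something like (untested):

if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
    zhdr->middle_chunks + zhdr->last_chunks <=
    TOTAL_CHUNKS - zhdr->start_middle - BIG_CHUNK_GAP) {
	/* start_middle is an offset from the page start, so the new
	 * start must be computed from the total page chunks too */
	unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
		zhdr->middle_chunks;

	mchunk_memmove(zhdr, new_start);
	zhdr->start_middle = new_start;
	ret = 1;
	goto out;
}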


>> +                       mchunk_memmove(zhdr, new_start);
>> +                       zhdr->start_middle = new_start;
>> +                       ret = 1;
>> +                       goto out;
>> +               }
>>         }
>> -       return 0;
>> +out:
>> +       return ret;
>
> bikeshed: do we need all the goto out?  why not just return 0/1
> appropriately above?
>
>>  }
>>
>>  /**
>> --
>> 2.4.2

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] z3fold: extend compaction function
  2016-11-25 21:17     ` Dan Streetman
@ 2016-11-26  9:09       ` Vitaly Wool
  -1 siblings, 0 replies; 12+ messages in thread
From: Vitaly Wool @ 2016-11-26  9:09 UTC (permalink / raw)
  To: Dan Streetman; +Cc: Linux-MM, linux-kernel, Andrew Morton

On Fri, Nov 25, 2016 at 10:17 PM, Dan Streetman <ddstreet@ieee.org> wrote:
> On Fri, Nov 25, 2016 at 9:43 AM, Dan Streetman <ddstreet@ieee.org> wrote:
>> On Thu, Nov 3, 2016 at 5:04 PM, Vitaly Wool <vitalywool@gmail.com> wrote:
>>> z3fold_compact_page() currently only handles the situation when
>>> there's a single middle chunk within the z3fold page. However it
>>> may be worth it to move middle chunk closer to either first or
>>> last chunk, whichever is there, if the gap between them is big
>>> enough.
>>>
>>> This patch adds the relevant code, using BIG_CHUNK_GAP define as
>>> a threshold for middle chunk to be worth moving.
>>>
>>> Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
>>
>> with the bikeshedding comments below, looks good.
>>
>> Acked-by: Dan Streetman <ddstreet@ieee.org>
>>
>>> ---
>>>  mm/z3fold.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++-------------
>>>  1 file changed, 47 insertions(+), 13 deletions(-)
>>>
>>> diff --git a/mm/z3fold.c b/mm/z3fold.c
>>> index 4d02280..fea6791 100644
>>> --- a/mm/z3fold.c
>>> +++ b/mm/z3fold.c
>>> @@ -250,26 +250,60 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
>>>         kfree(pool);
>>>  }
>>>
>>> +static inline void *mchunk_memmove(struct z3fold_header *zhdr,
>>> +                               unsigned short dst_chunk)
>>> +{
>>> +       void *beg = zhdr;
>>> +       return memmove(beg + (dst_chunk << CHUNK_SHIFT),
>>> +                      beg + (zhdr->start_middle << CHUNK_SHIFT),
>>> +                      zhdr->middle_chunks << CHUNK_SHIFT);
>>> +}
>>> +
>>> +#define BIG_CHUNK_GAP  3
>>>  /* Has to be called with lock held */
>>>  static int z3fold_compact_page(struct z3fold_header *zhdr)
>>>  {
>>>         struct page *page = virt_to_page(zhdr);
>>> -       void *beg = zhdr;
>>> +       int ret = 0;
>>> +
>>> +       if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
>>> +               goto out;
>>>
>>> +       if (zhdr->middle_chunks != 0) {
>>
>> bikeshed: this check could be moved up too: if there's no middle
>> chunk there is no compacting to do and we can just return 0.  saves a
>> tab in all the code below.
>>
>>> +               if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
>>> +                       mchunk_memmove(zhdr, 1); /* move to the beginning */
>>> +                       zhdr->first_chunks = zhdr->middle_chunks;
>>> +                       zhdr->middle_chunks = 0;
>>> +                       zhdr->start_middle = 0;
>>> +                       zhdr->first_num++;
>>> +                       ret = 1;
>>> +                       goto out;
>>> +               }
>>>
>>> -       if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
>>> -           zhdr->middle_chunks != 0 &&
>>> -           zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
>>> -               memmove(beg + ZHDR_SIZE_ALIGNED,
>>> -                       beg + (zhdr->start_middle << CHUNK_SHIFT),
>>> -                       zhdr->middle_chunks << CHUNK_SHIFT);
>>> -               zhdr->first_chunks = zhdr->middle_chunks;
>>> -               zhdr->middle_chunks = 0;
>>> -               zhdr->start_middle = 0;
>>> -               zhdr->first_num++;
>>> -               return 1;
>>> +               /*
>>> +                * moving data is expensive, so let's only do that if
>>> +                * there's substantial gain (at least BIG_CHUNK_GAP chunks)
>>> +                */
>>> +               if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
>>> +                   zhdr->start_middle > zhdr->first_chunks + BIG_CHUNK_GAP) {
>>> +                       mchunk_memmove(zhdr, zhdr->first_chunks + 1);
>>> +                       zhdr->start_middle = zhdr->first_chunks + 1;
>>> +                       ret = 1;
>>> +                       goto out;
>>> +               }
>>> +               if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
>>> +                   zhdr->middle_chunks + zhdr->last_chunks <=
>>> +                   NCHUNKS - zhdr->start_middle - BIG_CHUNK_GAP) {
>>> +                       unsigned short new_start = NCHUNKS - zhdr->last_chunks -
>>> +                               zhdr->middle_chunks;
>
> after closer review, I see that this is wrong.  NCHUNKS isn't the
> total number of page chunks, it's the total number of chunks minus the
> header chunk(s).  So the calculation of the new start above is wrong:
> it should use the total number of page chunks, not NCHUNKS, because
> start_middle already accounts for the header chunk(s).  Probably a new
> macro would help.
>
> Also, the num_free_chunks() function makes the same mistake:
>
> int nfree_after = zhdr->last_chunks ?
>   0 : NCHUNKS - zhdr->start_middle - zhdr->middle_chunks;
>
> that's wrong, it should be something like:
>
> #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
> ...
> int nfree_after = zhdr->last_chunks ?
>   0 : TOTAL_CHUNKS - zhdr->start_middle - zhdr->middle_chunks;

Right, will fix.

~vitaly

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2016-11-26  9:10 UTC | newest]

Thread overview: 6 messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-11-03 21:04 [PATCH] z3fold: extend compaction function Vitaly Wool
2016-11-03 21:16 ` Andrew Morton
2016-11-03 21:35   ` Vitaly Wool
2016-11-25 14:43 ` Dan Streetman
2016-11-25 21:17   ` Dan Streetman
2016-11-26  9:09     ` Vitaly Wool
