linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/3] z3fold: background page compaction
@ 2016-10-19 16:33 Vitaly Wool
From: Vitaly Wool @ 2016-10-19 16:33 UTC (permalink / raw)
  To: Linux-MM, linux-kernel; +Cc: Dan Streetman, Andrew Morton

This patchset is another take on the z3fold page layout optimization
problem. The previous solution [1] used a shrinker to deal with
in-page space fragmentation, but after some discussion the decision
was made to reimplement background page layout optimization on top of
good old work queues.

The patchset thus implements an in-page compaction worker for z3fold,
preceded by some code optimizations and preparations which, again,
deserve to be separate patches.

Signed-off-by: Vitaly Wool <vitalywool@gmail.com>

[1] https://lkml.org/lkml/2016/10/15/31


* [PATCH 1/3] z3fold: make counters atomic
From: Vitaly Wool @ 2016-10-19 16:35 UTC (permalink / raw)
  To: Linux-MM, linux-kernel; +Cc: Dan Streetman, Andrew Morton

This patch converts the per-pool pages_nr counter to atomic64_t.
It also introduces a new counter, unbuddied_nr, also an atomic64_t,
to track the number of unbuddied (compactable) z3fold pages.
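
For reference, a minimal sketch of the accessor pattern the counters
switch to (field names as in this patch); updates and reads no longer
need the pool lock to be held:

        atomic64_set(&pool->pages_nr, 0);        /* at pool creation */
        atomic64_inc(&pool->pages_nr);           /* new z3fold page allocated */
        atomic64_dec(&pool->unbuddied_nr);       /* page leaves an unbuddied list */
        npages = atomic64_read(&pool->pages_nr); /* lock-free read of pool size */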

Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
---
 mm/z3fold.c | 33 +++++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 8 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 8f9e89c..5ac325a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -69,6 +69,7 @@ struct z3fold_ops {
  * @lru:	list tracking the z3fold pages in LRU order by most recently
  *		added buddy.
  * @pages_nr:	number of z3fold pages in the pool.
+ * @unbuddied_nr:	number of unbuddied z3fold pages in the pool.
  * @ops:	pointer to a structure of user defined operations specified at
  *		pool creation time.
  *
@@ -80,7 +81,8 @@ struct z3fold_pool {
 	struct list_head unbuddied[NCHUNKS];
 	struct list_head buddied;
 	struct list_head lru;
-	u64 pages_nr;
+	atomic64_t pages_nr;
+	atomic64_t unbuddied_nr;
 	const struct z3fold_ops *ops;
 	struct zpool *zpool;
 	const struct zpool_ops *zpool_ops;
@@ -234,7 +236,8 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
 		INIT_LIST_HEAD(&pool->unbuddied[i]);
 	INIT_LIST_HEAD(&pool->buddied);
 	INIT_LIST_HEAD(&pool->lru);
-	pool->pages_nr = 0;
+	atomic64_set(&pool->pages_nr, 0);
+	atomic64_set(&pool->unbuddied_nr, 0);
 	pool->ops = ops;
 	return pool;
 }
@@ -334,6 +337,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 					continue;
 				}
 				list_del(&zhdr->buddy);
+				atomic64_dec(&pool->unbuddied_nr);
 				goto found;
 			}
 		}
@@ -346,7 +350,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 	if (!page)
 		return -ENOMEM;
 	spin_lock(&pool->lock);
-	pool->pages_nr++;
+	atomic64_inc(&pool->pages_nr);
 	zhdr = init_z3fold_page(page);
 
 	if (bud == HEADLESS) {
@@ -369,6 +373,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 		/* Add to unbuddied list */
 		freechunks = num_free_chunks(zhdr);
 		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		atomic64_inc(&pool->unbuddied_nr);
 	} else {
 		/* Add to buddied list */
 		list_add(&zhdr->buddy, &pool->buddied);
@@ -412,6 +417,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		/* HEADLESS page stored */
 		bud = HEADLESS;
 	} else {
+		bool is_unbuddied = zhdr->first_chunks == 0 ||
+				zhdr->middle_chunks == 0 ||
+				zhdr->last_chunks == 0;
+
 		bud = handle_to_buddy(handle);
 
 		switch (bud) {
@@ -431,6 +440,8 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 			spin_unlock(&pool->lock);
 			return;
 		}
+		if (is_unbuddied)
+			atomic64_dec(&pool->unbuddied_nr);
 	}
 
 	if (test_bit(UNDER_RECLAIM, &page->private)) {
@@ -451,12 +462,13 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		list_del(&page->lru);
 		clear_bit(PAGE_HEADLESS, &page->private);
 		free_z3fold_page(zhdr);
-		pool->pages_nr--;
+		atomic64_dec(&pool->pages_nr);
 	} else {
 		z3fold_compact_page(zhdr);
 		/* Add to the unbuddied list */
 		freechunks = num_free_chunks(zhdr);
 		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		atomic64_inc(&pool->unbuddied_nr);
 	}
 
 	spin_unlock(&pool->lock);
@@ -520,6 +532,11 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 		zhdr = page_address(page);
 		if (!test_bit(PAGE_HEADLESS, &page->private)) {
 			list_del(&zhdr->buddy);
+			if (zhdr->first_chunks == 0 ||
+			    zhdr->middle_chunks == 0 ||
+			    zhdr->last_chunks == 0)
+				atomic64_dec(&pool->unbuddied_nr);
+
 			/*
 			 * We need encode the handles before unlocking, since
 			 * we can race with free that will set
@@ -569,7 +586,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			 */
 			clear_bit(PAGE_HEADLESS, &page->private);
 			free_z3fold_page(zhdr);
-			pool->pages_nr--;
+			atomic64_dec(&pool->pages_nr);
 			spin_unlock(&pool->lock);
 			return 0;
 		}  else if (!test_bit(PAGE_HEADLESS, &page->private)) {
@@ -584,6 +601,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 				freechunks = num_free_chunks(zhdr);
 				list_add(&zhdr->buddy,
 					 &pool->unbuddied[freechunks]);
+				atomic64_inc(&pool->unbuddied_nr);
 			}
 		}
 
@@ -672,12 +690,11 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
  * z3fold_get_pool_size() - gets the z3fold pool size in pages
  * @pool:	pool whose size is being queried
  *
- * Returns: size in pages of the given pool.  The pool lock need not be
- * taken to access pages_nr.
+ * Returns: size in pages of the given pool.
  */
 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
 {
-	return pool->pages_nr;
+	return atomic64_read(&pool->pages_nr);
 }
 
 /*****************
-- 
2.4.2


* [PATCH 2/3] z3fold: remove redundant locking
From: Vitaly Wool @ 2016-10-19 16:35 UTC (permalink / raw)
  To: Linux-MM, linux-kernel; +Cc: Dan Streetman, Andrew Morton

The per-pool z3fold spinlock should generally be taken only when
a non-atomic pool variable is modified. There's no need to take it
to map/unmap an object. This patch introduces a per-page lock that
will be used instead to protect the per-page variables touched in
the map/unmap functions.
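
A minimal sketch of the resulting split (inferred from the description
above, not a literal excerpt): the pool lock stays around list
manipulation, while per-page state in the map/unmap paths is covered
by the new page lock:

        /* map path: per-page state only, no pool lock needed */
        raw_spin_lock(&zhdr->page_lock);
        buddy = handle_to_buddy(handle);
        raw_spin_unlock(&zhdr->page_lock);

        /* list manipulation still takes the pool lock */
        spin_lock(&pool->lock);
        list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
        spin_unlock(&pool->lock);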

Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
---
 mm/z3fold.c | 65 ++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 38 insertions(+), 27 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 5ac325a..329bc26 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -104,6 +104,7 @@ enum buddy {
  * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
  * @last_chunks:	the size of the last buddy in chunks, 0 if free
  * @first_num:		the starting number (for the first handle)
+ * @page_lock:		per-page lock
  */
 struct z3fold_header {
 	struct list_head buddy;
@@ -112,6 +113,7 @@ struct z3fold_header {
 	unsigned short last_chunks;
 	unsigned short start_middle;
 	unsigned short first_num:NCHUNKS_ORDER;
+	raw_spinlock_t page_lock;
 };
 
 /*
@@ -152,6 +154,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page)
 	zhdr->first_num = 0;
 	zhdr->start_middle = 0;
 	INIT_LIST_HEAD(&zhdr->buddy);
+	raw_spin_lock_init(&zhdr->page_lock);
 	return zhdr;
 }
 
@@ -163,15 +166,17 @@ static void free_z3fold_page(struct z3fold_header *zhdr)
 
 /*
  * Encodes the handle of a particular buddy within a z3fold page
- * Pool lock should be held as this function accesses first_num
  */
 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
 {
 	unsigned long handle;
 
 	handle = (unsigned long)zhdr;
-	if (bud != HEADLESS)
+	if (bud != HEADLESS) {
+		raw_spin_lock(&zhdr->page_lock);
 		handle += (bud + zhdr->first_num) & BUDDY_MASK;
+		raw_spin_unlock(&zhdr->page_lock);
+	}
 	return handle;
 }
 
@@ -181,7 +186,10 @@ static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
 	return (struct z3fold_header *)(handle & PAGE_MASK);
 }
 
-/* Returns buddy number */
+/*
+ * Returns buddy number.
+ * NB: can't be used with HEADLESS pages.
+ */
 static enum buddy handle_to_buddy(unsigned long handle)
 {
 	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
@@ -253,7 +261,6 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
 	kfree(pool);
 }
 
-/* Has to be called with lock held */
 static int z3fold_compact_page(struct z3fold_header *zhdr)
 {
 	struct page *page = virt_to_page(zhdr);
@@ -263,6 +270,7 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
 	if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
 	    zhdr->middle_chunks != 0 &&
 	    zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+		raw_spin_lock(&zhdr->page_lock);
 		memmove(beg + ZHDR_SIZE_ALIGNED,
 			beg + (zhdr->start_middle << CHUNK_SHIFT),
 			zhdr->middle_chunks << CHUNK_SHIFT);
@@ -270,6 +278,7 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
 		zhdr->middle_chunks = 0;
 		zhdr->start_middle = 0;
 		zhdr->first_num++;
+		raw_spin_unlock(&zhdr->page_lock);
 		return 1;
 	}
 	return 0;
@@ -385,9 +394,9 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 		list_del(&page->lru);
 
 	list_add(&page->lru, &pool->lru);
+	spin_unlock(&pool->lock);
 
 	*handle = encode_handle(zhdr, bud);
-	spin_unlock(&pool->lock);
 
 	return 0;
 }
@@ -409,15 +418,18 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 	struct page *page;
 	enum buddy bud;
 
-	spin_lock(&pool->lock);
 	zhdr = handle_to_z3fold_header(handle);
 	page = virt_to_page(zhdr);
 
 	if (test_bit(PAGE_HEADLESS, &page->private)) {
 		/* HEADLESS page stored */
 		bud = HEADLESS;
+		spin_lock(&pool->lock);
 	} else {
-		bool is_unbuddied = zhdr->first_chunks == 0 ||
+		bool is_unbuddied;
+
+		raw_spin_lock(&zhdr->page_lock);
+		is_unbuddied = zhdr->first_chunks == 0 ||
 				zhdr->middle_chunks == 0 ||
 				zhdr->last_chunks == 0;
 
@@ -436,12 +448,17 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 			break;
 		default:
 			pr_err("%s: unknown bud %d\n", __func__, bud);
+			raw_spin_unlock(&zhdr->page_lock);
 			WARN_ON(1);
-			spin_unlock(&pool->lock);
 			return;
 		}
+		raw_spin_unlock(&zhdr->page_lock);
 		if (is_unbuddied)
 			atomic64_dec(&pool->unbuddied_nr);
+
+		spin_lock(&pool->lock);
+		/* Remove from existing buddy list */
+		list_del(&zhdr->buddy);
 	}
 
 	if (test_bit(UNDER_RECLAIM, &page->private)) {
@@ -450,11 +467,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		return;
 	}
 
-	if (bud != HEADLESS) {
-		/* Remove from existing buddy list */
-		list_del(&zhdr->buddy);
-	}
-
+	/* We've got the page and it is not under reclaim */
 	if (bud == HEADLESS ||
 	    (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 &&
 			zhdr->last_chunks == 0)) {
@@ -462,16 +475,16 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		list_del(&page->lru);
 		clear_bit(PAGE_HEADLESS, &page->private);
 		free_z3fold_page(zhdr);
+		spin_unlock(&pool->lock);
 		atomic64_dec(&pool->pages_nr);
 	} else {
 		z3fold_compact_page(zhdr);
 		/* Add to the unbuddied list */
 		freechunks = num_free_chunks(zhdr);
 		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		spin_unlock(&pool->lock);
 		atomic64_inc(&pool->unbuddied_nr);
 	}
-
-	spin_unlock(&pool->lock);
 }
 
 /**
@@ -580,6 +593,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 		if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) ||
 		    (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 &&
 		     zhdr->middle_chunks == 0)) {
+			spin_unlock(&pool->lock);
 			/*
 			 * All buddies are now free, free the z3fold page and
 			 * return success.
@@ -587,7 +601,6 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			clear_bit(PAGE_HEADLESS, &page->private);
 			free_z3fold_page(zhdr);
 			atomic64_dec(&pool->pages_nr);
-			spin_unlock(&pool->lock);
 			return 0;
 		}  else if (!test_bit(PAGE_HEADLESS, &page->private)) {
 			if (zhdr->first_chunks != 0 &&
@@ -629,7 +642,6 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
 	void *addr;
 	enum buddy buddy;
 
-	spin_lock(&pool->lock);
 	zhdr = handle_to_z3fold_header(handle);
 	addr = zhdr;
 	page = virt_to_page(zhdr);
@@ -637,7 +649,9 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
 	if (test_bit(PAGE_HEADLESS, &page->private))
 		goto out;
 
+	raw_spin_lock(&zhdr->page_lock);
 	buddy = handle_to_buddy(handle);
+
 	switch (buddy) {
 	case FIRST:
 		addr += ZHDR_SIZE_ALIGNED;
@@ -655,8 +669,8 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
 		addr = NULL;
 		break;
 	}
+	raw_spin_unlock(&zhdr->page_lock);
 out:
-	spin_unlock(&pool->lock);
 	return addr;
 }
 
@@ -671,19 +685,16 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
 	struct page *page;
 	enum buddy buddy;
 
-	spin_lock(&pool->lock);
 	zhdr = handle_to_z3fold_header(handle);
 	page = virt_to_page(zhdr);
 
-	if (test_bit(PAGE_HEADLESS, &page->private)) {
-		spin_unlock(&pool->lock);
-		return;
+	if (!test_bit(PAGE_HEADLESS, &page->private)) {
+		raw_spin_lock(&zhdr->page_lock);
+		buddy = handle_to_buddy(handle);
+		if (buddy == MIDDLE)
+			clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
+		raw_spin_unlock(&zhdr->page_lock);
 	}
-
-	buddy = handle_to_buddy(handle);
-	if (buddy == MIDDLE)
-		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
-	spin_unlock(&pool->lock);
 }
 
 /**
-- 
2.4.2


* [PATCH 3/3] z3fold: add compaction worker
From: Vitaly Wool @ 2016-10-19 16:36 UTC (permalink / raw)
  To: Linux-MM, linux-kernel; +Cc: Dan Streetman, Andrew Morton

This patch implements a compaction worker thread for z3fold. This
worker does not free up any pages directly, but it allows for a
denser placement of compressed objects, resulting in fewer actual
pages consumed and therefore a higher compression ratio.

This patch has been tested against the latest Linus tree.
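
The overall flow, sketched with the identifiers used in this patch
(details and error handling trimmed): freeing an object no longer
compacts the page inline; it marks the page and kicks a delayed work
item, and the worker then walks the unbuddied lists and compacts
deferred pages in batches.

        /* in z3fold_free(): defer compaction instead of doing it inline */
        set_bit(COMPACTION_DEFERRED, &page->private);
        if (!delayed_work_pending(&pool->work))
                queue_delayed_work(pool->compact_wq, &pool->work, HZ);

        /* worker, heavily simplified */
        static void z3fold_compact_work(struct work_struct *w)
        {
                struct z3fold_pool *pool = container_of(to_delayed_work(w),
                                                        struct z3fold_pool, work);

                /* pick deferred pages off the unbuddied lists, run
                 * z3fold_compact_page(zhdr, false) on each and re-queue
                 * the work if there is more to do
                 */
        }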

Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
---
 mm/z3fold.c | 159 ++++++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 133 insertions(+), 26 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 329bc26..580a732 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -27,6 +27,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/preempt.h>
+#include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/zpool.h>
@@ -59,6 +60,7 @@ struct z3fold_ops {
 
 /**
  * struct z3fold_pool - stores metadata for each z3fold pool
+ * @name:	pool name
  * @lock:	protects all pool fields and first|last_chunk fields of any
  *		z3fold page in the pool
  * @unbuddied:	array of lists tracking z3fold pages that contain 2- buddies;
@@ -72,11 +74,14 @@ struct z3fold_ops {
  * @unbuddied_nr:	number of unbuddied z3fold pages in the pool.
  * @ops:	pointer to a structure of user defined operations specified at
  *		pool creation time.
+ * @compact_wq:	workqueue for page layout background optimization
+ * @work:	compaction work item
  *
  * This structure is allocated at pool creation time and maintains metadata
  * pertaining to a particular z3fold pool.
  */
 struct z3fold_pool {
+	const char *name;
 	spinlock_t lock;
 	struct list_head unbuddied[NCHUNKS];
 	struct list_head buddied;
@@ -86,6 +91,8 @@ struct z3fold_pool {
 	const struct z3fold_ops *ops;
 	struct zpool *zpool;
 	const struct zpool_ops *zpool_ops;
+	struct workqueue_struct *compact_wq;
+	struct delayed_work work;
 };
 
 enum buddy {
@@ -123,6 +130,7 @@ enum z3fold_page_flags {
 	UNDER_RECLAIM = 0,
 	PAGE_HEADLESS,
 	MIDDLE_CHUNK_MAPPED,
+	COMPACTION_DEFERRED,
 };
 
 /*****************
@@ -138,6 +146,9 @@ static int size_to_chunks(size_t size)
 #define for_each_unbuddied_list(_iter, _begin) \
 	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
 
+#define for_each_unbuddied_list_reverse(_iter, _end) \
+	for ((_iter) = (_end); (_iter) > 0; (_iter)--)
+
 /* Initializes the z3fold header of a newly allocated z3fold page */
 static struct z3fold_header *init_z3fold_page(struct page *page)
 {
@@ -147,6 +158,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page)
 	clear_bit(UNDER_RECLAIM, &page->private);
 	clear_bit(PAGE_HEADLESS, &page->private);
 	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
+	clear_bit(COMPACTION_DEFERRED, &page->private);
 
 	zhdr->first_chunks = 0;
 	zhdr->middle_chunks = 0;
@@ -219,6 +231,113 @@ static int num_free_chunks(struct z3fold_header *zhdr)
 	return nfree;
 }
 
+static inline void *mchunk_memmove(struct z3fold_header *zhdr,
+				unsigned short dst_chunk)
+{
+	void *beg = zhdr;
+	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
+		       beg + (zhdr->start_middle << CHUNK_SHIFT),
+		       zhdr->middle_chunks << CHUNK_SHIFT);
+}
+
+static int z3fold_compact_page(struct z3fold_header *zhdr, bool sync)
+{
+	struct page *page = virt_to_page(zhdr);
+	int ret = 0;
+
+	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private) ||
+	    test_bit(UNDER_RECLAIM, &page->private)) {
+		set_bit(COMPACTION_DEFERRED, &page->private);
+		ret = -1;
+		goto out;
+	}
+
+	clear_bit(COMPACTION_DEFERRED, &page->private);
+	if (zhdr->middle_chunks != 0) {
+		    if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+			raw_spin_lock(&zhdr->page_lock);
+			mchunk_memmove(zhdr, 1); /* move to the beginning */
+			zhdr->first_chunks = zhdr->middle_chunks;
+			zhdr->middle_chunks = 0;
+			zhdr->start_middle = 0;
+			zhdr->first_num++;
+			raw_spin_unlock(&zhdr->page_lock);
+			ret = 1;
+			goto out;
+		}
+		if (sync)
+			goto out;
+
+		/* moving data is expensive, so let's only do that if
+		 * there's substantial gain (2+ chunks)
+		 */
+		if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
+		    zhdr->start_middle > zhdr->first_chunks + 2) {
+			raw_spin_lock(&zhdr->page_lock);
+			mchunk_memmove(zhdr, zhdr->first_chunks + 1);
+			zhdr->start_middle = zhdr->first_chunks + 1;
+			raw_spin_unlock(&zhdr->page_lock);
+			ret = 1;
+			goto out;
+		}
+		if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
+		    zhdr->middle_chunks + zhdr->last_chunks <=
+		    NCHUNKS - zhdr->start_middle - 2) {
+			unsigned short new_start = NCHUNKS - zhdr->last_chunks -
+				zhdr->middle_chunks;
+			raw_spin_lock(&zhdr->page_lock);
+			mchunk_memmove(zhdr, new_start);
+			zhdr->start_middle = new_start;
+			raw_spin_unlock(&zhdr->page_lock);
+			ret = 1;
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+#define COMPACTION_BATCH	(NCHUNKS/2)
+static void z3fold_compact_work(struct work_struct *w)
+{
+	struct z3fold_pool *pool = container_of(to_delayed_work(w),
+						struct z3fold_pool, work);
+	struct z3fold_header *zhdr;
+	struct page *page;
+	int i, ret, compacted = 0;
+	bool requeue = false;
+
+	spin_lock(&pool->lock);
+	for_each_unbuddied_list_reverse(i, NCHUNKS - 3) {
+		zhdr = list_first_entry_or_null(&pool->unbuddied[i],
+						struct z3fold_header, buddy);
+		if (!zhdr)
+			continue;
+		page = virt_to_page(zhdr);
+		if (likely(!test_bit(COMPACTION_DEFERRED, &page->private)))
+			continue;
+		list_del(&zhdr->buddy);
+		spin_unlock(&pool->lock);
+		ret = z3fold_compact_page(zhdr, false);
+		if (ret < 0)
+			requeue = true;
+		else
+			compacted += ret;
+		cond_resched();
+		spin_lock(&pool->lock);
+		list_add_tail(&zhdr->buddy,
+			&pool->unbuddied[num_free_chunks(zhdr)]);
+		if (compacted >= COMPACTION_BATCH) {
+			requeue = true;
+			break;
+		}
+	}
+	spin_unlock(&pool->lock);
+	if (requeue && !delayed_work_pending(&pool->work))
+		queue_delayed_work(pool->compact_wq, &pool->work, HZ);
+}
+
+
 /*****************
  * API Functions
 *****************/
@@ -238,7 +357,7 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
 
 	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
 	if (!pool)
-		return NULL;
+		goto out;
 	spin_lock_init(&pool->lock);
 	for_each_unbuddied_list(i, 0)
 		INIT_LIST_HEAD(&pool->unbuddied[i]);
@@ -246,8 +365,13 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
 	INIT_LIST_HEAD(&pool->lru);
 	atomic64_set(&pool->pages_nr, 0);
 	atomic64_set(&pool->unbuddied_nr, 0);
+	pool->compact_wq = create_singlethread_workqueue(pool->name);
+	INIT_DELAYED_WORK(&pool->work, z3fold_compact_work);
 	pool->ops = ops;
 	return pool;
+
+out:
+	return NULL;
 }
 
 /**
@@ -258,32 +382,11 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
  */
 static void z3fold_destroy_pool(struct z3fold_pool *pool)
 {
+	if (pool->compact_wq)
+		destroy_workqueue(pool->compact_wq);
 	kfree(pool);
 }
 
-static int z3fold_compact_page(struct z3fold_header *zhdr)
-{
-	struct page *page = virt_to_page(zhdr);
-	void *beg = zhdr;
-
-
-	if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
-	    zhdr->middle_chunks != 0 &&
-	    zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
-		raw_spin_lock(&zhdr->page_lock);
-		memmove(beg + ZHDR_SIZE_ALIGNED,
-			beg + (zhdr->start_middle << CHUNK_SHIFT),
-			zhdr->middle_chunks << CHUNK_SHIFT);
-		zhdr->first_chunks = zhdr->middle_chunks;
-		zhdr->middle_chunks = 0;
-		zhdr->start_middle = 0;
-		zhdr->first_num++;
-		raw_spin_unlock(&zhdr->page_lock);
-		return 1;
-	}
-	return 0;
-}
-
 /**
  * z3fold_alloc() - allocates a region of a given size
  * @pool:	z3fold pool from which to allocate
@@ -474,16 +577,19 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		/* z3fold page is empty, free */
 		list_del(&page->lru);
 		clear_bit(PAGE_HEADLESS, &page->private);
+		clear_bit(COMPACTION_DEFERRED, &page->private);
 		free_z3fold_page(zhdr);
 		spin_unlock(&pool->lock);
 		atomic64_dec(&pool->pages_nr);
 	} else {
-		z3fold_compact_page(zhdr);
+		set_bit(COMPACTION_DEFERRED, &page->private);
 		/* Add to the unbuddied list */
 		freechunks = num_free_chunks(zhdr);
 		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
 		spin_unlock(&pool->lock);
 		atomic64_inc(&pool->unbuddied_nr);
+		if (!delayed_work_pending(&pool->work))
+			queue_delayed_work(pool->compact_wq, &pool->work, HZ);
 	}
 }
 
@@ -609,7 +715,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 				/* Full, add to buddied list */
 				list_add(&zhdr->buddy, &pool->buddied);
 			} else {
-				z3fold_compact_page(zhdr);
+				z3fold_compact_page(zhdr, true);
 				/* add to unbuddied list */
 				freechunks = num_free_chunks(zhdr);
 				list_add(&zhdr->buddy,
@@ -734,6 +840,7 @@ static void *z3fold_zpool_create(const char *name, gfp_t gfp,
 	if (pool) {
 		pool->zpool = zpool;
 		pool->zpool_ops = zpool_ops;
+		pool->name = name;
 	}
 	return pool;
 }
-- 
2.4.2


* Re: [PATCH 2/3] z3fold: remove redundant locking
From: Dan Streetman @ 2016-10-20 20:15 UTC (permalink / raw)
  To: Vitaly Wool; +Cc: Linux-MM, linux-kernel, Andrew Morton

On Wed, Oct 19, 2016 at 12:35 PM, Vitaly Wool <vitalywool@gmail.com> wrote:
> The per-pool z3fold spinlock should generally be taken only when
> a non-atomic pool variable is modified. There's no need to take it
> to map/unmap an object. This patch introduces per-page lock that
> will be used instead to protect per-page variables in map/unmap
> functions.

I think the per-page lock must be held around almost all access to a
page's zhdr data; previously that was protected by the pool lock.
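
E.g. roughly (a sketch only): anything that tests or updates the chunk
fields would take the page lock first, not just the map/unmap paths:

        raw_spin_lock(&zhdr->page_lock);
        if (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 &&
            zhdr->last_chunks == 0) {
                /* safe to treat the page as empty here */
        }
        raw_spin_unlock(&zhdr->page_lock);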

>
> Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
> ---
>  mm/z3fold.c | 65 ++++++++++++++++++++++++++++++++++++-------------------------
>  1 file changed, 38 insertions(+), 27 deletions(-)
>
> diff --git a/mm/z3fold.c b/mm/z3fold.c
> index 5ac325a..329bc26 100644
> --- a/mm/z3fold.c
> +++ b/mm/z3fold.c
> @@ -104,6 +104,7 @@ enum buddy {
>   * @middle_chunks:     the size of the middle buddy in chunks, 0 if free
>   * @last_chunks:       the size of the last buddy in chunks, 0 if free
>   * @first_num:         the starting number (for the first handle)
> + * @page_lock:         per-page lock
>   */
>  struct z3fold_header {
>         struct list_head buddy;
> @@ -112,6 +113,7 @@ struct z3fold_header {
>         unsigned short last_chunks;
>         unsigned short start_middle;
>         unsigned short first_num:NCHUNKS_ORDER;
> +       raw_spinlock_t page_lock;
>  };
>
>  /*
> @@ -152,6 +154,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page)
>         zhdr->first_num = 0;
>         zhdr->start_middle = 0;
>         INIT_LIST_HEAD(&zhdr->buddy);
> +       raw_spin_lock_init(&zhdr->page_lock);
>         return zhdr;
>  }
>
> @@ -163,15 +166,17 @@ static void free_z3fold_page(struct z3fold_header *zhdr)
>
>  /*
>   * Encodes the handle of a particular buddy within a z3fold page
> - * Pool lock should be held as this function accesses first_num
>   */
>  static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
>  {
>         unsigned long handle;
>
>         handle = (unsigned long)zhdr;
> -       if (bud != HEADLESS)
> +       if (bud != HEADLESS) {
> +               raw_spin_lock(&zhdr->page_lock);
>                 handle += (bud + zhdr->first_num) & BUDDY_MASK;
> +               raw_spin_unlock(&zhdr->page_lock);
> +       }
>         return handle;
>  }
>
> @@ -181,7 +186,10 @@ static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
>         return (struct z3fold_header *)(handle & PAGE_MASK);
>  }
>
> -/* Returns buddy number */
> +/*
> + * Returns buddy number.
> + * NB: can't be used with HEADLESS pages.

Either indicate that this needs to be called with zhdr->page_lock
held, or ensure it never is and take the lock internally, like the
encode_handle function above does.
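
i.e. something like this for the second option (a sketch; callers
would then have to not hold the page lock already):

        static enum buddy handle_to_buddy(unsigned long handle)
        {
                struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
                enum buddy bud;

                raw_spin_lock(&zhdr->page_lock);
                bud = (handle - zhdr->first_num) & BUDDY_MASK;
                raw_spin_unlock(&zhdr->page_lock);
                return bud;
        }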

> + */
>  static enum buddy handle_to_buddy(unsigned long handle)
>  {
>         struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
> @@ -253,7 +261,6 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
>         kfree(pool);
>  }
>
> -/* Has to be called with lock held */
>  static int z3fold_compact_page(struct z3fold_header *zhdr)
>  {
>         struct page *page = virt_to_page(zhdr);
> @@ -263,6 +270,7 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
>         if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) &&
>             zhdr->middle_chunks != 0 &&
>             zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
> +               raw_spin_lock(&zhdr->page_lock);

mapping and chunk checks above need to be protected by the per-page lock.

>                 memmove(beg + ZHDR_SIZE_ALIGNED,
>                         beg + (zhdr->start_middle << CHUNK_SHIFT),
>                         zhdr->middle_chunks << CHUNK_SHIFT);
> @@ -270,6 +278,7 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)
>                 zhdr->middle_chunks = 0;
>                 zhdr->start_middle = 0;
>                 zhdr->first_num++;
> +               raw_spin_unlock(&zhdr->page_lock);
>                 return 1;
>         }
>         return 0;
> @@ -385,9 +394,9 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,

all the zhdr->*_chunks field checks in this function need to be
protected by the page_lock.

>                 list_del(&page->lru);
>
>         list_add(&page->lru, &pool->lru);
> +       spin_unlock(&pool->lock);
>
>         *handle = encode_handle(zhdr, bud);
> -       spin_unlock(&pool->lock);
>
>         return 0;
>  }
> @@ -409,15 +418,18 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>         struct page *page;
>         enum buddy bud;
>
> -       spin_lock(&pool->lock);
>         zhdr = handle_to_z3fold_header(handle);
>         page = virt_to_page(zhdr);
>
>         if (test_bit(PAGE_HEADLESS, &page->private)) {
>                 /* HEADLESS page stored */
>                 bud = HEADLESS;
> +               spin_lock(&pool->lock);
>         } else {
> -               bool is_unbuddied = zhdr->first_chunks == 0 ||
> +               bool is_unbuddied;
> +
> +               raw_spin_lock(&zhdr->page_lock);
> +               is_unbuddied = zhdr->first_chunks == 0 ||
>                                 zhdr->middle_chunks == 0 ||
>                                 zhdr->last_chunks == 0;
>
> @@ -436,12 +448,17 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>                         break;
>                 default:
>                         pr_err("%s: unknown bud %d\n", __func__, bud);
> +                       raw_spin_unlock(&zhdr->page_lock);
>                         WARN_ON(1);
> -                       spin_unlock(&pool->lock);
>                         return;
>                 }
> +               raw_spin_unlock(&zhdr->page_lock);
>                 if (is_unbuddied)
>                         atomic64_dec(&pool->unbuddied_nr);
> +
> +               spin_lock(&pool->lock);
> +               /* Remove from existing buddy list */
> +               list_del(&zhdr->buddy);

I think this needs to be left in place below, after the UNDER_RECLAIM
check; if the page is being reclaimed, it has already been taken off
its buddy list.

>         }
>
>         if (test_bit(UNDER_RECLAIM, &page->private)) {
> @@ -450,11 +467,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>                 return;
>         }
>
> -       if (bud != HEADLESS) {
> -               /* Remove from existing buddy list */
> -               list_del(&zhdr->buddy);
> -       }
> -
> +       /* We've got the page and it is not under reclaim */

Kind of. Another free for a different bud in this page could come in
at the same time, so we have no guarantee that we have exclusive
access to it. We must be careful going between the pool and page
locks, both to avoid deadlock and to avoid removing the page from its
buddy list when it has already been removed.

>         if (bud == HEADLESS ||
>             (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 &&
>                         zhdr->last_chunks == 0)) {
> @@ -462,16 +475,16 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>                 list_del(&page->lru);
>                 clear_bit(PAGE_HEADLESS, &page->private);
>                 free_z3fold_page(zhdr);
> +               spin_unlock(&pool->lock);
>                 atomic64_dec(&pool->pages_nr);
>         } else {
>                 z3fold_compact_page(zhdr);
>                 /* Add to the unbuddied list */
>                 freechunks = num_free_chunks(zhdr);
>                 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
> +               spin_unlock(&pool->lock);
>                 atomic64_inc(&pool->unbuddied_nr);
>         }
> -
> -       spin_unlock(&pool->lock);
>  }
>
>  /**
> @@ -580,6 +593,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>                 if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) ||
>                     (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 &&
>                      zhdr->middle_chunks == 0)) {
> +                       spin_unlock(&pool->lock);

As in compact, the lock needs to protect all zhdr field access.

>                         /*
>                          * All buddies are now free, free the z3fold page and
>                          * return success.
> @@ -587,7 +601,6 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>                         clear_bit(PAGE_HEADLESS, &page->private);
>                         free_z3fold_page(zhdr);
>                         atomic64_dec(&pool->pages_nr);
> -                       spin_unlock(&pool->lock);
>                         return 0;
>                 }  else if (!test_bit(PAGE_HEADLESS, &page->private)) {
>                         if (zhdr->first_chunks != 0 &&
> @@ -629,7 +642,6 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
>         void *addr;
>         enum buddy buddy;
>
> -       spin_lock(&pool->lock);
>         zhdr = handle_to_z3fold_header(handle);
>         addr = zhdr;
>         page = virt_to_page(zhdr);
> @@ -637,7 +649,9 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
>         if (test_bit(PAGE_HEADLESS, &page->private))
>                 goto out;
>
> +       raw_spin_lock(&zhdr->page_lock);
>         buddy = handle_to_buddy(handle);
> +
>         switch (buddy) {
>         case FIRST:
>                 addr += ZHDR_SIZE_ALIGNED;
> @@ -655,8 +669,8 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
>                 addr = NULL;
>                 break;
>         }
> +       raw_spin_unlock(&zhdr->page_lock);
>  out:
> -       spin_unlock(&pool->lock);
>         return addr;
>  }
>
> @@ -671,19 +685,16 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
>         struct page *page;
>         enum buddy buddy;
>
> -       spin_lock(&pool->lock);
>         zhdr = handle_to_z3fold_header(handle);
>         page = virt_to_page(zhdr);
>
> -       if (test_bit(PAGE_HEADLESS, &page->private)) {
> -               spin_unlock(&pool->lock);
> -               return;
> +       if (!test_bit(PAGE_HEADLESS, &page->private)) {
> +               raw_spin_lock(&zhdr->page_lock);
> +               buddy = handle_to_buddy(handle);
> +               if (buddy == MIDDLE)
> +                       clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
> +               raw_spin_unlock(&zhdr->page_lock);
>         }
> -
> -       buddy = handle_to_buddy(handle);
> -       if (buddy == MIDDLE)
> -               clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
> -       spin_unlock(&pool->lock);
>  }
>
>  /**
> --
> 2.4.2


* Re: [PATCH 1/3] z3fold: make counters atomic
From: Dan Streetman @ 2016-10-20 20:17 UTC (permalink / raw)
  To: Vitaly Wool; +Cc: Linux-MM, linux-kernel, Andrew Morton

On Wed, Oct 19, 2016 at 12:35 PM, Vitaly Wool <vitalywool@gmail.com> wrote:
> This patch converts pages_nr per-pool counter to atomic64_t.
> It also introduces a new counter, unbuddied_nr, which is
> atomic64_t, too, to track the number of unbuddied (compactable)
> z3fold pages.
>
> Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
> ---
>  mm/z3fold.c | 33 +++++++++++++++++++++++++--------
>  1 file changed, 25 insertions(+), 8 deletions(-)
>
> diff --git a/mm/z3fold.c b/mm/z3fold.c
> index 8f9e89c..5ac325a 100644
> --- a/mm/z3fold.c
> +++ b/mm/z3fold.c
> @@ -69,6 +69,7 @@ struct z3fold_ops {
>   * @lru:       list tracking the z3fold pages in LRU order by most recently
>   *             added buddy.
>   * @pages_nr:  number of z3fold pages in the pool.
> + * @unbuddied_nr:      number of unbuddied z3fold pages in the pool.
>   * @ops:       pointer to a structure of user defined operations specified at
>   *             pool creation time.
>   *
> @@ -80,7 +81,8 @@ struct z3fold_pool {
>         struct list_head unbuddied[NCHUNKS];
>         struct list_head buddied;
>         struct list_head lru;
> -       u64 pages_nr;
> +       atomic64_t pages_nr;
> +       atomic64_t unbuddied_nr;
>         const struct z3fold_ops *ops;
>         struct zpool *zpool;
>         const struct zpool_ops *zpool_ops;
> @@ -234,7 +236,8 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
>                 INIT_LIST_HEAD(&pool->unbuddied[i]);
>         INIT_LIST_HEAD(&pool->buddied);
>         INIT_LIST_HEAD(&pool->lru);
> -       pool->pages_nr = 0;
> +       atomic64_set(&pool->pages_nr, 0);
> +       atomic64_set(&pool->unbuddied_nr, 0);
>         pool->ops = ops;
>         return pool;
>  }
> @@ -334,6 +337,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>                                         continue;
>                                 }
>                                 list_del(&zhdr->buddy);
> +                               atomic64_dec(&pool->unbuddied_nr);
>                                 goto found;
>                         }
>                 }
> @@ -346,7 +350,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>         if (!page)
>                 return -ENOMEM;
>         spin_lock(&pool->lock);
> -       pool->pages_nr++;
> +       atomic64_inc(&pool->pages_nr);
>         zhdr = init_z3fold_page(page);
>
>         if (bud == HEADLESS) {
> @@ -369,6 +373,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>                 /* Add to unbuddied list */
>                 freechunks = num_free_chunks(zhdr);
>                 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
> +               atomic64_inc(&pool->unbuddied_nr);
>         } else {
>                 /* Add to buddied list */
>                 list_add(&zhdr->buddy, &pool->buddied);
> @@ -412,6 +417,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>                 /* HEADLESS page stored */
>                 bud = HEADLESS;
>         } else {
> +               bool is_unbuddied = zhdr->first_chunks == 0 ||
> +                               zhdr->middle_chunks == 0 ||
> +                               zhdr->last_chunks == 0;
> +
>                 bud = handle_to_buddy(handle);
>
>                 switch (bud) {
> @@ -431,6 +440,8 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>                         spin_unlock(&pool->lock);
>                         return;
>                 }
> +               if (is_unbuddied)
> +                       atomic64_dec(&pool->unbuddied_nr);

This should be below, where it's removed from its buddy list.  If it's
under reclaim, it's already been removed from the buddy list and we
shouldn't dec the counter.

>         }
>
>         if (test_bit(UNDER_RECLAIM, &page->private)) {
> @@ -451,12 +462,13 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>                 list_del(&page->lru);
>                 clear_bit(PAGE_HEADLESS, &page->private);
>                 free_z3fold_page(zhdr);
> -               pool->pages_nr--;
> +               atomic64_dec(&pool->pages_nr);
>         } else {
>                 z3fold_compact_page(zhdr);
>                 /* Add to the unbuddied list */
>                 freechunks = num_free_chunks(zhdr);
>                 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
> +               atomic64_inc(&pool->unbuddied_nr);
>         }
>
>         spin_unlock(&pool->lock);
> @@ -520,6 +532,11 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>                 zhdr = page_address(page);
>                 if (!test_bit(PAGE_HEADLESS, &page->private)) {
>                         list_del(&zhdr->buddy);
> +                       if (zhdr->first_chunks == 0 ||
> +                           zhdr->middle_chunks == 0 ||
> +                           zhdr->last_chunks == 0)
> +                               atomic64_dec(&pool->unbuddied_nr);
> +
>                         /*
>                          * We need encode the handles before unlocking, since
>                          * we can race with free that will set
> @@ -569,7 +586,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>                          */
>                         clear_bit(PAGE_HEADLESS, &page->private);
>                         free_z3fold_page(zhdr);
> -                       pool->pages_nr--;
> +                       atomic64_dec(&pool->pages_nr);
>                         spin_unlock(&pool->lock);
>                         return 0;
>                 }  else if (!test_bit(PAGE_HEADLESS, &page->private)) {
> @@ -584,6 +601,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>                                 freechunks = num_free_chunks(zhdr);
>                                 list_add(&zhdr->buddy,
>                                          &pool->unbuddied[freechunks]);
> +                               atomic64_inc(&pool->unbuddied_nr);
>                         }
>                 }
>
> @@ -672,12 +690,11 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
>   * z3fold_get_pool_size() - gets the z3fold pool size in pages
>   * @pool:      pool whose size is being queried
>   *
> - * Returns: size in pages of the given pool.  The pool lock need not be
> - * taken to access pages_nr.
> + * Returns: size in pages of the given pool.
>   */
>  static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
>  {
> -       return pool->pages_nr;
> +       return atomic64_read(&pool->pages_nr);
>  }
>
>  /*****************
> --
> 2.4.2


* Re: [PATCH 1/3] z3fold: make counters atomic
From: Vitaly Wool @ 2016-10-22 18:28 UTC (permalink / raw)
  To: Dan Streetman; +Cc: Linux-MM, linux-kernel, Andrew Morton

On Thu, Oct 20, 2016 at 10:17 PM, Dan Streetman <ddstreet@ieee.org> wrote:
> On Wed, Oct 19, 2016 at 12:35 PM, Vitaly Wool <vitalywool@gmail.com> wrote:
>> This patch converts pages_nr per-pool counter to atomic64_t.
>> It also introduces a new counter, unbuddied_nr, which is
>> atomic64_t, too, to track the number of unbuddied (compactable)
>> z3fold pages.
>>
>> Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
>> ---
>>  mm/z3fold.c | 33 +++++++++++++++++++++++++--------
>>  1 file changed, 25 insertions(+), 8 deletions(-)
>>
>> diff --git a/mm/z3fold.c b/mm/z3fold.c
>> index 8f9e89c..5ac325a 100644
>> --- a/mm/z3fold.c
>> +++ b/mm/z3fold.c
>> @@ -69,6 +69,7 @@ struct z3fold_ops {
>>   * @lru:       list tracking the z3fold pages in LRU order by most recently
>>   *             added buddy.
>>   * @pages_nr:  number of z3fold pages in the pool.
>> + * @unbuddied_nr:      number of unbuddied z3fold pages in the pool.
>>   * @ops:       pointer to a structure of user defined operations specified at
>>   *             pool creation time.
>>   *
>> @@ -80,7 +81,8 @@ struct z3fold_pool {
>>         struct list_head unbuddied[NCHUNKS];
>>         struct list_head buddied;
>>         struct list_head lru;
>> -       u64 pages_nr;
>> +       atomic64_t pages_nr;
>> +       atomic64_t unbuddied_nr;
>>         const struct z3fold_ops *ops;
>>         struct zpool *zpool;
>>         const struct zpool_ops *zpool_ops;
>> @@ -234,7 +236,8 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
>>                 INIT_LIST_HEAD(&pool->unbuddied[i]);
>>         INIT_LIST_HEAD(&pool->buddied);
>>         INIT_LIST_HEAD(&pool->lru);
>> -       pool->pages_nr = 0;
>> +       atomic64_set(&pool->pages_nr, 0);
>> +       atomic64_set(&pool->unbuddied_nr, 0);
>>         pool->ops = ops;
>>         return pool;
>>  }
>> @@ -334,6 +337,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>>                                         continue;
>>                                 }
>>                                 list_del(&zhdr->buddy);
>> +                               atomic64_dec(&pool->unbuddied_nr);
>>                                 goto found;
>>                         }
>>                 }
>> @@ -346,7 +350,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>>         if (!page)
>>                 return -ENOMEM;
>>         spin_lock(&pool->lock);
>> -       pool->pages_nr++;
>> +       atomic64_inc(&pool->pages_nr);
>>         zhdr = init_z3fold_page(page);
>>
>>         if (bud == HEADLESS) {
>> @@ -369,6 +373,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>>                 /* Add to unbuddied list */
>>                 freechunks = num_free_chunks(zhdr);
>>                 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
>> +               atomic64_inc(&pool->unbuddied_nr);
>>         } else {
>>                 /* Add to buddied list */
>>                 list_add(&zhdr->buddy, &pool->buddied);
>> @@ -412,6 +417,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>>                 /* HEADLESS page stored */
>>                 bud = HEADLESS;
>>         } else {
>> +               bool is_unbuddied = zhdr->first_chunks == 0 ||
>> +                               zhdr->middle_chunks == 0 ||
>> +                               zhdr->last_chunks == 0;
>> +
>>                 bud = handle_to_buddy(handle);
>>
>>                 switch (bud) {
>> @@ -431,6 +440,8 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>>                         spin_unlock(&pool->lock);
>>                         return;
>>                 }
>> +               if (is_unbuddied)
>> +                       atomic64_dec(&pool->unbuddied_nr);
>
> This should be below, where it's removed from its buddy list.  If it's
> under reclaim, it's already been removed from the buddy list and we
> shouldn't dec the counter.

Right, thanks. Will fix in the new respin.

~vitaly


* Re: [PATCH 2/3] z3fold: remove redundant locking
From: Vitaly Wool @ 2016-10-22 18:51 UTC (permalink / raw)
  To: Dan Streetman; +Cc: Linux-MM, linux-kernel, Andrew Morton

On Thu, Oct 20, 2016 at 10:15 PM, Dan Streetman <ddstreet@ieee.org> wrote:
> On Wed, Oct 19, 2016 at 12:35 PM, Vitaly Wool <vitalywool@gmail.com> wrote:
>> The per-pool z3fold spinlock should generally be taken only when
>> a non-atomic pool variable is modified. There's no need to take it
>> to map/unmap an object. This patch introduces per-page lock that
>> will be used instead to protect per-page variables in map/unmap
>> functions.
>
> I think the per-page lock must be held around almost all access to any
> page zhdr data; previously that was protected by the pool lock.

Right, except for list operations. At this point I think the per-page
locks will have to be thought over again, and there is some nice
performance gain to be had from making the spinlock a rwlock anyway,
so I'll stick with the latest patchset, fixing tiny bits like the
wrong unbuddied_nr increment in the other patch.
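
For reference, the rwlock direction would look roughly like this (a
sketch of the idea only, not code from any posted patch):

        /* pool->lock becomes an rwlock_t instead of a spinlock_t */

        /* read-mostly paths (e.g. map/unmap) can then run concurrently */
        read_lock(&pool->lock);
        buddy = handle_to_buddy(handle);
        read_unlock(&pool->lock);

        /* list manipulation still needs exclusive access */
        write_lock(&pool->lock);
        list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
        write_unlock(&pool->lock);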

Best regards,
   Vitaly


* [PATCH 2/3] z3fold: remove redundant locking
  2016-10-13 16:47 [PATCHv4 0/3] z3fold: add shrinker Vitaly Wool
@ 2016-10-13 16:50 ` Vitaly Wool
  0 siblings, 0 replies; 9+ messages in thread
From: Vitaly Wool @ 2016-10-13 16:50 UTC (permalink / raw)
  To: Linux-MM, linux-kernel; +Cc: Dan Streetman, Andrew Morton, Dave Chinner

The per-pool z3fold spinlock should generally be taken only when
a non-atomic pool variable is modified. There's no need to take it
to map/unmap an object.

Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
---
 mm/z3fold.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 5197d7b..10513b5 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -580,6 +580,7 @@ next:
 		if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) ||
 		    (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 &&
 		     zhdr->middle_chunks == 0)) {
+			spin_unlock(&pool->lock);
 			/*
 			 * All buddies are now free, free the z3fold page and
 			 * return success.
@@ -587,7 +588,6 @@ next:
 			clear_bit(PAGE_HEADLESS, &page->private);
 			free_z3fold_page(zhdr);
 			atomic64_dec(&pool->pages_nr);
-			spin_unlock(&pool->lock);
 			return 0;
 		}  else if (!test_bit(PAGE_HEADLESS, &page->private)) {
 			if (zhdr->first_chunks != 0 &&
@@ -629,7 +629,6 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
 	void *addr;
 	enum buddy buddy;
 
-	spin_lock(&pool->lock);
 	zhdr = handle_to_z3fold_header(handle);
 	addr = zhdr;
 	page = virt_to_page(zhdr);
@@ -656,7 +655,6 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
 		break;
 	}
 out:
-	spin_unlock(&pool->lock);
 	return addr;
 }
 
@@ -671,19 +669,14 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
 	struct page *page;
 	enum buddy buddy;
 
-	spin_lock(&pool->lock);
 	zhdr = handle_to_z3fold_header(handle);
 	page = virt_to_page(zhdr);
 
-	if (test_bit(PAGE_HEADLESS, &page->private)) {
-		spin_unlock(&pool->lock);
-		return;
+	if (!test_bit(PAGE_HEADLESS, &page->private)) {
+		buddy = handle_to_buddy(handle);
+		if (buddy == MIDDLE)
+			clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
 	}
-
-	buddy = handle_to_buddy(handle);
-	if (buddy == MIDDLE)
-		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
-	spin_unlock(&pool->lock);
 }
 
 /**
-- 
2.4.2

