linux-mm.kvack.org archive mirror
* [PATCH 0/3] z3fold: stability / rt fixes
@ 2020-12-09 14:51 Vitaly Wool
  2020-12-09 14:51 ` [PATCH 1/3] z3fold: simplify freeing slots Vitaly Wool
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Vitaly Wool @ 2020-12-09 14:51 UTC (permalink / raw)
  To: linux-mm
  Cc: lkml, linux-rt-users, Sebastian Andrzej Siewior, Mike Galbraith,
	akpm, Vitaly Wool

This patchset addresses z3fold stability issues under stress load,
primarily in the reclaim and free paths. It also fixes locking
problems that were only seen in real-time (PREEMPT_RT) kernel
configurations.

Vitaly Wool (2):
  z3fold: simplify freeing slots
  z3fold: stricter locking and more careful reclaim

Mike Galbraith (1):
  z3fold: Remove preempt disabled sections for RT

 mm/z3fold.c | 191 ++++++++++++++++++++++++++--------------------------
 1 file changed, 96 insertions(+), 95 deletions(-)

-- 
2.20.1




* [PATCH 1/3] z3fold: simplify freeing slots
  2020-12-09 14:51 [PATCH 0/3] z3fold: stability / rt fixes Vitaly Wool
@ 2020-12-09 14:51 ` Vitaly Wool
  2020-12-09 14:51 ` [PATCH 2/3] z3fold: Remove preempt disabled sections for RT Vitaly Wool
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 6+ messages in thread
From: Vitaly Wool @ 2020-12-09 14:51 UTC (permalink / raw)
  To: linux-mm
  Cc: lkml, linux-rt-users, Sebastian Andrzej Siewior, Mike Galbraith,
	akpm, Vitaly Wool, stable

There used to be two places in the code where slots could be freed:
when freeing the last allocated handle from the slots, and when
releasing the z3fold header these slots are linked to. The logic
that decided whether to free a given slots array was complicated and
error-prone in both functions, and it led to failures in the RT case.

To fix that, make free_handle() the single point of freeing slots.
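
For reference, the consolidated free path after this patch looks
roughly like the sketch below (reconstructed from the hunks that
follow; early-return checks omitted, not verbatim mm/z3fold.c):

  static inline void free_handle(unsigned long handle,
                                 struct z3fold_header *zhdr)
  {
          struct z3fold_buddy_slots *slots = handle_to_slots(handle);
          bool is_free = true;
          int i;

          write_lock(&slots->lock);
          *(unsigned long *)handle = 0;        /* clear this handle */
          if (zhdr->slots != slots)
                  zhdr->foreign_handles--;     /* it was a foreign handle */

          /* the slots array goes away iff no handle in it is still live */
          for (i = 0; i <= BUDDY_MASK; i++) {
                  if (slots->slot[i]) {
                          is_free = false;
                          break;
                  }
          }
          write_unlock(&slots->lock);

          if (is_free) {
                  struct z3fold_pool *pool = slots_to_pool(slots);

                  if (zhdr->slots == slots)
                          zhdr->slots = NULL;  /* re-allocated on next use */
                  kmem_cache_free(pool->c_handle, slots);
          }
  }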

Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.com>
Tested-by: Mike Galbraith <efault@gmx.de>
Cc: stable@kernel.org
---
 mm/z3fold.c | 55 +++++++++++++----------------------------------------
 1 file changed, 13 insertions(+), 42 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 18feaa0bc537..6c2325cd3fba 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -90,7 +90,7 @@ struct z3fold_buddy_slots {
 	 * be enough slots to hold all possible variants
 	 */
 	unsigned long slot[BUDDY_MASK + 1];
-	unsigned long pool; /* back link + flags */
+	unsigned long pool; /* back link */
 	rwlock_t lock;
 };
 #define HANDLE_FLAG_MASK	(0x03)
@@ -181,13 +181,6 @@ enum z3fold_page_flags {
 	PAGE_CLAIMED, /* by either reclaim or free */
 };
 
-/*
- * handle flags, go under HANDLE_FLAG_MASK
- */
-enum z3fold_handle_flags {
-	HANDLES_ORPHANED = 0,
-};
-
 /*
  * Forward declarations
  */
@@ -303,10 +296,9 @@ static inline void put_z3fold_header(struct z3fold_header *zhdr)
 		z3fold_page_unlock(zhdr);
 }
 
-static inline void free_handle(unsigned long handle)
+static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
 {
 	struct z3fold_buddy_slots *slots;
-	struct z3fold_header *zhdr;
 	int i;
 	bool is_free;
 
@@ -316,22 +308,13 @@ static inline void free_handle(unsigned long handle)
 	if (WARN_ON(*(unsigned long *)handle == 0))
 		return;
 
-	zhdr = handle_to_z3fold_header(handle);
 	slots = handle_to_slots(handle);
 	write_lock(&slots->lock);
 	*(unsigned long *)handle = 0;
-	if (zhdr->slots == slots) {
-		write_unlock(&slots->lock);
-		return; /* simple case, nothing else to do */
-	}
+	if (zhdr->slots != slots)
+		zhdr->foreign_handles--;
 
-	/* we are freeing a foreign handle if we are here */
-	zhdr->foreign_handles--;
 	is_free = true;
-	if (!test_bit(HANDLES_ORPHANED, &slots->pool)) {
-		write_unlock(&slots->lock);
-		return;
-	}
 	for (i = 0; i <= BUDDY_MASK; i++) {
 		if (slots->slot[i]) {
 			is_free = false;
@@ -343,6 +326,8 @@ static inline void free_handle(unsigned long handle)
 	if (is_free) {
 		struct z3fold_pool *pool = slots_to_pool(slots);
 
+		if (zhdr->slots == slots)
+			zhdr->slots = NULL;
 		kmem_cache_free(pool->c_handle, slots);
 	}
 }
@@ -525,8 +510,6 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 {
 	struct page *page = virt_to_page(zhdr);
 	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
-	bool is_free = true;
-	int i;
 
 	WARN_ON(!list_empty(&zhdr->buddy));
 	set_bit(PAGE_STALE, &page->private);
@@ -536,21 +519,6 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 		list_del_init(&page->lru);
 	spin_unlock(&pool->lock);
 
-	/* If there are no foreign handles, free the handles array */
-	read_lock(&zhdr->slots->lock);
-	for (i = 0; i <= BUDDY_MASK; i++) {
-		if (zhdr->slots->slot[i]) {
-			is_free = false;
-			break;
-		}
-	}
-	if (!is_free)
-		set_bit(HANDLES_ORPHANED, &zhdr->slots->pool);
-	read_unlock(&zhdr->slots->lock);
-
-	if (is_free)
-		kmem_cache_free(pool->c_handle, zhdr->slots);
-
 	if (locked)
 		z3fold_page_unlock(zhdr);
 
@@ -973,6 +941,9 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 		}
 	}
 
+	if (zhdr && !zhdr->slots)
+		zhdr->slots = alloc_slots(pool,
+					can_sleep ? GFP_NOIO : GFP_ATOMIC);
 	return zhdr;
 }
 
@@ -1270,7 +1241,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 	}
 
 	if (!page_claimed)
-		free_handle(handle);
+		free_handle(handle, zhdr);
 	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
 		atomic64_dec(&pool->pages_nr);
 		return;
@@ -1429,19 +1400,19 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			ret = pool->ops->evict(pool, middle_handle);
 			if (ret)
 				goto next;
-			free_handle(middle_handle);
+			free_handle(middle_handle, zhdr);
 		}
 		if (first_handle) {
 			ret = pool->ops->evict(pool, first_handle);
 			if (ret)
 				goto next;
-			free_handle(first_handle);
+			free_handle(first_handle, zhdr);
 		}
 		if (last_handle) {
 			ret = pool->ops->evict(pool, last_handle);
 			if (ret)
 				goto next;
-			free_handle(last_handle);
+			free_handle(last_handle, zhdr);
 		}
 next:
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
-- 
2.20.1




* [PATCH 2/3] z3fold: Remove preempt disabled sections for RT
  2020-12-09 14:51 [PATCH 0/3] z3fold: stability / rt fixes Vitaly Wool
  2020-12-09 14:51 ` [PATCH 1/3] z3fold: simplify freeing slots Vitaly Wool
@ 2020-12-09 14:51 ` Vitaly Wool
  2020-12-09 14:51 ` [PATCH 3/3] z3fold: stricter locking and more careful reclaim Vitaly Wool
  2020-12-18 23:19 ` [PATCH 0/3] z3fold: stability / rt fixes Oleksandr Natalenko
  3 siblings, 0 replies; 6+ messages in thread
From: Vitaly Wool @ 2020-12-09 14:51 UTC (permalink / raw)
  To: linux-mm
  Cc: lkml, linux-rt-users, Sebastian Andrzej Siewior, Mike Galbraith,
	akpm, Vitaly Wool

Replace get_cpu_ptr() with migrate_disable() + this_cpu_ptr() so that
the spinlocks in these sections, which become sleeping locks on
PREEMPT_RT, can be taken without preemption being disabled.
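
The conversion follows this general pattern (an illustrative sketch
simplified from the add_to_unbuddied() hunk below):

  /* Before: get_cpu_ptr() disables preemption, which is not allowed
   * around spinlocks on PREEMPT_RT, where they are sleeping locks.
   */
  unbuddied = get_cpu_ptr(pool->unbuddied);
  spin_lock(&pool->lock);
  list_add(&zhdr->buddy, &unbuddied[freechunks]);
  spin_unlock(&pool->lock);
  zhdr->cpu = smp_processor_id();
  put_cpu_ptr(pool->unbuddied);

  /* After: migrate_disable() only pins the task to the current CPU,
   * so the pool spinlock may still be taken (and may sleep on RT)
   * while the per-CPU pointer stays valid.
   */
  migrate_disable();
  unbuddied = this_cpu_ptr(pool->unbuddied);
  spin_lock(&pool->lock);
  list_add(&zhdr->buddy, &unbuddied[freechunks]);
  spin_unlock(&pool->lock);
  zhdr->cpu = smp_processor_id();
  migrate_enable();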

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.com>
---
 mm/z3fold.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 6c2325cd3fba..9fc1cc9630fe 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -610,14 +610,16 @@ static inline void add_to_unbuddied(struct z3fold_pool *pool,
 {
 	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
 			zhdr->middle_chunks == 0) {
-		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
-
+		struct list_head *unbuddied;
 		int freechunks = num_free_chunks(zhdr);
+
+		migrate_disable();
+		unbuddied = this_cpu_ptr(pool->unbuddied);
 		spin_lock(&pool->lock);
 		list_add(&zhdr->buddy, &unbuddied[freechunks]);
 		spin_unlock(&pool->lock);
 		zhdr->cpu = smp_processor_id();
-		put_cpu_ptr(pool->unbuddied);
+		migrate_enable();
 	}
 }
 
@@ -854,8 +856,9 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 	int chunks = size_to_chunks(size), i;
 
 lookup:
+	migrate_disable();
 	/* First, try to find an unbuddied z3fold page. */
-	unbuddied = get_cpu_ptr(pool->unbuddied);
+	unbuddied = this_cpu_ptr(pool->unbuddied);
 	for_each_unbuddied_list(i, chunks) {
 		struct list_head *l = &unbuddied[i];
 
@@ -873,7 +876,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 		    !z3fold_page_trylock(zhdr)) {
 			spin_unlock(&pool->lock);
 			zhdr = NULL;
-			put_cpu_ptr(pool->unbuddied);
+			migrate_enable();
 			if (can_sleep)
 				cond_resched();
 			goto lookup;
@@ -887,7 +890,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 		    test_bit(PAGE_CLAIMED, &page->private)) {
 			z3fold_page_unlock(zhdr);
 			zhdr = NULL;
-			put_cpu_ptr(pool->unbuddied);
+			migrate_enable();
 			if (can_sleep)
 				cond_resched();
 			goto lookup;
@@ -902,7 +905,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 		kref_get(&zhdr->refcount);
 		break;
 	}
-	put_cpu_ptr(pool->unbuddied);
+	migrate_enable();
 
 	if (!zhdr) {
 		int cpu;
-- 
2.20.1




* [PATCH 3/3] z3fold: stricter locking and more careful reclaim
  2020-12-09 14:51 [PATCH 0/3] z3fold: stability / rt fixes Vitaly Wool
  2020-12-09 14:51 ` [PATCH 1/3] z3fold: simplify freeing slots Vitaly Wool
  2020-12-09 14:51 ` [PATCH 2/3] z3fold: Remove preempt disabled sections for RT Vitaly Wool
@ 2020-12-09 14:51 ` Vitaly Wool
  2020-12-18 23:19 ` [PATCH 0/3] z3fold: stability / rt fixes Oleksandr Natalenko
  3 siblings, 0 replies; 6+ messages in thread
From: Vitaly Wool @ 2020-12-09 14:51 UTC (permalink / raw)
  To: linux-mm
  Cc: lkml, linux-rt-users, Sebastian Andrzej Siewior, Mike Galbraith,
	akpm, Vitaly Wool, stable

Use temporary, on-stack slots in the reclaim function to avoid a
possible race when freeing them.

While at it, check the PAGE_CLAIMED flag under the page lock in the
reclaim function to make sure we are not racing with z3fold_alloc().
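
Roughly, the reclaim path now does the following (a simplified sketch
of the z3fold_reclaim_page() hunks below, not verbatim kernel source):

  /* A per-call, on-stack slots array marked HANDLES_NOFREE, so that
   * free_handle() returns early and never kmem_cache_free()s it.
   */
  struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

  rwlock_init(&slots.lock);
  slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

  /* Encode the handles passed to ->evict() into the temporary slots
   * rather than into the page's own slots array, so handling them
   * cannot race with the real slots being freed.
   */
  memset(slots.slot, 0, sizeof(slots.slot));
  if (zhdr->first_chunks)
          first_handle = __encode_handle(zhdr, &slots, FIRST);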

Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: stable@kernel.org
---
 mm/z3fold.c | 143 +++++++++++++++++++++++++++++++---------------------
 1 file changed, 85 insertions(+), 58 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 9fc1cc9630fe..dacb0d70fa61 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -181,6 +181,13 @@ enum z3fold_page_flags {
 	PAGE_CLAIMED, /* by either reclaim or free */
 };
 
+/*
+ * handle flags, go under HANDLE_FLAG_MASK
+ */
+enum z3fold_handle_flags {
+	HANDLES_NOFREE = 0,
+};
+
 /*
  * Forward declarations
  */
@@ -311,6 +318,12 @@ static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
 	slots = handle_to_slots(handle);
 	write_lock(&slots->lock);
 	*(unsigned long *)handle = 0;
+
+	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
+		write_unlock(&slots->lock);
+		return; /* simple case, nothing else to do */
+	}
+
 	if (zhdr->slots != slots)
 		zhdr->foreign_handles--;
 
@@ -623,6 +636,28 @@ static inline void add_to_unbuddied(struct z3fold_pool *pool,
 	}
 }
 
+static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
+{
+	enum buddy bud = HEADLESS;
+
+	if (zhdr->middle_chunks) {
+		if (!zhdr->first_chunks &&
+		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
+			bud = FIRST;
+		else if (!zhdr->last_chunks)
+			bud = LAST;
+	} else {
+		if (!zhdr->first_chunks)
+			bud = FIRST;
+		else if (!zhdr->last_chunks)
+			bud = LAST;
+		else
+			bud = MIDDLE;
+	}
+
+	return bud;
+}
+
 static inline void *mchunk_memmove(struct z3fold_header *zhdr,
 				unsigned short dst_chunk)
 {
@@ -684,18 +719,7 @@ static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
 		if (WARN_ON(new_zhdr == zhdr))
 			goto out_fail;
 
-		if (new_zhdr->first_chunks == 0) {
-			if (new_zhdr->middle_chunks != 0 &&
-					chunks >= new_zhdr->start_middle) {
-				new_bud = LAST;
-			} else {
-				new_bud = FIRST;
-			}
-		} else if (new_zhdr->last_chunks == 0) {
-			new_bud = LAST;
-		} else if (new_zhdr->middle_chunks == 0) {
-			new_bud = MIDDLE;
-		}
+		new_bud = get_free_buddy(new_zhdr, chunks);
 		q = new_zhdr;
 		switch (new_bud) {
 		case FIRST:
@@ -817,9 +841,8 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 		return;
 	}
 
-	if (unlikely(PageIsolated(page) ||
-		     test_bit(PAGE_CLAIMED, &page->private) ||
-		     test_bit(PAGE_STALE, &page->private))) {
+	if (test_bit(PAGE_STALE, &page->private) ||
+	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
 		z3fold_page_unlock(zhdr);
 		return;
 	}
@@ -828,13 +851,16 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
 		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
 			atomic64_dec(&pool->pages_nr);
-		else
+		else {
+			clear_bit(PAGE_CLAIMED, &page->private);
 			z3fold_page_unlock(zhdr);
+		}
 		return;
 	}
 
 	z3fold_compact_page(zhdr);
 	add_to_unbuddied(pool, zhdr);
+	clear_bit(PAGE_CLAIMED, &page->private);
 	z3fold_page_unlock(zhdr);
 }
 
@@ -1083,17 +1109,8 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 retry:
 		zhdr = __z3fold_alloc(pool, size, can_sleep);
 		if (zhdr) {
-			if (zhdr->first_chunks == 0) {
-				if (zhdr->middle_chunks != 0 &&
-				    chunks >= zhdr->start_middle)
-					bud = LAST;
-				else
-					bud = FIRST;
-			} else if (zhdr->last_chunks == 0)
-				bud = LAST;
-			else if (zhdr->middle_chunks == 0)
-				bud = MIDDLE;
-			else {
+			bud = get_free_buddy(zhdr, chunks);
+			if (bud == HEADLESS) {
 				if (kref_put(&zhdr->refcount,
 					     release_z3fold_page_locked))
 					atomic64_dec(&pool->pages_nr);
@@ -1239,7 +1256,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		pr_err("%s: unknown bud %d\n", __func__, bud);
 		WARN_ON(1);
 		put_z3fold_header(zhdr);
-		clear_bit(PAGE_CLAIMED, &page->private);
 		return;
 	}
 
@@ -1254,8 +1270,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		z3fold_page_unlock(zhdr);
 		return;
 	}
-	if (unlikely(PageIsolated(page)) ||
-	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
+	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
 		put_z3fold_header(zhdr);
 		clear_bit(PAGE_CLAIMED, &page->private);
 		return;
@@ -1319,6 +1334,10 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 	struct page *page = NULL;
 	struct list_head *pos;
 	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
+	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));
+
+	rwlock_init(&slots.lock);
+	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);
 
 	spin_lock(&pool->lock);
 	if (!pool->ops || !pool->ops->evict || retries == 0) {
@@ -1333,35 +1352,36 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 		list_for_each_prev(pos, &pool->lru) {
 			page = list_entry(pos, struct page, lru);
 
-			/* this bit could have been set by free, in which case
-			 * we pass over to the next page in the pool.
-			 */
-			if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
-				page = NULL;
-				continue;
-			}
-
-			if (unlikely(PageIsolated(page))) {
-				clear_bit(PAGE_CLAIMED, &page->private);
-				page = NULL;
-				continue;
-			}
 			zhdr = page_address(page);
 			if (test_bit(PAGE_HEADLESS, &page->private))
 				break;
 
+			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
+				zhdr = NULL;
+				break;
+			}
 			if (!z3fold_page_trylock(zhdr)) {
-				clear_bit(PAGE_CLAIMED, &page->private);
+				if (kref_put(&zhdr->refcount,
+						release_z3fold_page))
+					atomic64_dec(&pool->pages_nr);
 				zhdr = NULL;
 				continue; /* can't evict at this point */
 			}
-			if (zhdr->foreign_handles) {
-				clear_bit(PAGE_CLAIMED, &page->private);
-				z3fold_page_unlock(zhdr);
+
+			/* test_and_set_bit is of course atomic, but we still
+			 * need to do it under page lock, otherwise checking
+			 * that bit in __z3fold_alloc wouldn't make sense
+			 */
+			if (zhdr->foreign_handles ||
+			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+				if (kref_put(&zhdr->refcount,
+						release_z3fold_page))
+					atomic64_dec(&pool->pages_nr);
+				else
+					z3fold_page_unlock(zhdr);
 				zhdr = NULL;
 				continue; /* can't evict such page */
 			}
-			kref_get(&zhdr->refcount);
 			list_del_init(&zhdr->buddy);
 			zhdr->cpu = -1;
 			break;
@@ -1383,12 +1403,16 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			first_handle = 0;
 			last_handle = 0;
 			middle_handle = 0;
+			memset(slots.slot, 0, sizeof(slots.slot));
 			if (zhdr->first_chunks)
-				first_handle = encode_handle(zhdr, FIRST);
+				first_handle = __encode_handle(zhdr, &slots,
+								FIRST);
 			if (zhdr->middle_chunks)
-				middle_handle = encode_handle(zhdr, MIDDLE);
+				middle_handle = __encode_handle(zhdr, &slots,
+								MIDDLE);
 			if (zhdr->last_chunks)
-				last_handle = encode_handle(zhdr, LAST);
+				last_handle = __encode_handle(zhdr, &slots,
+								LAST);
 			/*
 			 * it's safe to unlock here because we hold a
 			 * reference to this page
@@ -1403,19 +1427,16 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			ret = pool->ops->evict(pool, middle_handle);
 			if (ret)
 				goto next;
-			free_handle(middle_handle, zhdr);
 		}
 		if (first_handle) {
 			ret = pool->ops->evict(pool, first_handle);
 			if (ret)
 				goto next;
-			free_handle(first_handle, zhdr);
 		}
 		if (last_handle) {
 			ret = pool->ops->evict(pool, last_handle);
 			if (ret)
 				goto next;
-			free_handle(last_handle, zhdr);
 		}
 next:
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
@@ -1429,9 +1450,11 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			spin_unlock(&pool->lock);
 			clear_bit(PAGE_CLAIMED, &page->private);
 		} else {
+			struct z3fold_buddy_slots *slots = zhdr->slots;
 			z3fold_page_lock(zhdr);
 			if (kref_put(&zhdr->refcount,
 					release_z3fold_page_locked)) {
+				kmem_cache_free(pool->c_handle, slots);
 				atomic64_dec(&pool->pages_nr);
 				return 0;
 			}
@@ -1547,8 +1570,7 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
 	VM_BUG_ON_PAGE(!PageMovable(page), page);
 	VM_BUG_ON_PAGE(PageIsolated(page), page);
 
-	if (test_bit(PAGE_HEADLESS, &page->private) ||
-	    test_bit(PAGE_CLAIMED, &page->private))
+	if (test_bit(PAGE_HEADLESS, &page->private))
 		return false;
 
 	zhdr = page_address(page);
@@ -1560,6 +1582,8 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
 	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
 		goto out;
 
+	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+		goto out;
 	pool = zhdr_to_pool(zhdr);
 	spin_lock(&pool->lock);
 	if (!list_empty(&zhdr->buddy))
@@ -1586,16 +1610,17 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
 
 	VM_BUG_ON_PAGE(!PageMovable(page), page);
 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
 	zhdr = page_address(page);
 	pool = zhdr_to_pool(zhdr);
 
-	if (!z3fold_page_trylock(zhdr)) {
+	if (!z3fold_page_trylock(zhdr))
 		return -EAGAIN;
-	}
 	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
 		z3fold_page_unlock(zhdr);
+		clear_bit(PAGE_CLAIMED, &page->private);
 		return -EBUSY;
 	}
 	if (work_pending(&zhdr->work)) {
@@ -1637,6 +1662,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
 	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
 
 	page_mapcount_reset(page);
+	clear_bit(PAGE_CLAIMED, &page->private);
 	put_page(page);
 	return 0;
 }
@@ -1660,6 +1686,7 @@ static void z3fold_page_putback(struct page *page)
 	spin_lock(&pool->lock);
 	list_add(&page->lru, &pool->lru);
 	spin_unlock(&pool->lock);
+	clear_bit(PAGE_CLAIMED, &page->private);
 	z3fold_page_unlock(zhdr);
 }
 
-- 
2.20.1




* Re: [PATCH 0/3] z3fold: stability / rt fixes
  2020-12-09 14:51 [PATCH 0/3] z3fold: stability / rt fixes Vitaly Wool
                   ` (2 preceding siblings ...)
  2020-12-09 14:51 ` [PATCH 3/3] z3fold: stricter locking and more careful reclaim Vitaly Wool
@ 2020-12-18 23:19 ` Oleksandr Natalenko
  2020-12-18 23:26   ` Vitaly Wool
  3 siblings, 1 reply; 6+ messages in thread
From: Oleksandr Natalenko @ 2020-12-18 23:19 UTC (permalink / raw)
  To: Vitaly Wool
  Cc: linux-mm, lkml, linux-rt-users, Sebastian Andrzej Siewior,
	Mike Galbraith, akpm

On Wed, Dec 09, 2020 at 04:51:48PM +0200, Vitaly Wool wrote:
> This patchset addresses z3fold stability issues under stress load,
> primarily in the reclaim and free aspects. Besides, it fixes the
> locking problems that were only seen in real-time kernel
> configuration.
> 
> Vitaly Wool (2):
>   z3fold: simplify freeing slots
>   z3fold: stricter locking and more careful reclaim
> 
> Mike Galbraith (1):
>   z3fold: Remove preempt disabled sections for RT
> 
>  mm/z3fold.c | 191 ++++++++++++++++++++++++++--------------------------
>  1 file changed, 96 insertions(+), 95 deletions(-)
> 
> -- 
> 2.20.1
> 

Thanks for addressing this!

(I'd appreciate being Cc'd on this though, since I was the one who
reported it initially...)

-- 
  Oleksandr Natalenko (post-factum)



* Re: [PATCH 0/3] z3fold: stability / rt fixes
  2020-12-18 23:19 ` [PATCH 0/3] z3fold: stability / rt fixes Oleksandr Natalenko
@ 2020-12-18 23:26   ` Vitaly Wool
  0 siblings, 0 replies; 6+ messages in thread
From: Vitaly Wool @ 2020-12-18 23:26 UTC (permalink / raw)
  To: Oleksandr Natalenko
  Cc: Linux-MM, lkml, linux-rt-users, Sebastian Andrzej Siewior,
	Mike Galbraith, Andrew Morton


Hi Oleksandr,

On Sat, 19 Dec 2020, 00:19 Oleksandr Natalenko, <oleksandr@natalenko.name>
wrote:

> On Wed, Dec 09, 2020 at 04:51:48PM +0200, Vitaly Wool wrote:
> > This patchset addresses z3fold stability issues under stress load,
> > primarily in the reclaim and free aspects. Besides, it fixes the
> > locking problems that were only seen in real-time kernel
> > configuration.
> >
> > Vitaly Wool (2):
> >   z3fold: simplify freeing slots
> >   z3fold: stricter locking and more careful reclaim
> >
> > Mike Galbraith (1):
> >   z3fold: Remove preempt disabled sections for RT
> >
> >  mm/z3fold.c | 191 ++++++++++++++++++++++++++--------------------------
> >  1 file changed, 96 insertions(+), 95 deletions(-)
> >
> > --
> > 2.20.1
> >
>
> Thanks for addressing this!
>
> (I'd appreciate Cc'ing me on this though since I was the one who
> reported it initially...)
>

I'm glad it helped. Leaving you off the explicit CC list was surely
unintentional. In fact, I didn't know about the original issue; I was
just added by Sebastian to the ongoing conversation, so I believe I
CC'd everyone who took part in it.

Best regards,
   Vitaly


> --
>   Oleksandr Natalenko (post-factum)
>



