From: Yosry Ahmed <yosryahmed@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
	Nhat Pham <nphamcs@gmail.com>,
	Chengming Zhou <chengming.zhou@linux.dev>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Yosry Ahmed <yosryahmed@google.com>
Subject: [RFC PATCH 9/9] mm: zswap: use zswap_entry_free() for partially initialized entries
Date: Mon, 25 Mar 2024 23:50:17 +0000
Message-ID: <20240325235018.2028408-10-yosryahmed@google.com>
In-Reply-To: <20240325235018.2028408-1-yosryahmed@google.com>

zswap_entry_free() performs four types of cleanups before freeing a
zswap_entry:
- Deletes the entry from the LRU.
- Frees compressed memory.
- Puts the pool reference.
- Uncharges the compressed memory and puts the objcg.

zswap_entry_free() always expects a fully initialized entry. Allow
zswap_entry_free() to handle partially initialized entries by making it
possible to identify which cleanups are needed, as follows:
- Allocate entries with __GFP_ZERO and initialize zswap_entry.lru when
  the entry is allocated. Pointers are NULL and the length is zero upon
  initialization.
- Use zswap_entry.length to identify if there is compressed memory to
  free. This is possible now that zero-filled pages are handled
  separately, so a length of zero means we did not successfully compress
  the page.
- Only initialize entry->objcg after the memory is charged in
  zswap_store().

With this in place, use zswap_entry_free() in the failure path of
zswap_store() to clean up partially initialized entries. This simplifies
the cleanup code in zswap_store(). While we are at it, rename the
remaining cleanup labels to more meaningful names.

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
---
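Reviewer note (not part of the commit message): the pattern applied here
is the usual "zero-initialize, then key each teardown step off a field
that is only set once the resource exists" idiom. Below is a minimal
userspace sketch of that idiom; all names in it (entry, entry_alloc,
entry_free, store) are hypothetical stand-ins, not the zswap code.

	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		void *buf;      /* NULL until "compression" allocates it */
		size_t length;  /* 0 until "compression" succeeds */
		int *pool;      /* NULL until a "pool reference" is taken */
	};

	/* calloc() plays the role of __GFP_ZERO: a fresh entry is all
	 * zeroes, so entry_free() can tell which steps were reached. */
	static struct entry *entry_alloc(void)
	{
		return calloc(1, sizeof(struct entry));
	}

	/* Safe for fully and partially initialized entries alike: each
	 * cleanup is keyed off a field set only once the resource exists. */
	static void entry_free(struct entry *e)
	{
		if (e->length)
			free(e->buf);
		if (e->pool)
			free(e->pool);
		free(e);
	}

	/* Every post-allocation failure funnels into a single label,
	 * mirroring the new free_entry label in zswap_store(). */
	static int store(struct entry **out)
	{
		struct entry *e = entry_alloc();

		if (!e)
			return -1;
		e->pool = malloc(sizeof(*e->pool)); /* cf. zswap_pool_current_get() */
		if (!e->pool)
			goto free_entry;
		e->buf = malloc(64);                /* cf. zswap_compress() */
		if (!e->buf)
			goto free_entry;
		e->length = 64;
		*out = e;
		return 0;
	free_entry:
		entry_free(e);
		return -1;
	}

	int main(void)
	{
		struct entry *e;

		if (store(&e) == 0) {
			printf("stored entry of length %zu\n", e->length);
			entry_free(e);
		}
		return 0;
	}

With this shape, one error label suffices no matter how far
initialization got, which is what lets the store_failed/put_pool/
freepage labels below collapse into a single free_entry label.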
 mm/zswap.c | 62 ++++++++++++++++++++++++++----------------------------
 1 file changed, 30 insertions(+), 32 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index 9357328d940af..c50f9df230ca3 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -774,12 +774,13 @@ void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
 **********************************/
 static struct kmem_cache *zswap_entry_cache;
 
-static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
+static struct zswap_entry *zswap_entry_cache_alloc(int nid)
 {
 	struct zswap_entry *entry;
-	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
-	if (!entry)
-		return NULL;
+	entry = kmem_cache_alloc_node(zswap_entry_cache,
+				      GFP_KERNEL | __GFP_ZERO, nid);
+	if (entry)
+		INIT_LIST_HEAD(&entry->lru);
 	return entry;
 }
 
@@ -795,9 +796,12 @@ static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
 
 static void zswap_entry_free(struct zswap_entry *entry)
 {
-	zswap_lru_del(&zswap_list_lru, entry);
-	zpool_free(zswap_find_zpool(entry), entry->handle);
-	zswap_pool_put(entry->pool);
+	if (!list_empty(&entry->lru))
+		zswap_lru_del(&zswap_list_lru, entry);
+	if (entry->length)
+		zpool_free(zswap_find_zpool(entry), entry->handle);
+	if (entry->pool)
+		zswap_pool_put(entry->pool);
 	if (entry->objcg) {
 		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
 		obj_cgroup_put(entry->objcg);
@@ -1447,7 +1451,7 @@ bool zswap_store(struct folio *folio)
 		return false;
 
 	if (!zswap_enabled)
-		goto check_old;
+		goto erase_old;
 
 	/* Check cgroup limits */
 	objcg = get_obj_cgroup_from_folio(folio);
@@ -1455,54 +1459,52 @@ bool zswap_store(struct folio *folio)
 		memcg = get_mem_cgroup_from_objcg(objcg);
 		if (shrink_memcg(memcg)) {
 			mem_cgroup_put(memcg);
-			goto reject;
+			goto put_objcg;
 		}
 		mem_cgroup_put(memcg);
 	}
 
 	if (zswap_is_folio_zero_filled(folio)) {
 		if (zswap_store_zero_filled(tree, offset, objcg))
-			goto reject;
+			goto put_objcg;
 		goto stored;
 	}
 
 	if (!zswap_non_zero_filled_pages_enabled)
-		goto reject;
+		goto put_objcg;
 
 	if (!zswap_check_limit())
-		goto reject;
+		goto put_objcg;
 
-	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
+	entry = zswap_entry_cache_alloc(folio_nid(folio));
 	if (!entry) {
 		zswap_reject_kmemcache_fail++;
-		goto reject;
+		goto put_objcg;
 	}
 
-	/* if entry is successfully added, it keeps the reference */
 	entry->pool = zswap_pool_current_get();
 	if (!entry->pool)
-		goto freepage;
+		goto free_entry;
 
 	if (objcg) {
 		memcg = get_mem_cgroup_from_objcg(objcg);
 		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
 			mem_cgroup_put(memcg);
-			goto put_pool;
+			goto free_entry;
 		}
 		mem_cgroup_put(memcg);
 	}
 
 	if (!zswap_compress(folio, entry))
-		goto put_pool;
-
-	entry->swpentry = swp;
-	entry->objcg = objcg;
+		goto free_entry;
 
 	if (zswap_tree_store(tree, offset, entry))
-		goto store_failed;
+		goto free_entry;
 
-	if (objcg)
+	if (objcg) {
 		obj_cgroup_charge_zswap(objcg, entry->length);
+		entry->objcg = objcg;
+	}
 
 	/*
 	 * We finish initializing the entry while it's already in xarray.
@@ -1514,7 +1516,7 @@ bool zswap_store(struct folio *folio)
 	 *    The publishing order matters to prevent writeback from seeing
 	 *    an incoherent entry.
 	 */
-	INIT_LIST_HEAD(&entry->lru);
+	entry->swpentry = swp;
 	zswap_lru_add(&zswap_list_lru, entry);
 
 stored:
@@ -1525,17 +1527,13 @@ bool zswap_store(struct folio *folio)
 
 	return true;
 
-store_failed:
-	zpool_free(zswap_find_zpool(entry), entry->handle);
-put_pool:
-	zswap_pool_put(entry->pool);
-freepage:
-	zswap_entry_cache_free(entry);
-reject:
+free_entry:
+	zswap_entry_free(entry);
+put_objcg:
 	obj_cgroup_put(objcg);
 	if (zswap_pool_reached_full)
 		queue_work(shrink_wq, &zswap_shrink_work);
-check_old:
+erase_old:
 	/*
 	 * If the zswap store fails or zswap is disabled, we must invalidate the
 	 * possibly stale entry which was previously stored at this offset.
-- 
2.44.0.396.g6e790dbe36-goog

