From: Lincheng Yang <lincheng.yang@transsion.corp-partner.google.com>
To: akpm@linux-foundation.org, rostedt@goodmis.org,
	mhiramat@kernel.org, willy@infradead.org, hughd@google.com,
	peterx@redhat.com, mike.kravetz@oracle.com, jgg@ziepe.ca,
	surenb@google.com, steven.price@arm.com,
	pasha.tatashin@soleen.com, kirill.shutemov@linux.intel.com,
	yuanchu@google.com, david@redhat.com,
	mathieu.desnoyers@efficios.com, dhowells@redhat.com,
	shakeelb@google.com, pcc@google.com, tytso@mit.edu,
	42.hyeyoo@gmail.com, vbabka@suse.cz, catalin.marinas@arm.com,
	lrh2000@pku.edu.cn, ying.huang@intel.com, mhocko@suse.com,
	vishal.moola@gmail.com, yosryahmed@google.com,
	findns94@gmail.com, neilb@suse.de
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	wanbin.wang@transsion.com, chunlei.zhuang@transsion.com,
	jinsheng.zhao@transsion.com, jiajun.ling@transsion.com,
	dongyun.liu@transsion.com,
	Lincheng Yang <lincheng.yang@transsion.com>
Subject: [RFC PATCH 1/5] mm/swap_slots: cleanup swap slot cache
Date: Sun,  8 Oct 2023 17:59:20 +0800
Message-ID: <20231008095924.1165106-2-lincheng.yang@transsion.com>
In-Reply-To: <20231008095924.1165106-1-lincheng.yang@transsion.com>

Clean up the swap slot cache code by factoring the per-CPU cache
operations out into helpers that take a struct swap_slots_cache
pointer; alloc_swap_slot_cache(), drain_slots_cache_cpu(),
free_swap_slot() and folio_alloc_swap() become thin wrappers around
them. This prepares for subsequent modifications.
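
To illustrate the resulting structure (taken from the diff below),
free_swap_slot() becomes a thin wrapper that looks up the per-CPU
cache and delegates to the new __free_swap_slot() helper:

	void free_swap_slot(swp_entry_t entry)
	{
		struct swap_slots_cache *cache;

		cache = raw_cpu_ptr(&swp_slots);
		__free_swap_slot(cache, entry);
	}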

Signed-off-by: Lincheng Yang <lincheng.yang@transsion.com>
---
 mm/swap_slots.c | 111 ++++++++++++++++++++++++++++--------------------
 1 file changed, 66 insertions(+), 45 deletions(-)

diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 0bec1f705f8e..bb41c8460b62 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -110,11 +110,13 @@ static bool check_cache_active(void)
 	return swap_slot_cache_active;
 }
 
-static int alloc_swap_slot_cache(unsigned int cpu)
+static int __alloc_swap_slot_cache(struct swap_slots_cache *cache)
 {
-	struct swap_slots_cache *cache;
 	swp_entry_t *slots, *slots_ret;
 
+	if (!cache)
+		return 0;
+
 	/*
 	 * Do allocation outside swap_slots_cache_mutex
 	 * as kvzalloc could trigger reclaim and folio_alloc_swap,
@@ -133,17 +135,6 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 	}
 
 	mutex_lock(&swap_slots_cache_mutex);
-	cache = &per_cpu(swp_slots, cpu);
-	if (cache->slots || cache->slots_ret) {
-		/* cache already allocated */
-		mutex_unlock(&swap_slots_cache_mutex);
-
-		kvfree(slots);
-		kvfree(slots_ret);
-
-		return 0;
-	}
-
 	if (!cache->lock_initialized) {
 		mutex_init(&cache->alloc_lock);
 		spin_lock_init(&cache->free_lock);
@@ -165,13 +156,26 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 	return 0;
 }
 
-static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
-				  bool free_slots)
+static int alloc_swap_slot_cache(unsigned int cpu)
 {
 	struct swap_slots_cache *cache;
-	swp_entry_t *slots = NULL;
 
+	mutex_lock(&swap_slots_cache_mutex);
 	cache = &per_cpu(swp_slots, cpu);
+	if (cache->slots || cache->slots_ret)   /* cache already allocated */
+		cache = NULL;
+	mutex_unlock(&swap_slots_cache_mutex);
+
+	__alloc_swap_slot_cache(cache);
+
+	return 0;
+}
+
+static void __drain_slots_cache_cpu(struct swap_slots_cache *cache,
+				    unsigned int type, bool free_slots)
+{
+	swp_entry_t *slots = NULL;
+
 	if ((type & SLOTS_CACHE) && cache->slots) {
 		mutex_lock(&cache->alloc_lock);
 		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
@@ -196,6 +200,15 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
 	}
 }
 
+static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
+				  bool free_slots)
+{
+	struct swap_slots_cache *cache;
+
+	cache = &per_cpu(swp_slots, cpu);
+	__drain_slots_cache_cpu(cache, type, free_slots);
+}
+
 static void __drain_swap_slots_cache(unsigned int type)
 {
 	unsigned int cpu;
@@ -269,11 +282,8 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache)
 	return cache->nr;
 }
 
-void free_swap_slot(swp_entry_t entry)
+static void __free_swap_slot(struct swap_slots_cache *cache, swp_entry_t entry)
 {
-	struct swap_slots_cache *cache;
-
-	cache = raw_cpu_ptr(&swp_slots);
 	if (likely(use_swap_slot_cache && cache->slots_ret)) {
 		spin_lock_irq(&cache->free_lock);
 		/* Swap slots cache may be deactivated before acquiring lock */
@@ -299,18 +309,18 @@ void free_swap_slot(swp_entry_t entry)
 	}
 }
 
-swp_entry_t folio_alloc_swap(struct folio *folio)
+void free_swap_slot(swp_entry_t entry)
 {
-	swp_entry_t entry;
 	struct swap_slots_cache *cache;
 
-	entry.val = 0;
+	cache = raw_cpu_ptr(&swp_slots);
+	__free_swap_slot(cache, entry);
+}
 
-	if (folio_test_large(folio)) {
-		if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported())
-			get_swap_pages(1, &entry, folio_nr_pages(folio));
-		goto out;
-	}
+static int __folio_alloc_swap(struct swap_slots_cache *cache, swp_entry_t *entry)
+{
+	if (unlikely(!check_cache_active() || !cache->slots))
+		return -EINVAL;
 
 	/*
 	 * Preemption is allowed here, because we may sleep
@@ -321,26 +331,37 @@ swp_entry_t folio_alloc_swap(struct folio *folio)
 	 * The alloc path here does not touch cache->slots_ret
 	 * so cache->free_lock is not taken.
 	 */
-	cache = raw_cpu_ptr(&swp_slots);
-
-	if (likely(check_cache_active() && cache->slots)) {
-		mutex_lock(&cache->alloc_lock);
-		if (cache->slots) {
+	mutex_lock(&cache->alloc_lock);
 repeat:
-			if (cache->nr) {
-				entry = cache->slots[cache->cur];
-				cache->slots[cache->cur++].val = 0;
-				cache->nr--;
-			} else if (refill_swap_slots_cache(cache)) {
-				goto repeat;
-			}
-		}
-		mutex_unlock(&cache->alloc_lock);
-		if (entry.val)
-			goto out;
+	if (cache->nr) {
+		*entry = cache->slots[cache->cur];
+		cache->slots[cache->cur++].val = 0;
+		cache->nr--;
+	} else if (refill_swap_slots_cache(cache)) {
+		goto repeat;
 	}
+	mutex_unlock(&cache->alloc_lock);
+
+	return !!entry->val;
+}
+
+swp_entry_t folio_alloc_swap(struct folio *folio)
+{
+	swp_entry_t entry;
+	struct swap_slots_cache *cache;
+
+	entry.val = 0;
+
+	if (folio_test_large(folio)) {
+		if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported())
+			get_swap_pages(1, &entry, folio_nr_pages(folio));
+		goto out;
+	}
+
+	cache = raw_cpu_ptr(&swp_slots);
+	if (__folio_alloc_swap(cache, &entry))
+		get_swap_pages(1, &entry, 1);
 
-	get_swap_pages(1, &entry, 1);
 out:
 	if (mem_cgroup_try_charge_swap(folio, entry)) {
 		put_swap_folio(folio, entry);
-- 
2.34.1




Thread overview: 8+ messages
2023-10-08  9:59 [RFC PATCH 0/5] hot page swap to zram, cold page swap to swapfile directly Lincheng Yang
2023-10-08  9:59 ` Lincheng Yang [this message]
2023-10-08  9:59 ` [RFC PATCH 2/5] mm: introduce hot and cold anon page flags Lincheng Yang
2023-10-08  9:59 ` [RFC PATCH 3/5] mm: add VMA hot flag Lincheng Yang
2023-10-08  9:59 ` [RFC PATCH 4/5] mm: add page implyreclaim flag Lincheng Yang
2023-10-08 11:07   ` Matthew Wilcox
2023-10-10  3:27     ` Lincheng Yang
2023-10-08  9:59 ` [RFC PATCH 5/5] mm/swapfile: add swapfile_write_enable interface Lincheng Yang
