Subject: [PATCH] mm: swap: async free swap slot cache entries
From: Chris Li <chrisl@kernel.org>
Date: 2023-12-22  6:25 UTC
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Wei Xu, Yu Zhao,
	Greg Thelen, Chun-Tse Shao, Suren Baghdasaryan,
	Yosry Ahmed,
	Brian Geffon, Minchan Kim, Michal Hocko, Mel Gorman, Huang Ying,
	Nhat Pham, Johannes Weiner, Kairui Song, Zhongkun He, Kemeng Shi,
	Barry Song, Chris Li

We discovered that the slowest 1% of swap page faults take 100us or more,
while 50% of swap faults complete in under 20us.

Further investigation shows that, for these long-tail faults, a large
portion of the time is spent in free_swap_slot().

The percpu cache of swap slots is freed in a batch of 64 entries inside
free_swap_slot(). These cache entries were accumulated from previous page
faults, which may be unrelated to the current process.
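
For context, the free path batches roughly like this (a simplified sketch
of the existing logic; names follow mm/swap_slots.c and its per-cpu
swp_slots cache, with the cache-enable and NULL checks omitted):

	void free_swap_slot(swp_entry_t entry)
	{
		struct swap_slots_cache *cache = raw_cpu_ptr(&swp_slots);

		spin_lock_irq(&cache->free_lock);
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * The expensive part: frees a batch of 64 entries
			 * synchronously, while the faulting task waits.
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	}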

Doing the batch free in the page fault handler causes longer
tail latencies and penalizes the current process.

Move the batch free (swapcache_free_entries()) out of the swap-in page
fault handler and into an async work queue to avoid such long-tail
latencies.
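
The deferral itself is the standard workqueue pattern; condensed, the
change below boils down to the following (an illustration of the approach
only, the diff is authoritative):

	/* Set up once per CPU, alongside the rest of the slots cache: */
	INIT_WORK(&cache->async_free, swapcache_async_free_entries);

	/*
	 * On the fault path, once slots_ret is full, hand the batch free
	 * to a worker instead of doing it inline:
	 */
	queue_work(swap_free_queue, &cache->async_free);

	/*
	 * The worker then takes cache->free_lock and calls
	 * swapcache_free_entries(cache->slots_ret, cache->n_ret) outside
	 * of any page fault.
	 */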

Testing:

Chun-Tse ran some benchmarks on Chromebooks, showing that zram_wait_metrics
improves by about 15% with 80% and 95% confidence.

I recently ran some experiments on about 1000 Google production machines.
They show that the fraction of swap-in faults landing in the long-tail
100us - 500us bucket drops dramatically:

platform	(100us-500us)		(0-100us)
A		1.12% -> 0.36%		98.47% -> 99.22%
B		0.65% -> 0.15%		98.96% -> 99.46%
C		0.61% -> 0.23%		98.96% -> 99.38%

Signed-off-by: Chris Li <chrisl@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: Wei Xu <weixugc@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Chun-Tse Shao <ctshao@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kairui Song <kasong@tencent.com>
Cc: Zhongkun He <hezhongkun.hzk@bytedance.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Barry Song <v-songbaohua@oppo.com>
---
 include/linux/swap_slots.h |  1 +
 mm/swap_slots.c            | 37 +++++++++++++++++++++++++++++--------
 2 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h
index 15adfb8c813a..67bc8fa30d63 100644
--- a/include/linux/swap_slots.h
+++ b/include/linux/swap_slots.h
@@ -19,6 +19,7 @@ struct swap_slots_cache {
 	spinlock_t	free_lock;  /* protects slots_ret, n_ret */
 	swp_entry_t	*slots_ret;
 	int		n_ret;
+	struct work_struct async_free;
 };
 
 void disable_swap_slots_cache_lock(void);
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 0bec1f705f8e..a3b306550732 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -42,8 +42,10 @@ static bool	swap_slot_cache_initialized;
 static DEFINE_MUTEX(swap_slots_cache_mutex);
 /* Serialize swap slots cache enable/disable operations */
 static DEFINE_MUTEX(swap_slots_cache_enable_mutex);
+static struct workqueue_struct *swap_free_queue;
 
 static void __drain_swap_slots_cache(unsigned int type);
+static void swapcache_async_free_entries(struct work_struct *data);
 
 #define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
 #define SLOTS_CACHE 0x1
@@ -149,6 +151,7 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 		spin_lock_init(&cache->free_lock);
 		cache->lock_initialized = true;
 	}
+	INIT_WORK(&cache->async_free, swapcache_async_free_entries);
 	cache->nr = 0;
 	cache->cur = 0;
 	cache->n_ret = 0;
@@ -269,6 +272,20 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache)
 	return cache->nr;
 }
 
+static void swapcache_async_free_entries(struct work_struct *data)
+{
+	struct swap_slots_cache *cache;
+
+	cache = container_of(data, struct swap_slots_cache, async_free);
+	spin_lock_irq(&cache->free_lock);
+	/* Swap slots cache may be deactivated before acquiring lock */
+	if (cache->slots_ret) {
+		swapcache_free_entries(cache->slots_ret, cache->n_ret);
+		cache->n_ret = 0;
+	}
+	spin_unlock_irq(&cache->free_lock);
+}
+
 void free_swap_slot(swp_entry_t entry)
 {
 	struct swap_slots_cache *cache;
@@ -282,17 +299,14 @@ void free_swap_slot(swp_entry_t entry)
 			goto direct_free;
 		}
 		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
-			/*
-			 * Return slots to global pool.
-			 * The current swap_map value is SWAP_HAS_CACHE.
-			 * Set it to 0 to indicate it is available for
-			 * allocation in global pool
-			 */
-			swapcache_free_entries(cache->slots_ret, cache->n_ret);
-			cache->n_ret = 0;
+			spin_unlock_irq(&cache->free_lock);
+			queue_work(swap_free_queue, &cache->async_free);
+			goto direct_free;
 		}
 		cache->slots_ret[cache->n_ret++] = entry;
 		spin_unlock_irq(&cache->free_lock);
+		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE)
+			queue_work(swap_free_queue, &cache->async_free);
 	} else {
 direct_free:
 		swapcache_free_entries(&entry, 1);
@@ -348,3 +362,10 @@ swp_entry_t folio_alloc_swap(struct folio *folio)
 	}
 	return entry;
 }
+
+static int __init async_queue_init(void)
+{
+	swap_free_queue = create_workqueue("async swap cache");
+	return 0;
+}
+subsys_initcall(async_queue_init);

---
base-commit: eacce8189e28717da6f44ee492b7404c636ae0de
change-id: 20231216-async-free-bef392015432

Best regards,
-- 
Chris Li <chrisl@kernel.org>

