+ zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch added to mm-unstable branch
From: Andrew Morton @ 2022-11-08 20:03 UTC
To: mm-commits, vitaly.wool, sjenning, senozhatsky, ngupta, minchan,
hannes, ddstreet, nphamcs, akpm
The patch titled
Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
has been added to the -mm mm-unstable branch. Its filename is
zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
This patch will later appear in the mm-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: Nhat Pham <nphamcs@gmail.com>
Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
Date: Tue, 8 Nov 2022 11:32:05 -0800
This helps determine the coldest zspages as candidates for writeback.
Link: https://lkml.kernel.org/r/20221108193207.3297327-4-nphamcs@gmail.com
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/zsmalloc.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)
--- a/mm/zsmalloc.c~zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order
+++ a/mm/zsmalloc.c
@@ -231,6 +231,9 @@ struct zs_pool {
/* Compact classes */
struct shrinker shrinker;
+ /* List tracking the zspages in LRU order by most recently added object */
+ struct list_head lru;
+
#ifdef CONFIG_ZSMALLOC_STAT
struct dentry *stat_dentry;
#endif
@@ -252,6 +255,10 @@ struct zspage {
unsigned int freeobj;
struct page *first_page;
struct list_head list; /* fullness list */
+
+ /* links the zspage to the lru list in the pool */
+ struct list_head lru;
+
struct zs_pool *pool;
#ifdef CONFIG_COMPACTION
rwlock_t lock;
@@ -344,6 +351,16 @@ static void cache_free_zspage(struct zs_
kmem_cache_free(pool->zspage_cachep, zspage);
}
+/* Moves the zspage to the front of the zspool's LRU */
+static void move_to_front(struct zs_pool *pool, struct zspage *zspage)
+{
+ assert_spin_locked(&pool->lock);
+
+ if (!list_empty(&zspage->lru))
+ list_del(&zspage->lru);
+ list_add(&zspage->lru, &pool->lru);
+}
+
/* pool->lock(which owns the handle) synchronizes races */
static void record_obj(unsigned long handle, unsigned long obj)
{
@@ -948,6 +965,7 @@ static void free_zspage(struct zs_pool *
}
remove_zspage(class, zspage, ZS_EMPTY);
+ list_del(&zspage->lru);
__free_zspage(pool, class, zspage);
}
@@ -993,6 +1011,8 @@ static void init_zspage(struct size_clas
off %= PAGE_SIZE;
}
+ INIT_LIST_HEAD(&zspage->lru);
+
set_freeobj(zspage, 0);
}
@@ -1434,6 +1454,8 @@ unsigned long zs_malloc(struct zs_pool *
fix_fullness_group(class, zspage);
record_obj(handle, obj);
class_stat_inc(class, OBJ_USED, 1);
+ /* Move the zspage to front of pool's LRU */
+ move_to_front(pool, zspage);
spin_unlock(&pool->lock);
return handle;
@@ -1460,6 +1482,8 @@ unsigned long zs_malloc(struct zs_pool *
/* We completely set up zspage so mark them as movable */
SetZsPageMovable(pool, zspage);
+ /* Move the zspage to front of pool's LRU */
+ move_to_front(pool, zspage);
spin_unlock(&pool->lock);
return handle;
@@ -1983,6 +2007,7 @@ static void async_free_zspage(struct wor
VM_BUG_ON(fullness != ZS_EMPTY);
class = pool->size_class[class_idx];
spin_lock(&pool->lock);
+ list_del(&zspage->lru);
__free_zspage(pool, class, zspage);
spin_unlock(&pool->lock);
}
@@ -2313,6 +2338,8 @@ struct zs_pool *zs_create_pool(const cha
*/
zs_register_shrinker(pool);
+ INIT_LIST_HEAD(&pool->lru);
+
return pool;
err:
_
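
For reference, here is a minimal userspace sketch of the move_to_front()
idiom the patch adds.  The list helpers are simplified stand-ins for
include/linux/list.h so the example compiles standalone (the kernel's
list_del() poisons the entry rather than re-initializing it), and
struct zspage_sketch is an assumed name -- an illustration, not kernel
code:

/* build: cc -Wall lru_sketch.c */
#include <stdio.h>
#include <stddef.h>

struct list_head {
        struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->prev = h->next = h;
}

static int list_empty(const struct list_head *h)
{
        return h->next == h;
}

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        INIT_LIST_HEAD(e);      /* simplified: keeps list_empty() working */
}

static void list_add(struct list_head *e, struct list_head *head)
{
        /* insert right after head, i.e. at the front (MRU end) */
        e->next = head->next;
        e->prev = head;
        head->next->prev = e;
        head->next = e;
}

struct zspage_sketch {
        int id;
        struct list_head lru;
};

/* Mirrors the patch: unlink if already queued, then requeue at the front. */
static void move_to_front(struct list_head *pool_lru, struct zspage_sketch *z)
{
        if (!list_empty(&z->lru))
                list_del(&z->lru);
        list_add(&z->lru, pool_lru);
}

int main(void)
{
        struct list_head lru;
        struct zspage_sketch a = { .id = 1 }, b = { .id = 2 };
        struct zspage_sketch *cold;

        INIT_LIST_HEAD(&lru);
        INIT_LIST_HEAD(&a.lru);
        INIT_LIST_HEAD(&b.lru);

        move_to_front(&lru, &a);        /* LRU: a             */
        move_to_front(&lru, &b);        /* LRU: b, a          */
        move_to_front(&lru, &a);        /* LRU: a, b -- a hot */

        /* The coldest zspage -- the writeback candidate -- is at the tail. */
        cold = (struct zspage_sketch *)((char *)lru.prev -
                                        offsetof(struct zspage_sketch, lru));
        printf("coldest zspage: %d\n", cold->id);       /* prints 2 */
        return 0;
}

This prints "coldest zspage: 2": the most recently stored page sits at
the head, and reclaim would pull candidates from lru.prev, the tail.
In the kernel version, move_to_front() additionally requires pool->lock
to be held, as the assert_spin_locked() in the hunk above enforces.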
Patches currently in -mm which might be from nphamcs@gmail.com are
zsmalloc-consolidate-zs_pools-migrate_lock-and-size_classs-locks.patch
zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
zsmalloc-add-ops-fields-to-zs_pool-to-store-evict-handlers.patch
zsmalloc-implement-writeback-mechanism-for-zsmalloc.patch
+ zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch added to mm-unstable branch
From: Andrew Morton @ 2022-11-28 21:39 UTC
To: mm-commits, vitaly.wool, sjenning, senozhatsky, ngupta, minchan,
hannes, ddstreet, nphamcs, akpm
The patch titled
Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
has been added to the -mm mm-unstable branch. Its filename is
zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
This patch will later appear in the mm-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: Nhat Pham <nphamcs@gmail.com>
Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
Date: Mon, 28 Nov 2022 11:16:13 -0800
This helps determine the coldest zspages as candidates for writeback.
Link: https://lkml.kernel.org/r/20221128191616.1261026-5-nphamcs@gmail.com
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/zsmalloc.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 50 insertions(+)
--- a/mm/zsmalloc.c~zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order
+++ a/mm/zsmalloc.c
@@ -239,6 +239,11 @@ struct zs_pool {
/* Compact classes */
struct shrinker shrinker;
+#ifdef CONFIG_ZPOOL
+ /* List tracking the zspages in LRU order by most recently added object */
+ struct list_head lru;
+#endif
+
#ifdef CONFIG_ZSMALLOC_STAT
struct dentry *stat_dentry;
#endif
@@ -260,6 +265,12 @@ struct zspage {
unsigned int freeobj;
struct page *first_page;
struct list_head list; /* fullness list */
+
+#ifdef CONFIG_ZPOOL
+ /* links the zspage to the lru list in the pool */
+ struct list_head lru;
+#endif
+
struct zs_pool *pool;
#ifdef CONFIG_COMPACTION
rwlock_t lock;
@@ -953,6 +964,9 @@ static void free_zspage(struct zs_pool *
}
remove_zspage(class, zspage, ZS_EMPTY);
+#ifdef CONFIG_ZPOOL
+ list_del(&zspage->lru);
+#endif
__free_zspage(pool, class, zspage);
}
@@ -998,6 +1012,10 @@ static void init_zspage(struct size_clas
off %= PAGE_SIZE;
}
+#ifdef CONFIG_ZPOOL
+ INIT_LIST_HEAD(&zspage->lru);
+#endif
+
set_freeobj(zspage, 0);
}
@@ -1270,6 +1288,31 @@ void *zs_map_object(struct zs_pool *pool
obj_to_location(obj, &page, &obj_idx);
zspage = get_zspage(page);
+#ifdef CONFIG_ZPOOL
+ /*
+ * Move the zspage to front of pool's LRU.
+ *
+ * Note that this is swap-specific, so by definition there are no ongoing
+ * accesses to the memory while the page is swapped out that would make
+ * it "hot". A new entry is hot, then ages to the tail until it gets either
+ * written back or swaps back in.
+ *
+ * Furthermore, map is also called during writeback. We must not put an
+ * isolated page on the LRU mid-reclaim.
+ *
+ * As a result, only update the LRU when the page is mapped for write
+ * when it's first instantiated.
+ *
+ * This is a deviation from the other backends, which perform this update
+ * in the allocation function (zbud_alloc, z3fold_alloc).
+ */
+ if (mm == ZS_MM_WO) {
+ if (!list_empty(&zspage->lru))
+ list_del(&zspage->lru);
+ list_add(&zspage->lru, &pool->lru);
+ }
+#endif
+
/*
* migration cannot move any zpages in this zspage. Here, pool->lock
* is too heavy since callers would take some time until they calls
@@ -1988,6 +2031,9 @@ static void async_free_zspage(struct wor
VM_BUG_ON(fullness != ZS_EMPTY);
class = pool->size_class[class_idx];
spin_lock(&pool->lock);
+#ifdef CONFIG_ZPOOL
+ list_del(&zspage->lru);
+#endif
__free_zspage(pool, class, zspage);
spin_unlock(&pool->lock);
}
@@ -2299,6 +2345,10 @@ struct zs_pool *zs_create_pool(const cha
*/
zs_register_shrinker(pool);
+#ifdef CONFIG_ZPOOL
+ INIT_LIST_HEAD(&pool->lru);
+#endif
+
return pool;
err:
_
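
The comment block above is the substantive change in this revision: the
LRU update moves out of zs_malloc() and into zs_map_object(), gated on
a write mapping.  A sketch of that gate, reusing the list helpers and
struct zspage_sketch from the sketch earlier in this thread (enum
zs_mapmode matches include/linux/zsmalloc.h; the surrounding struct and
function names are assumed for illustration, not the kernel
implementation):

enum zs_mapmode { ZS_MM_RW, ZS_MM_RO, ZS_MM_WO };       /* as in zsmalloc */

struct pool_sketch {
        struct list_head lru;           /* head == hottest zspage */
};

static void on_map(struct pool_sketch *pool, struct zspage_sketch *zspage,
                   enum zs_mapmode mm)
{
        /*
         * Reads happen on swap-in and on writeback of an isolated page;
         * rotating the LRU there would either mark a departing page
         * "hot" or touch an isolated page mid-reclaim.  Only the
         * store-time write mapping updates the ordering.
         */
        if (mm != ZS_MM_WO)
                return;

        if (!list_empty(&zspage->lru))
                list_del(&zspage->lru);
        list_add(&zspage->lru, &pool->lru);
}

In practice a zswap store maps the object write-only and front-loads
the entry, while swap-in reads and the writeback path's own mappings
leave the ordering untouched, so an entry ages toward the tail exactly
as the comment describes.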
Patches currently in -mm which might be from nphamcs@gmail.com are
zsmalloc-consolidate-zs_pools-migrate_lock-and-size_classs-locks.patch
zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
zsmalloc-add-zpool_ops-field-to-zs_pool-to-store-evict-handlers.patch
zsmalloc-implement-writeback-mechanism-for-zsmalloc.patch
+ zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch added to mm-unstable branch
From: Andrew Morton @ 2022-11-21 22:35 UTC
To: mm-commits, vitaly.wool, sjenning, senozhatsky, ngupta, minchan,
hannes, ddstreet, nphamcs, akpm
The patch titled
Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
has been added to the -mm mm-unstable branch. Its filename is
zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
This patch will later appear in the mm-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: Nhat Pham <nphamcs@gmail.com>
Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
Date: Fri, 18 Nov 2022 16:15:34 -0800
This helps determine the coldest zspages as candidates for writeback.
Link: https://lkml.kernel.org/r/20221119001536.2086599-5-nphamcs@gmail.com
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/zsmalloc.c | 34 ++++++++++++++++++++++++++++++++++
1 file changed, 34 insertions(+)
--- a/mm/zsmalloc.c~zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order
+++ a/mm/zsmalloc.c
@@ -239,6 +239,11 @@ struct zs_pool {
/* Compact classes */
struct shrinker shrinker;
+#ifdef CONFIG_ZPOOL
+ /* List tracking the zspages in LRU order by most recently added object */
+ struct list_head lru;
+#endif
+
#ifdef CONFIG_ZSMALLOC_STAT
struct dentry *stat_dentry;
#endif
@@ -260,6 +265,12 @@ struct zspage {
unsigned int freeobj;
struct page *first_page;
struct list_head list; /* fullness list */
+
+#ifdef CONFIG_ZPOOL
+ /* links the zspage to the lru list in the pool */
+ struct list_head lru;
+#endif
+
struct zs_pool *pool;
#ifdef CONFIG_COMPACTION
rwlock_t lock;
@@ -953,6 +964,9 @@ static void free_zspage(struct zs_pool *
}
remove_zspage(class, zspage, ZS_EMPTY);
+#ifdef CONFIG_ZPOOL
+ list_del(&zspage->lru);
+#endif
__free_zspage(pool, class, zspage);
}
@@ -998,6 +1012,10 @@ static void init_zspage(struct size_clas
off %= PAGE_SIZE;
}
+#ifdef CONFIG_ZPOOL
+ INIT_LIST_HEAD(&zspage->lru);
+#endif
+
set_freeobj(zspage, 0);
}
@@ -1270,6 +1288,15 @@ void *zs_map_object(struct zs_pool *pool
obj_to_location(obj, &page, &obj_idx);
zspage = get_zspage(page);
+#ifdef CONFIG_ZPOOL
+ /* Move the zspage to front of pool's LRU */
+ if (mm == ZS_MM_WO) {
+ if (!list_empty(&zspage->lru))
+ list_del(&zspage->lru);
+ list_add(&zspage->lru, &pool->lru);
+ }
+#endif
+
/*
* migration cannot move any zpages in this zspage. Here, pool->lock
* is too heavy since callers would take some time until they calls
@@ -1988,6 +2015,9 @@ static void async_free_zspage(struct wor
VM_BUG_ON(fullness != ZS_EMPTY);
class = pool->size_class[class_idx];
spin_lock(&pool->lock);
+#ifdef CONFIG_ZPOOL
+ list_del(&zspage->lru);
+#endif
__free_zspage(pool, class, zspage);
spin_unlock(&pool->lock);
}
@@ -2299,6 +2329,10 @@ struct zs_pool *zs_create_pool(const cha
*/
zs_register_shrinker(pool);
+#ifdef CONFIG_ZPOOL
+ INIT_LIST_HEAD(&pool->lru);
+#endif
+
return pool;
err:
_
Patches currently in -mm which might be from nphamcs@gmail.com are
zsmalloc-consolidate-zs_pools-migrate_lock-and-size_classs-locks.patch
zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
zsmalloc-add-zpool_ops-field-to-zs_pool-to-store-evict-handlers.patch
zsmalloc-implement-writeback-mechanism-for-zsmalloc.patch
+ zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch added to mm-unstable branch
From: Andrew Morton @ 2022-10-28 21:40 UTC
To: mm-commits, vitaly.wool, sjenning, senozhatsky, ngupta, minchan,
hannes, ddstreet, nphamcs, akpm
The patch titled
Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
has been added to the -mm mm-unstable branch. Its filename is
zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
This patch will later appear in the mm-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: Nhat Pham <nphamcs@gmail.com>
Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
Date: Wed, 26 Oct 2022 13:06:11 -0700
This helps determine the coldest zspages as candidates for writeback.
Link: https://lkml.kernel.org/r/20221026200613.1031261-4-nphamcs@gmail.com
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/zsmalloc.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)
---
--- a/mm/zsmalloc.c~zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order
+++ a/mm/zsmalloc.c
@@ -231,6 +231,9 @@ struct zs_pool {
/* Compact classes */
struct shrinker shrinker;
+ /* List tracking the zspages in LRU order by most recently added object */
+ struct list_head lru;
+
#ifdef CONFIG_ZSMALLOC_STAT
struct dentry *stat_dentry;
#endif
@@ -252,6 +255,10 @@ struct zspage {
unsigned int freeobj;
struct page *first_page;
struct list_head list; /* fullness list */
+
+ /* links the zspage to the lru list in the pool */
+ struct list_head lru;
+
struct zs_pool *pool;
#ifdef CONFIG_COMPACTION
rwlock_t lock;
@@ -344,6 +351,16 @@ static void cache_free_zspage(struct zs_
kmem_cache_free(pool->zspage_cachep, zspage);
}
+/* Moves the zspage to the front of the zspool's LRU */
+static void move_to_front(struct zs_pool *pool, struct zspage *zspage)
+{
+ assert_spin_locked(&pool->lock);
+
+ if (!list_empty(&zspage->lru))
+ list_del(&zspage->lru);
+ list_add(&zspage->lru, &pool->lru);
+}
+
/* pool->lock(which owns the handle) synchronizes races */
static void record_obj(unsigned long handle, unsigned long obj)
{
@@ -948,6 +965,7 @@ static void free_zspage(struct zs_pool *
}
remove_zspage(class, zspage, ZS_EMPTY);
+ list_del(&zspage->lru);
__free_zspage(pool, class, zspage);
}
@@ -993,6 +1011,8 @@ static void init_zspage(struct size_clas
off %= PAGE_SIZE;
}
+ INIT_LIST_HEAD(&zspage->lru);
+
set_freeobj(zspage, 0);
}
@@ -1434,6 +1454,8 @@ unsigned long zs_malloc(struct zs_pool *
fix_fullness_group(class, zspage);
record_obj(handle, obj);
class_stat_inc(class, OBJ_USED, 1);
+ /* Move the zspage to front of pool's LRU */
+ move_to_front(pool, zspage);
spin_unlock(&pool->lock);
return handle;
@@ -1460,6 +1482,8 @@ unsigned long zs_malloc(struct zs_pool *
/* We completely set up zspage so mark them as movable */
SetZsPageMovable(pool, zspage);
+ /* Move the zspage to front of pool's LRU */
+ move_to_front(pool, zspage);
spin_unlock(&pool->lock);
return handle;
@@ -1983,6 +2007,7 @@ static void async_free_zspage(struct wor
VM_BUG_ON(fullness != ZS_EMPTY);
class = pool->size_class[class_idx];
spin_lock(&pool->lock);
+ list_del(&zspage->lru);
__free_zspage(pool, class, zspage);
spin_unlock(&pool->lock);
}
@@ -2315,6 +2340,8 @@ struct zs_pool *zs_create_pool(const cha
*/
zs_register_shrinker(pool);
+ INIT_LIST_HEAD(&pool->lru);
+
return pool;
err:
_
Patches currently in -mm which might be from nphamcs@gmail.com are
zsmalloc-consolidate-zs_pools-migrate_lock-and-size_classs-locks.patch
zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch
zsmalloc-add-ops-fields-to-zs_pool-to-store-evict-handlers.patch
zsmalloc-implement-writeback-mechanism-for-zsmalloc.patch