From: Muchun Song <songmuchun@bytedance.com>
To: willy@infradead.org, akpm@linux-foundation.org,
	hannes@cmpxchg.org, mhocko@kernel.org, vdavydov.dev@gmail.com,
	shakeelb@google.com, guro@fb.com, shy828301@gmail.com,
	alexs@kernel.org, richard.weiyang@gmail.com, david@fromorbit.com,
	trond.myklebust@hammerspace.com, anna.schumaker@netapp.com,
	jaegeuk@kernel.org, chao@kernel.org, kari.argillander@gmail.com
Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, linux-nfs@vger.kernel.org,
	zhengqi.arch@bytedance.com, duanxiongchun@bytedance.com,
	fam.zheng@bytedance.com, smuchun@gmail.com,
	Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v5 08/16] xarray: use kmem_cache_alloc_lru to allocate xa_node
Date: Mon, 20 Dec 2021 16:56:41 +0800
Message-ID: <20211220085649.8196-9-songmuchun@bytedance.com>
In-Reply-To: <20211220085649.8196-1-songmuchun@bytedance.com>

The workingset code adds xa_node objects to the shadow_nodes list_lru, so
xa_node allocation should go through kmem_cache_alloc_lru(). Use
xas_set_lru() to pass the list_lru into which the xa_node may be inserted,
so that the xa_node's reclaim context is set up correctly.
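
For illustration only (not part of this patch), below is a minimal sketch of
how a caller could combine XA_STATE with the new xas_set_lru() helper so that
any xa_node allocated on its behalf is tracked on a given list_lru. The
example_store() function and its lru argument are hypothetical; only
xas_set_lru() is introduced by this series:

	#include <linux/xarray.h>
	#include <linux/list_lru.h>

	/*
	 * Hypothetical caller: associate a list_lru with the XArray
	 * operation state so that xa_node allocations made by xas_store()
	 * and xas_nomem() go through kmem_cache_alloc_lru() and end up
	 * tracked on that list_lru.
	 */
	static int example_store(struct xarray *xa, unsigned long index,
				 void *entry, struct list_lru *lru)
	{
		XA_STATE(xas, xa, index);

		xas_set_lru(&xas, lru);
		do {
			xas_lock(&xas);
			xas_store(&xas, entry);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		return xas_error(&xas);
	}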

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
---
 include/linux/swap.h   |  5 ++++-
 include/linux/xarray.h |  9 ++++++++-
 lib/xarray.c           | 10 +++++-----
 mm/workingset.c        |  2 +-
 4 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index d1ea44b31f19..1ae9d3473c02 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -334,9 +334,12 @@ void workingset_activation(struct folio *folio);
 
 /* Only track the nodes of mappings with shadow entries */
 void workingset_update_node(struct xa_node *node);
+extern struct list_lru shadow_nodes;
 #define mapping_set_update(xas, mapping) do {				\
-	if (!dax_mapping(mapping) && !shmem_mapping(mapping))		\
+	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
 		xas_set_update(xas, workingset_update_node);		\
+		xas_set_lru(xas, &shadow_nodes);			\
+	}								\
 } while (0)
 
 /* linux/mm/page_alloc.c */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index a91e3d90df8a..31f3e5ef3c7b 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1317,6 +1317,7 @@ struct xa_state {
 	struct xa_node *xa_node;
 	struct xa_node *xa_alloc;
 	xa_update_node_t xa_update;
+	struct list_lru *xa_lru;
 };
 
 /*
@@ -1336,7 +1337,8 @@ struct xa_state {
 	.xa_pad = 0,					\
 	.xa_node = XAS_RESTART,				\
 	.xa_alloc = NULL,				\
-	.xa_update = NULL				\
+	.xa_update = NULL,				\
+	.xa_lru = NULL,					\
 }
 
 /**
@@ -1613,6 +1615,11 @@ static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
 	xas->xa_update = update;
 }
 
+static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
+{
+	xas->xa_lru = lru;
+}
+
 /**
  * xas_next_entry() - Advance iterator to next present entry.
  * @xas: XArray operation state.
diff --git a/lib/xarray.c b/lib/xarray.c
index f5d8f54907b4..e9b818abc823 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -302,7 +302,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
 	}
 	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
 		gfp |= __GFP_ACCOUNT;
-	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+	xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 	if (!xas->xa_alloc)
 		return false;
 	xas->xa_alloc->parent = NULL;
@@ -334,10 +334,10 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
 		gfp |= __GFP_ACCOUNT;
 	if (gfpflags_allow_blocking(gfp)) {
 		xas_unlock_type(xas, lock_type);
-		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 		xas_lock_type(xas, lock_type);
 	} else {
-		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 	}
 	if (!xas->xa_alloc)
 		return false;
@@ -371,7 +371,7 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
 		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
 			gfp |= __GFP_ACCOUNT;
 
-		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 		if (!node) {
 			xas_set_err(xas, -ENOMEM);
 			return NULL;
@@ -1014,7 +1014,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 		void *sibling = NULL;
 		struct xa_node *node;
 
-		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 		if (!node)
 			goto nomem;
 		node->array = xas->xa;
diff --git a/mm/workingset.c b/mm/workingset.c
index 8c03afe1d67c..979c7130c266 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -429,7 +429,7 @@ void workingset_activation(struct folio *folio)
  * point where they would still be useful.
  */
 
-static struct list_lru shadow_nodes;
+struct list_lru shadow_nodes;
 
 void workingset_update_node(struct xa_node *node)
 {
-- 
2.11.0

