All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/3] mm/zsmalloc: some cleanup for get/set_zspage_mapping()
@ 2024-02-20  6:52 Chengming Zhou
  2024-02-20  6:53 ` [PATCH 1/3] mm/zsmalloc: remove set_zspage_mapping() Chengming Zhou
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Chengming Zhou @ 2024-02-20  6:52 UTC (permalink / raw)
  To: nphamcs, Andrew Morton, Sergey Senozhatsky, hannes, yosryahmed,
	Minchan Kim
  Cc: Chengming Zhou, linux-kernel, linux-mm

The discussion[1] with Sergey shows there is some cleanup work to do
in get/set_zspage_mapping():

- the fullness returned from get_zspage_mapping() is not stable outside
  pool->lock; this usage pattern is confusing, but it should be ok in this
  free_zspage path.

- we seldom use the class_idx returned from get_zspage_mapping(); only
  the free_zspage path uses it to get its class.

- set_zspage_mapping() always sets the zspage->class, but it's never
  changed after the zspage is allocated.

[1] https://lore.kernel.org/all/a6c22e30-cf10-4122-91bc-ceb9fb57a5d6@bytedance.com/

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
Chengming Zhou (3):
      mm/zsmalloc: remove set_zspage_mapping()
      mm/zsmalloc: remove_zspage() don't need fullness parameter
      mm/zsmalloc: remove get_zspage_mapping()

 mm/zsmalloc.c | 55 +++++++++++++------------------------------------------
 1 file changed, 13 insertions(+), 42 deletions(-)
---
base-commit: 207636f0f52428f3b46540b212d6f93c6ac484cf
change-id: 20240220-b4-zsmalloc-cleanup-560a4159bb6b

Best regards,
-- 
Chengming Zhou <zhouchengming@bytedance.com>

^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 1/3] mm/zsmalloc: remove set_zspage_mapping()
  2024-02-20  6:52 [PATCH 0/3] mm/zsmalloc: some cleanup for get/set_zspage_mapping() Chengming Zhou
@ 2024-02-20  6:53 ` Chengming Zhou
  2024-02-20  6:53 ` [PATCH 2/3] mm/zsmalloc: remove_zspage() don't need fullness parameter Chengming Zhou
  2024-02-20  6:53 ` [PATCH 3/3] mm/zsmalloc: remove get_zspage_mapping() Chengming Zhou
  2 siblings, 0 replies; 4+ messages in thread
From: Chengming Zhou @ 2024-02-20  6:53 UTC (permalink / raw)
  To: nphamcs, Andrew Morton, Sergey Senozhatsky, hannes, yosryahmed,
	Minchan Kim
  Cc: Chengming Zhou, linux-kernel, linux-mm

We only need to update zspage->fullness in insert_zspage(), since
zspage->class is never changed after the zspage is allocated.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 mm/zsmalloc.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a48f4651d143..a6653915bf17 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -486,14 +486,6 @@ static struct size_class *zspage_class(struct zs_pool *pool,
 	return pool->size_class[zspage->class];
 }
 
-static void set_zspage_mapping(struct zspage *zspage,
-			       unsigned int class_idx,
-			       int fullness)
-{
-	zspage->class = class_idx;
-	zspage->fullness = fullness;
-}
-
 /*
  * zsmalloc divides the pool into various size classes where each
  * class maintains a list of zspages where each zspage is divided
@@ -688,6 +680,7 @@ static void insert_zspage(struct size_class *class,
 {
 	class_stat_inc(class, fullness, 1);
 	list_add(&zspage->list, &class->fullness_list[fullness]);
+	zspage->fullness = fullness;
 }
 
 /*
@@ -725,7 +718,6 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 
 	remove_zspage(class, zspage, currfg);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class_idx, newfg);
 out:
 	return newfg;
 }
@@ -1005,6 +997,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	create_page_chain(class, zspage, pages);
 	init_zspage(class, zspage);
 	zspage->pool = pool;
+	zspage->class = class->index;
 
 	return zspage;
 }
@@ -1397,7 +1390,6 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	obj = obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class->index, newfg);
 	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
 	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
@@ -1655,7 +1647,6 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
 
 	fullness = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, fullness);
-	set_zspage_mapping(zspage, class->index, fullness);
 
 	return fullness;
 }

-- 
b4 0.10.1

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 2/3] mm/zsmalloc: remove_zspage() don't need fullness parameter
  2024-02-20  6:52 [PATCH 0/3] mm/zsmalloc: some cleanup for get/set_zspage_mapping() Chengming Zhou
  2024-02-20  6:53 ` [PATCH 1/3] mm/zsmalloc: remove set_zspage_mapping() Chengming Zhou
@ 2024-02-20  6:53 ` Chengming Zhou
  2024-02-20  6:53 ` [PATCH 3/3] mm/zsmalloc: remove get_zspage_mapping() Chengming Zhou
  2 siblings, 0 replies; 4+ messages in thread
From: Chengming Zhou @ 2024-02-20  6:53 UTC (permalink / raw)
  To: nphamcs, Andrew Morton, Sergey Senozhatsky, hannes, yosryahmed,
	Minchan Kim
  Cc: Chengming Zhou, linux-kernel, linux-mm

We must remove_zspage() from its current fullness list, then use
insert_zspage() to update its fullness and insert it into the new
fullness list. Obviously, remove_zspage() doesn't need the fullness
parameter.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 mm/zsmalloc.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a6653915bf17..c39fac9361d7 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -687,10 +687,10 @@ static void insert_zspage(struct size_class *class,
  * This function removes the given zspage from the freelist identified
  * by <class, fullness_group>.
  */
-static void remove_zspage(struct size_class *class,
-				struct zspage *zspage,
-				int fullness)
+static void remove_zspage(struct size_class *class, struct zspage *zspage)
 {
+	int fullness = zspage->fullness;
+
 	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
 
 	list_del_init(&zspage->list);
@@ -716,7 +716,7 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 	if (newfg == currfg)
 		goto out;
 
-	remove_zspage(class, zspage, currfg);
+	remove_zspage(class, zspage);
 	insert_zspage(class, zspage, newfg);
 out:
 	return newfg;
@@ -878,7 +878,7 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class,
 		return;
 	}
 
-	remove_zspage(class, zspage, ZS_INUSE_RATIO_0);
+	remove_zspage(class, zspage);
 	__free_zspage(pool, class, zspage);
 }
 
@@ -1609,7 +1609,7 @@ static struct zspage *isolate_src_zspage(struct size_class *class)
 		zspage = list_first_entry_or_null(&class->fullness_list[fg],
 						  struct zspage, list);
 		if (zspage) {
-			remove_zspage(class, zspage, fg);
+			remove_zspage(class, zspage);
 			return zspage;
 		}
 	}
@@ -1626,7 +1626,7 @@ static struct zspage *isolate_dst_zspage(struct size_class *class)
 		zspage = list_first_entry_or_null(&class->fullness_list[fg],
 						  struct zspage, list);
 		if (zspage) {
-			remove_zspage(class, zspage, fg);
+			remove_zspage(class, zspage);
 			return zspage;
 		}
 	}

-- 
b4 0.10.1

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH 3/3] mm/zsmalloc: remove get_zspage_mapping()
  2024-02-20  6:52 [PATCH 0/3] mm/zsmalloc: some cleanup for get/set_zspage_mapping() Chengming Zhou
  2024-02-20  6:53 ` [PATCH 1/3] mm/zsmalloc: remove set_zspage_mapping() Chengming Zhou
  2024-02-20  6:53 ` [PATCH 2/3] mm/zsmalloc: remove_zspage() don't need fullness parameter Chengming Zhou
@ 2024-02-20  6:53 ` Chengming Zhou
  2 siblings, 0 replies; 4+ messages in thread
From: Chengming Zhou @ 2024-02-20  6:53 UTC (permalink / raw)
  To: nphamcs, Andrew Morton, Sergey Senozhatsky, hannes, yosryahmed,
	Minchan Kim
  Cc: Chengming Zhou, linux-kernel, linux-mm

Actually we seldom use the class_idx returned from get_zspage_mapping();
only the zspage->fullness is useful, so just use zspage->fullness directly
and remove this helper.

Note zspage->fullness is not stable outside pool->lock, so remove the
redundant "VM_BUG_ON(fullness != ZS_INUSE_RATIO_0)" in async_free_zspage(),
since we already have the same VM_BUG_ON() in __free_zspage(), where it is
safe to access zspage->fullness with pool->lock held.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 mm/zsmalloc.c | 28 ++++------------------------
 1 file changed, 4 insertions(+), 24 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c39fac9361d7..63ec385cd670 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -470,16 +470,6 @@ static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
 	zspage->freeobj = obj;
 }
 
-static void get_zspage_mapping(struct zspage *zspage,
-			       unsigned int *class_idx,
-			       int *fullness)
-{
-	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
-
-	*fullness = zspage->fullness;
-	*class_idx = zspage->class;
-}
-
 static struct size_class *zspage_class(struct zs_pool *pool,
 				       struct zspage *zspage)
 {
@@ -708,12 +698,10 @@ static void remove_zspage(struct size_class *class, struct zspage *zspage)
  */
 static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 {
-	int class_idx;
-	int currfg, newfg;
+	int newfg;
 
-	get_zspage_mapping(zspage, &class_idx, &currfg);
 	newfg = get_fullness_group(class, zspage);
-	if (newfg == currfg)
+	if (newfg == zspage->fullness)
 		goto out;
 
 	remove_zspage(class, zspage);
@@ -835,15 +823,11 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 				struct zspage *zspage)
 {
 	struct page *page, *next;
-	int fg;
-	unsigned int class_idx;
-
-	get_zspage_mapping(zspage, &class_idx, &fg);
 
 	assert_spin_locked(&pool->lock);
 
 	VM_BUG_ON(get_zspage_inuse(zspage));
-	VM_BUG_ON(fg != ZS_INUSE_RATIO_0);
+	VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
 
 	next = page = get_first_page(zspage);
 	do {
@@ -1857,8 +1841,6 @@ static void async_free_zspage(struct work_struct *work)
 {
 	int i;
 	struct size_class *class;
-	unsigned int class_idx;
-	int fullness;
 	struct zspage *zspage, *tmp;
 	LIST_HEAD(free_pages);
 	struct zs_pool *pool = container_of(work, struct zs_pool,
@@ -1879,10 +1861,8 @@ static void async_free_zspage(struct work_struct *work)
 		list_del(&zspage->list);
 		lock_zspage(zspage);
 
-		get_zspage_mapping(zspage, &class_idx, &fullness);
-		VM_BUG_ON(fullness != ZS_INUSE_RATIO_0);
-		class = pool->size_class[class_idx];
 		spin_lock(&pool->lock);
+		class = zspage_class(pool, zspage);
 		__free_zspage(pool, class, zspage);
 		spin_unlock(&pool->lock);
 	}

-- 
b4 0.10.1

^ permalink raw reply related	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2024-02-20  6:53 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-02-20  6:52 [PATCH 0/3] mm/zsmalloc: some cleanup for get/set_zspage_mapping() Chengming Zhou
2024-02-20  6:53 ` [PATCH 1/3] mm/zsmalloc: remove set_zspage_mapping() Chengming Zhou
2024-02-20  6:53 ` [PATCH 2/3] mm/zsmalloc: remove_zspage() don't need fullness parameter Chengming Zhou
2024-02-20  6:53 ` [PATCH 3/3] mm/zsmalloc: remove get_zspage_mapping() Chengming Zhou

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.