* [PATCH 1/4] mm/ksm: rename mm_slot members to ksm_slot for better readability.
@ 2024-04-28 10:06 alexs
  2024-04-28 10:06 ` [PATCH 2/4] mm/ksm: rename variable mm_slot to ksm_slot in unmerge_and_remove_all_rmap_items alexs
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: alexs @ 2024-04-28 10:06 UTC
  To: Andrew Morton, linux-mm, linux-kernel, willy, izik.eidus
  Cc: Alex Shi (tencent), David Hildenbrand

From: "Alex Shi (tencent)" <alexs@kernel.org>

struct mm_slot is the generic per-mm slot, and the ksm_mm_slot pointer in
the ksm_scan struct is named mm_slot as well. Furthermore, ksm_mm_slot
pointers in functions are again named mm_slot, right alongside 'struct
mm_slot' variables. That makes the code considerably harder to read.

struct ksm_mm_slot {
        struct mm_slot slot;
	...
};

struct ksm_scan {
        struct ksm_mm_slot *mm_slot;
	...
};

int __ksm_enter(struct mm_struct *mm)
{
        struct ksm_mm_slot *mm_slot;
        struct mm_slot *slot;
	...
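
For reference, the generic slot that ksm_mm_slot wraps looks like this (a
minimal sketch following mm/mm_slot.h; see that header for the
authoritative definition):

struct mm_slot {
        struct hlist_node hash;
        struct list_head mm_node;
        struct mm_struct *mm;
};

and code converts between the two all the time, e.g.:

        slot = mm_slot_lookup(mm_slots_hash, mm);
        mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);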

So let's rename the mm_slot member of ksm_scan to ksm_slot, and use
ksm_slot for ksm_mm_slot* variables in functions, to reduce the confusion.

 struct ksm_scan {
-       struct ksm_mm_slot *mm_slot;
+       struct ksm_mm_slot *ksm_slot;

Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
---
 mm/ksm.c | 84 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 42 insertions(+), 42 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 486c9974f8e2..d2c4eb98816d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -131,7 +131,7 @@ struct ksm_mm_slot {
 
 /**
  * struct ksm_scan - cursor for scanning
- * @mm_slot: the current mm_slot we are scanning
+ * @ksm_slot: the current ksm_slot we are scanning
  * @address: the next address inside that to be scanned
  * @rmap_list: link to the next rmap to be scanned in the rmap_list
  * @seqnr: count of completed full scans (needed when removing unstable node)
@@ -139,7 +139,7 @@ struct ksm_mm_slot {
  * There is only the one ksm_scan instance of this cursor structure.
  */
 struct ksm_scan {
-	struct ksm_mm_slot *mm_slot;
+	struct ksm_mm_slot *ksm_slot;
 	unsigned long address;
 	struct ksm_rmap_item **rmap_list;
 	unsigned long seqnr;
@@ -187,7 +187,7 @@ struct ksm_stable_node {
 
 /**
  * struct ksm_rmap_item - reverse mapping item for virtual addresses
- * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
+ * @rmap_list: next rmap_item in ksm_slot's singly-linked rmap_list
  * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
  * @nid: NUMA node id of unstable tree in which linked (may not match page)
  * @mm: the memory structure this rmap_item is pointing into
@@ -242,7 +242,7 @@ static struct ksm_mm_slot ksm_mm_head = {
 	.slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
 };
 static struct ksm_scan ksm_scan = {
-	.mm_slot = &ksm_mm_head,
+	.ksm_slot = &ksm_mm_head,
 };
 
 static struct kmem_cache *rmap_item_cache;
@@ -1205,11 +1205,11 @@ static int unmerge_and_remove_all_rmap_items(void)
 	spin_lock(&ksm_mmlist_lock);
 	slot = list_entry(ksm_mm_head.slot.mm_node.next,
 			  struct mm_slot, mm_node);
-	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
+	ksm_scan.ksm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
 	spin_unlock(&ksm_mmlist_lock);
 
-	for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
-	     mm_slot = ksm_scan.mm_slot) {
+	for (mm_slot = ksm_scan.ksm_slot; mm_slot != &ksm_mm_head;
+	     mm_slot = ksm_scan.ksm_slot) {
 		VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);
 
 		mm = mm_slot->slot.mm;
@@ -1238,7 +1238,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 		spin_lock(&ksm_mmlist_lock);
 		slot = list_entry(mm_slot->slot.mm_node.next,
 				  struct mm_slot, mm_node);
-		ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
+		ksm_scan.ksm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
 		if (ksm_test_exit(mm)) {
 			hash_del(&mm_slot->slot.hash);
 			list_del(&mm_slot->slot.mm_node);
@@ -1260,7 +1260,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 error:
 	mmap_read_unlock(mm);
 	spin_lock(&ksm_mmlist_lock);
-	ksm_scan.mm_slot = &ksm_mm_head;
+	ksm_scan.ksm_slot = &ksm_mm_head;
 	spin_unlock(&ksm_mmlist_lock);
 	return err;
 }
@@ -2565,7 +2565,7 @@ static bool should_skip_rmap_item(struct page *page,
 static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 {
 	struct mm_struct *mm;
-	struct ksm_mm_slot *mm_slot;
+	struct ksm_mm_slot *ksm_slot;
 	struct mm_slot *slot;
 	struct vm_area_struct *vma;
 	struct ksm_rmap_item *rmap_item;
@@ -2575,8 +2575,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 	if (list_empty(&ksm_mm_head.slot.mm_node))
 		return NULL;
 
-	mm_slot = ksm_scan.mm_slot;
-	if (mm_slot == &ksm_mm_head) {
+	ksm_slot = ksm_scan.ksm_slot;
+	if (ksm_slot == &ksm_mm_head) {
 		advisor_start_scan();
 		trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
 
@@ -2616,23 +2616,23 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 			root_unstable_tree[nid] = RB_ROOT;
 
 		spin_lock(&ksm_mmlist_lock);
-		slot = list_entry(mm_slot->slot.mm_node.next,
+		slot = list_entry(ksm_slot->slot.mm_node.next,
 				  struct mm_slot, mm_node);
-		mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
-		ksm_scan.mm_slot = mm_slot;
+		ksm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
+		ksm_scan.ksm_slot = ksm_slot;
 		spin_unlock(&ksm_mmlist_lock);
 		/*
 		 * Although we tested list_empty() above, a racing __ksm_exit
 		 * of the last mm on the list may have removed it since then.
 		 */
-		if (mm_slot == &ksm_mm_head)
+		if (ksm_slot == &ksm_mm_head)
 			return NULL;
 next_mm:
 		ksm_scan.address = 0;
-		ksm_scan.rmap_list = &mm_slot->rmap_list;
+		ksm_scan.rmap_list = &ksm_slot->rmap_list;
 	}
 
-	slot = &mm_slot->slot;
+	slot = &ksm_slot->slot;
 	mm = slot->mm;
 	vma_iter_init(&vmi, mm, ksm_scan.address);
 
@@ -2662,7 +2662,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 			if (PageAnon(*page)) {
 				flush_anon_page(vma, *page, ksm_scan.address);
 				flush_dcache_page(*page);
-				rmap_item = get_next_rmap_item(mm_slot,
+				rmap_item = get_next_rmap_item(ksm_slot,
 					ksm_scan.rmap_list, ksm_scan.address);
 				if (rmap_item) {
 					ksm_scan.rmap_list =
@@ -2687,7 +2687,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 	if (ksm_test_exit(mm)) {
 no_vmas:
 		ksm_scan.address = 0;
-		ksm_scan.rmap_list = &mm_slot->rmap_list;
+		ksm_scan.rmap_list = &ksm_slot->rmap_list;
 	}
 	/*
 	 * Nuke all the rmap_items that are above this current rmap:
@@ -2696,9 +2696,9 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 	remove_trailing_rmap_items(ksm_scan.rmap_list);
 
 	spin_lock(&ksm_mmlist_lock);
-	slot = list_entry(mm_slot->slot.mm_node.next,
+	slot = list_entry(ksm_slot->slot.mm_node.next,
 			  struct mm_slot, mm_node);
-	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
+	ksm_scan.ksm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
 	if (ksm_scan.address == 0) {
 		/*
 		 * We've completed a full scan of all vmas, holding mmap_lock
@@ -2709,11 +2709,11 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 		 * or when all VM_MERGEABLE areas have been unmapped (and
 		 * mmap_lock then protects against race with MADV_MERGEABLE).
 		 */
-		hash_del(&mm_slot->slot.hash);
-		list_del(&mm_slot->slot.mm_node);
+		hash_del(&ksm_slot->slot.hash);
+		list_del(&ksm_slot->slot.mm_node);
 		spin_unlock(&ksm_mmlist_lock);
 
-		mm_slot_free(mm_slot_cache, mm_slot);
+		mm_slot_free(mm_slot_cache, ksm_slot);
 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
 		mmap_read_unlock(mm);
@@ -2725,14 +2725,14 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 		 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
 		 * already have been freed under us by __ksm_exit()
 		 * because the "mm_slot" is still hashed and
-		 * ksm_scan.mm_slot doesn't point to it anymore.
+		 * ksm_scan.ksm_slot doesn't point to it anymore.
 		 */
 		spin_unlock(&ksm_mmlist_lock);
 	}
 
 	/* Repeat until we've completed scanning the whole list */
-	mm_slot = ksm_scan.mm_slot;
-	if (mm_slot != &ksm_mm_head)
+	ksm_slot = ksm_scan.ksm_slot;
+	if (ksm_slot != &ksm_mm_head)
 		goto next_mm;
 
 	advisor_stop_scan();
@@ -2968,15 +2968,15 @@ EXPORT_SYMBOL_GPL(ksm_madvise);
 
 int __ksm_enter(struct mm_struct *mm)
 {
-	struct ksm_mm_slot *mm_slot;
+	struct ksm_mm_slot *ksm_slot;
 	struct mm_slot *slot;
 	int needs_wakeup;
 
-	mm_slot = mm_slot_alloc(mm_slot_cache);
-	if (!mm_slot)
+	ksm_slot = mm_slot_alloc(mm_slot_cache);
+	if (!ksm_slot)
 		return -ENOMEM;
 
-	slot = &mm_slot->slot;
+	slot = &ksm_slot->slot;
 
 	/* Check ksm_run too?  Would need tighter locking */
 	needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);
@@ -2996,7 +2996,7 @@ int __ksm_enter(struct mm_struct *mm)
 	if (ksm_run & KSM_RUN_UNMERGE)
 		list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
 	else
-		list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
+		list_add_tail(&slot->mm_node, &ksm_scan.ksm_slot->slot.mm_node);
 	spin_unlock(&ksm_mmlist_lock);
 
 	set_bit(MMF_VM_MERGEABLE, &mm->flags);
@@ -3011,40 +3011,40 @@ int __ksm_enter(struct mm_struct *mm)
 
 void __ksm_exit(struct mm_struct *mm)
 {
-	struct ksm_mm_slot *mm_slot;
+	struct ksm_mm_slot *ksm_slot;
 	struct mm_slot *slot;
 	int easy_to_free = 0;
 
 	/*
 	 * This process is exiting: if it's straightforward (as is the
-	 * case when ksmd was never running), free mm_slot immediately.
+	 * case when ksmd was never running), free ksm_slot immediately.
 	 * But if it's at the cursor or has rmap_items linked to it, use
 	 * mmap_lock to synchronize with any break_cows before pagetables
-	 * are freed, and leave the mm_slot on the list for ksmd to free.
+	 * are freed, and leave the ksm_slot on the list for ksmd to free.
 	 * Beware: ksm may already have noticed it exiting and freed the slot.
 	 */
 
 	spin_lock(&ksm_mmlist_lock);
 	slot = mm_slot_lookup(mm_slots_hash, mm);
-	mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
-	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
-		if (!mm_slot->rmap_list) {
+	ksm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
+	if (ksm_slot && ksm_scan.ksm_slot != ksm_slot) {
+		if (!ksm_slot->rmap_list) {
 			hash_del(&slot->hash);
 			list_del(&slot->mm_node);
 			easy_to_free = 1;
 		} else {
 			list_move(&slot->mm_node,
-				  &ksm_scan.mm_slot->slot.mm_node);
+				  &ksm_scan.ksm_slot->slot.mm_node);
 		}
 	}
 	spin_unlock(&ksm_mmlist_lock);
 
 	if (easy_to_free) {
-		mm_slot_free(mm_slot_cache, mm_slot);
+		mm_slot_free(mm_slot_cache, ksm_slot);
 		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 		mmdrop(mm);
-	} else if (mm_slot) {
+	} else if (ksm_slot) {
 		mmap_write_lock(mm);
 		mmap_write_unlock(mm);
 	}
-- 
2.43.0


* [PATCH 2/4] mm/ksm: rename variable mm_slot to ksm_slot in unmerge_and_remove_all_rmap_items
  2024-04-28 10:06 [PATCH 1/4] mm/ksm: rename mm_slot members to ksm_slot for better readability alexs
@ 2024-04-28 10:06 ` alexs
  2024-04-28 10:06 ` [PATCH 3/4] mm/ksm: rename mm_slot_cache to ksm_slot_cache alexs
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 8+ messages in thread
From: alexs @ 2024-04-28 10:06 UTC
  To: Andrew Morton, linux-mm, linux-kernel, willy, izik.eidus
  Cc: Alex Shi (tencent), David Hildenbrand

From: "Alex Shi (tencent)" <alexs@kernel.org>

To distinguish ksm_mm_slot from mm_slot for better code readability,
rename the ksm_mm_slot variable to ksm_slot in
unmerge_and_remove_all_rmap_items(). No functional change.

Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
---
 mm/ksm.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index d2c4eb98816d..6efa33c48381 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1196,7 +1196,7 @@ static int remove_all_stable_nodes(void)
 
 static int unmerge_and_remove_all_rmap_items(void)
 {
-	struct ksm_mm_slot *mm_slot;
+	struct ksm_mm_slot *ksm_slot;
 	struct mm_slot *slot;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
@@ -1208,11 +1208,11 @@ static int unmerge_and_remove_all_rmap_items(void)
 	ksm_scan.ksm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
 	spin_unlock(&ksm_mmlist_lock);
 
-	for (mm_slot = ksm_scan.ksm_slot; mm_slot != &ksm_mm_head;
-	     mm_slot = ksm_scan.ksm_slot) {
-		VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);
+	for (ksm_slot = ksm_scan.ksm_slot; ksm_slot != &ksm_mm_head;
+	     ksm_slot = ksm_scan.ksm_slot) {
+		VMA_ITERATOR(vmi, ksm_slot->slot.mm, 0);
 
-		mm = mm_slot->slot.mm;
+		mm = ksm_slot->slot.mm;
 		mmap_read_lock(mm);
 
 		/*
@@ -1232,19 +1232,19 @@ static int unmerge_and_remove_all_rmap_items(void)
 		}
 
 mm_exiting:
-		remove_trailing_rmap_items(&mm_slot->rmap_list);
+		remove_trailing_rmap_items(&ksm_slot->rmap_list);
 		mmap_read_unlock(mm);
 
 		spin_lock(&ksm_mmlist_lock);
-		slot = list_entry(mm_slot->slot.mm_node.next,
+		slot = list_entry(ksm_slot->slot.mm_node.next,
 				  struct mm_slot, mm_node);
 		ksm_scan.ksm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
 		if (ksm_test_exit(mm)) {
-			hash_del(&mm_slot->slot.hash);
-			list_del(&mm_slot->slot.mm_node);
+			hash_del(&ksm_slot->slot.hash);
+			list_del(&ksm_slot->slot.mm_node);
 			spin_unlock(&ksm_mmlist_lock);
 
-			mm_slot_free(mm_slot_cache, mm_slot);
+			mm_slot_free(mm_slot_cache, ksm_slot);
 			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
 			mmdrop(mm);
-- 
2.43.0


* [PATCH 3/4] mm/ksm: rename mm_slot_cache to ksm_slot_cache
  2024-04-28 10:06 [PATCH 1/4] mm/ksm: rename mm_slot members to ksm_slot for better readability alexs
  2024-04-28 10:06 ` [PATCH 2/4] mm/ksm: rename variable mm_slot to ksm_slot in unmerge_and_remove_all_rmap_items alexs
@ 2024-04-28 10:06 ` alexs
  2024-04-30 12:57   ` David Hildenbrand
  2024-04-28 10:06 ` [PATCH 4/4] mm/ksm: rename mm_slot for get_next_rmap_item alexs
  2024-04-30 12:53 ` [PATCH 1/4] mm/ksm: rename mm_slot members to ksm_slot for better readability David Hildenbrand
  3 siblings, 1 reply; 8+ messages in thread
From: alexs @ 2024-04-28 10:06 UTC
  To: Andrew Morton, linux-mm, linux-kernel, willy, izik.eidus
  Cc: Alex Shi (tencent), David Hildenbrand

From: "Alex Shi (tencent)" <alexs@kernel.org>

To distinguish ksm_mm_slot from mm_slot for better code readability,
rename mm_slot_cache to ksm_slot_cache. No functional change.

Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
---
 mm/ksm.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 6efa33c48381..22d2132f83a4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -247,7 +247,7 @@ static struct ksm_scan ksm_scan = {
 
 static struct kmem_cache *rmap_item_cache;
 static struct kmem_cache *stable_node_cache;
-static struct kmem_cache *mm_slot_cache;
+static struct kmem_cache *ksm_slot_cache;
 
 /* Default number of pages to scan per batch */
 #define DEFAULT_PAGES_TO_SCAN 100
@@ -502,8 +502,8 @@ static int __init ksm_slab_init(void)
 	if (!stable_node_cache)
 		goto out_free1;
 
-	mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
-	if (!mm_slot_cache)
+	ksm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
+	if (!ksm_slot_cache)
 		goto out_free2;
 
 	return 0;
@@ -518,10 +518,10 @@ static int __init ksm_slab_init(void)
 
 static void __init ksm_slab_free(void)
 {
-	kmem_cache_destroy(mm_slot_cache);
+	kmem_cache_destroy(ksm_slot_cache);
 	kmem_cache_destroy(stable_node_cache);
 	kmem_cache_destroy(rmap_item_cache);
-	mm_slot_cache = NULL;
+	ksm_slot_cache = NULL;
 }
 
 static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
@@ -1244,7 +1244,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 			list_del(&ksm_slot->slot.mm_node);
 			spin_unlock(&ksm_mmlist_lock);
 
-			mm_slot_free(mm_slot_cache, ksm_slot);
+			mm_slot_free(ksm_slot_cache, ksm_slot);
 			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
 			mmdrop(mm);
@@ -2713,7 +2713,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
 		list_del(&ksm_slot->slot.mm_node);
 		spin_unlock(&ksm_mmlist_lock);
 
-		mm_slot_free(mm_slot_cache, ksm_slot);
+		mm_slot_free(ksm_slot_cache, ksm_slot);
 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
 		mmap_read_unlock(mm);
@@ -2972,7 +2972,7 @@ int __ksm_enter(struct mm_struct *mm)
 	struct mm_slot *slot;
 	int needs_wakeup;
 
-	ksm_slot = mm_slot_alloc(mm_slot_cache);
+	ksm_slot = mm_slot_alloc(ksm_slot_cache);
 	if (!ksm_slot)
 		return -ENOMEM;
 
@@ -3040,7 +3040,7 @@ void __ksm_exit(struct mm_struct *mm)
 	spin_unlock(&ksm_mmlist_lock);
 
 	if (easy_to_free) {
-		mm_slot_free(mm_slot_cache, ksm_slot);
+		mm_slot_free(ksm_slot_cache, ksm_slot);
 		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 		mmdrop(mm);
-- 
2.43.0


* [PATCH 4/4] mm/ksm: rename mm_slot for get_next_rmap_item
  2024-04-28 10:06 [PATCH 1/4] mm/ksm: rename mm_slot members to ksm_slot for better readability alexs
  2024-04-28 10:06 ` [PATCH 2/4] mm/ksm: rename variable mm_slot to ksm_slot in unmerge_and_remove_all_rmap_items alexs
  2024-04-28 10:06 ` [PATCH 3/4] mm/ksm: rename mm_slot_cache to ksm_slot_cache alexs
@ 2024-04-28 10:06 ` alexs
  2024-04-30 12:53 ` [PATCH 1/4] mm/ksm: rename mm_slot members to ksm_slot for better readability David Hildenbrand
  3 siblings, 0 replies; 8+ messages in thread
From: alexs @ 2024-04-28 10:06 UTC
  To: Andrew Morton, linux-mm, linux-kernel, willy, izik.eidus
  Cc: Alex Shi (tencent), David Hildenbrand

From: "Alex Shi (tencent)" <alexs@kernel.org>

To distinguish ksm_mm_slot from mm_slot for better code readability,
rename the parameter "struct ksm_mm_slot *mm_slot" to "struct ksm_mm_slot
*ksm_slot". No functional change.

Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
---
 mm/ksm.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 22d2132f83a4..2d10f1aef123 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2465,7 +2465,7 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
 	}
 }
 
-static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
+static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *ksm_slot,
 					    struct ksm_rmap_item **rmap_list,
 					    unsigned long addr)
 {
@@ -2485,7 +2485,7 @@ static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
 	rmap_item = alloc_rmap_item();
 	if (rmap_item) {
 		/* It has already been zeroed */
-		rmap_item->mm = mm_slot->slot.mm;
+		rmap_item->mm = ksm_slot->slot.mm;
 		rmap_item->mm->ksm_rmap_items++;
 		rmap_item->address = addr;
 		rmap_item->rmap_list = *rmap_list;
-- 
2.43.0


* Re: [PATCH 1/4] mm/ksm: rename mm_slot members to ksm_slot for better readability.
  2024-04-28 10:06 [PATCH 1/4] mm/ksm: rename mm_slot members to ksm_slot for better readability alexs
                   ` (2 preceding siblings ...)
  2024-04-28 10:06 ` [PATCH 4/4] mm/ksm: rename mm_slot for get_next_rmap_item alexs
@ 2024-04-30 12:53 ` David Hildenbrand
  3 siblings, 0 replies; 8+ messages in thread
From: David Hildenbrand @ 2024-04-30 12:53 UTC
  To: alexs, Andrew Morton, linux-mm, linux-kernel, willy, izik.eidus

On 28.04.24 12:06, alexs@kernel.org wrote:
> From: "Alex Shi (tencent)" <alexs@kernel.org>
> 
> struct mm_slot is the generic per-mm slot, and the ksm_mm_slot pointer in
> the ksm_scan struct is named mm_slot as well. Furthermore, ksm_mm_slot
> pointers in functions are again named mm_slot, right alongside 'struct
> mm_slot' variables. That makes the code considerably harder to read.
> 
> struct ksm_mm_slot {
>          struct mm_slot slot;
> 	...
> };
> 
> struct ksm_scan {
>          struct ksm_mm_slot *mm_slot;
> 	...
> };
> 
> int __ksm_enter(struct mm_struct *mm)
> {
>          struct ksm_mm_slot *mm_slot;
>          struct mm_slot *slot;
> 	...
> 
> So let's rename the mm_slot member of ksm_scan to ksm_slot, and use
> ksm_slot for ksm_mm_slot* variables in functions, to reduce the confusion.
> 
>   struct ksm_scan {
> -       struct ksm_mm_slot *mm_slot;
> +       struct ksm_mm_slot *ksm_slot;
> 
> Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
> Cc: David Hildenbrand <david@redhat.com>

[...]

>   	}
>   	spin_unlock(&ksm_mmlist_lock);
>   
>   	if (easy_to_free) {
> -		mm_slot_free(mm_slot_cache, mm_slot);
> +		mm_slot_free(mm_slot_cache, ksm_slot);

And at this point I am not sure this is the right decision. You made 
that line more confusing.

Quite some churn for little (no?) benefit.


-- 
Cheers,

David / dhildenb


* Re: [PATCH 3/4] mm/ksm: rename mm_slot_cache to ksm_slot_cache
  2024-04-28 10:06 ` [PATCH 3/4] mm/ksm: rename mm_slot_cache to ksm_slot_cache alexs
@ 2024-04-30 12:57   ` David Hildenbrand
  2024-05-16 12:15     ` Alex Shi
  0 siblings, 1 reply; 8+ messages in thread
From: David Hildenbrand @ 2024-04-30 12:57 UTC
  To: alexs, Andrew Morton, linux-mm, linux-kernel, willy, izik.eidus

On 28.04.24 12:06, alexs@kernel.org wrote:
> From: "Alex Shi (tencent)" <alexs@kernel.org>
> 
> To distinguish ksm_mm_slot from mm_slot for better code readability,
> rename mm_slot_cache to ksm_slot_cache. No functional change.
> 
> Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
> Cc: David Hildenbrand <david@redhat.com>
> ---
>   mm/ksm.c | 18 +++++++++---------
>   1 file changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/ksm.c b/mm/ksm.c
> index 6efa33c48381..22d2132f83a4 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -247,7 +247,7 @@ static struct ksm_scan ksm_scan = {
>   
>   static struct kmem_cache *rmap_item_cache;
>   static struct kmem_cache *stable_node_cache;
> -static struct kmem_cache *mm_slot_cache;
> +static struct kmem_cache *ksm_slot_cache;
>   
>   /* Default number of pages to scan per batch */
>   #define DEFAULT_PAGES_TO_SCAN 100
> @@ -502,8 +502,8 @@ static int __init ksm_slab_init(void)
>   	if (!stable_node_cache)
>   		goto out_free1;
>   
> -	mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
> -	if (!mm_slot_cache)
> +	ksm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
> +	if (!ksm_slot_cache)
>   		goto out_free2;
>   
>   	return 0;
> @@ -518,10 +518,10 @@ static int __init ksm_slab_init(void)
>   
>   static void __init ksm_slab_free(void)
>   {
> -	kmem_cache_destroy(mm_slot_cache);
> +	kmem_cache_destroy(ksm_slot_cache);
>   	kmem_cache_destroy(stable_node_cache);
>   	kmem_cache_destroy(rmap_item_cache);
> -	mm_slot_cache = NULL;
> +	ksm_slot_cache = NULL;
>   }
>   
>   static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
> @@ -1244,7 +1244,7 @@ static int unmerge_and_remove_all_rmap_items(void)
>   			list_del(&ksm_slot->slot.mm_node);
>   			spin_unlock(&ksm_mmlist_lock);
>   
> -			mm_slot_free(mm_slot_cache, ksm_slot);
> +			mm_slot_free(ksm_slot_cache, ksm_slot);
>   			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
>   			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
>   			mmdrop(mm);
> @@ -2713,7 +2713,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
>   		list_del(&ksm_slot->slot.mm_node);
>   		spin_unlock(&ksm_mmlist_lock);
>   
> -		mm_slot_free(mm_slot_cache, ksm_slot);
> +		mm_slot_free(ksm_slot_cache, ksm_slot);
>   		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
>   		clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
>   		mmap_read_unlock(mm);
> @@ -2972,7 +2972,7 @@ int __ksm_enter(struct mm_struct *mm)
>   	struct mm_slot *slot;
>   	int needs_wakeup;
>   
> -	ksm_slot = mm_slot_alloc(mm_slot_cache);
> +	ksm_slot = mm_slot_alloc(ksm_slot_cache);

Similarly, this makes the code more confusing. The pattern in khugepaged
is similar:

mm_slot = mm_slot_alloc(mm_slot_cache);

I don't think we want these renamings.

E.g., "ksm_mm_slot_cache" might be a bit better than "mm_slot_cache". 
But then, we are in KSM code ... so I don't really see an improvement.
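
Side by side, for comparison (the third spelling is only a hypothetical
suggestion, not existing code):

	mm_slot  = mm_slot_alloc(mm_slot_cache);	/* khugepaged today */
	ksm_slot = mm_slot_alloc(ksm_slot_cache);	/* KSM after this series */
	ksm_slot = mm_slot_alloc(ksm_mm_slot_cache);	/* hypothetical alternative */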

-- 
Cheers,

David / dhildenb


* Re: [PATCH 3/4] mm/ksm: rename mm_slot_cache to ksm_slot_cache
  2024-04-30 12:57   ` David Hildenbrand
@ 2024-05-16 12:15     ` Alex Shi
  2024-05-22 11:49       ` David Hildenbrand
  0 siblings, 1 reply; 8+ messages in thread
From: Alex Shi @ 2024-05-16 12:15 UTC
  To: David Hildenbrand, alexs, Andrew Morton, linux-mm, linux-kernel,
	willy, izik.eidus



On 4/30/24 8:57 PM, David Hildenbrand wrote:
>>
>> @@ -2972,7 +2972,7 @@ int __ksm_enter(struct mm_struct *mm)
>>       struct mm_slot *slot;
>>       int needs_wakeup;
>>   -    ksm_slot = mm_slot_alloc(mm_slot_cache);
>> +    ksm_slot = mm_slot_alloc(ksm_slot_cache);
> 
> Similarly, this makes the code more confusing. The pattern in khugepaged is similar:
> 
> mm_slot = mm_slot_alloc(mm_slot_cache);

Could we rename it to khg_mm_slot_cache in khugepaged?
 
> 
> I don't think we want these renamings.
> 
> E.g., "ksm_mm_slot_cache" might be a bit better than "mm_slot_cache". But then, we are in KSM code ... so I don't really see an improvement.

Thanks for the comments, and sorry for the late response.

Yes, ksm_mm_slot_cache is better even in KSM code. As a heavy cscope/ctags user, this change would remove much of the confusion in name searching, and that's why even a one-sided change satisfies me.
Maybe better naming could make it more readable; any further suggestions? :)

Thanks a lot!
Alex

* Re: [PATCH 3/4] mm/ksm: rename mm_slot_cache to ksm_slot_cache
  2024-05-16 12:15     ` Alex Shi
@ 2024-05-22 11:49       ` David Hildenbrand
  0 siblings, 0 replies; 8+ messages in thread
From: David Hildenbrand @ 2024-05-22 11:49 UTC
  To: Alex Shi, alexs, Andrew Morton, linux-mm, linux-kernel, willy,
	izik.eidus

On 16.05.24 14:15, Alex Shi wrote:
> 
> 
> On 4/30/24 8:57 PM, David Hildenbrand wrote:
>>>
>>> @@ -2972,7 +2972,7 @@ int __ksm_enter(struct mm_struct *mm)
>>>        struct mm_slot *slot;
>>>        int needs_wakeup;
>>>    -    ksm_slot = mm_slot_alloc(mm_slot_cache);
>>> +    ksm_slot = mm_slot_alloc(ksm_slot_cache);
>>
>> Similarly, this makes the code more confusing. The pattern in khugepaged is similar:
>>
>> mm_slot = mm_slot_alloc(mm_slot_cache);
> 
> Could we rename it to khg_mm_slot_cache in khugepaged?

I don't see any sense in such a renaming, sorry. This code resides in
ksm.c/khugepaged.c respectively, and at least to me it is therefore quite
clear.

-- 
Cheers,

David / dhildenb

