From: Andrew Morton <akpm@linux-foundation.org>
To: willy@infradead.org, vbabka@suse.cz, tglx@linutronix.de, hughd@google.com, bigeasy@linutronix.de, akpm@linux-foundation.org, patches@lists.linux.dev, linux-mm@kvack.org, mm-commits@vger.kernel.org, torvalds@linux-foundation.org, akpm@linux-foundation.org
Subject: [patch 08/16] mm/munlock: protect the per-CPU pagevec by a local_lock_t
Date: Fri, 01 Apr 2022 11:21:12 -0700	[thread overview]
Message-ID: <20220401182113.0555FC2BBE4@smtp.kernel.org> (raw)
In-Reply-To: <l>

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: mm/munlock: protect the per-CPU pagevec by a local_lock_t

The access to mlock_pvec is protected by disabling preemption via
get_cpu_var(), or implicitly by having preemption disabled by the caller
(in the mlock_page_drain() case).  This breaks on PREEMPT_RT since
folio_lruvec_lock_irq() acquires a sleeping lock in this section.

Create struct mlock_pvec, which consists of the local_lock_t and the
pagevec.  Acquire the local_lock() before accessing the per-CPU pagevec.
Replace mlock_page_drain() with a _local() version which is invoked on
the local CPU and acquires the local_lock_t, and a _remote() version
which uses the pagevec from a remote CPU which is offline.
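
For readers less familiar with local_lock_t, here is a minimal sketch of
the pattern this patch introduces (illustration only, not part of the
diff; the names my_cache and my_op are placeholders):

struct my_cache {
	local_lock_t lock;	/* from <linux/local_lock.h>, protects .vec */
	struct pagevec vec;
};

static DEFINE_PER_CPU(struct my_cache, my_cache) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void my_op(struct page *page)
{
	struct pagevec *pvec;

	/*
	 * On !PREEMPT_RT this disables preemption, much like get_cpu_var()
	 * did.  On PREEMPT_RT it takes a per-CPU sleeping lock instead, so
	 * sleeping locks such as folio_lruvec_lock_irq() may be acquired
	 * while it is held.
	 */
	local_lock(&my_cache.lock);
	pvec = this_cpu_ptr(&my_cache.vec);
	pagevec_add(pvec, page);	/* operate on this CPU's pagevec */
	local_unlock(&my_cache.lock);
}

The _remote() drain keeps the plain per_cpu() access without the lock
because it only runs against a CPU that is already offline (note the
WARN_ON_ONCE(cpu_online(cpu))), so it cannot race with the local path.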

Link: https://lkml.kernel.org/r/YjizWi9IY0mpvIfb@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 mm/internal.h   |    6 ++++--
 mm/migrate.c    |    2 +-
 mm/mlock.c      |   46 ++++++++++++++++++++++++++++++++++++----------
 mm/page_alloc.c |    1 +
 mm/rmap.c       |    4 ++--
 mm/swap.c       |    4 +++-
 6 files changed, 47 insertions(+), 16 deletions(-)

--- a/mm/internal.h~mm-munlock-protect-the-per-cpu-pagevec-by-a-local_lock_t
+++ a/mm/internal.h
@@ -456,7 +456,8 @@ static inline void munlock_vma_page(stru
 }
 void mlock_new_page(struct page *page);
 bool need_mlock_page_drain(int cpu);
-void mlock_page_drain(int cpu);
+void mlock_page_drain_local(void);
+void mlock_page_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
@@ -539,7 +540,8 @@ static inline void munlock_vma_page(stru
 			struct vm_area_struct *vma, bool compound) { }
 static inline void mlock_new_page(struct page *page) { }
 static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain(int cpu) { }
+static inline void mlock_page_drain_local(void) { }
+static inline void mlock_page_drain_remote(int cpu) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }
--- a/mm/migrate.c~mm-munlock-protect-the-per-cpu-pagevec-by-a-local_lock_t
+++ a/mm/migrate.c
@@ -246,7 +246,7 @@ static bool remove_migration_pte(struct
 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 		}
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_page_drain(smp_processor_id());
+			mlock_page_drain_local();
 
 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
 					   compound_order(new));
--- a/mm/mlock.c~mm-munlock-protect-the-per-cpu-pagevec-by-a-local_lock_t
+++ a/mm/mlock.c
@@ -28,7 +28,14 @@
 
 #include "internal.h"
 
-static DEFINE_PER_CPU(struct pagevec, mlock_pvec);
+struct mlock_pvec {
+	local_lock_t lock;
+	struct pagevec vec;
+};
+
+static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
+	.lock = INIT_LOCAL_LOCK(lock),
+};
 
 bool can_do_mlock(void)
 {
@@ -203,18 +210,30 @@ static void mlock_pagevec(struct pagevec
 	pagevec_reinit(pvec);
 }
 
-void mlock_page_drain(int cpu)
+void mlock_page_drain_local(void)
 {
 	struct pagevec *pvec;
 
-	pvec = &per_cpu(mlock_pvec, cpu);
+	local_lock(&mlock_pvec.lock);
+	pvec = this_cpu_ptr(&mlock_pvec.vec);
+	if (pagevec_count(pvec))
+		mlock_pagevec(pvec);
+	local_unlock(&mlock_pvec.lock);
+}
+
+void mlock_page_drain_remote(int cpu)
+{
+	struct pagevec *pvec;
+
+	WARN_ON_ONCE(cpu_online(cpu));
+	pvec = &per_cpu(mlock_pvec.vec, cpu);
 	if (pagevec_count(pvec))
 		mlock_pagevec(pvec);
 }
 
 bool need_mlock_page_drain(int cpu)
 {
-	return pagevec_count(&per_cpu(mlock_pvec, cpu));
+	return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
 }
 
 /**
@@ -223,7 +242,10 @@ bool need_mlock_page_drain(int cpu)
  */
 void mlock_folio(struct folio *folio)
 {
-	struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+	struct pagevec *pvec;
+
+	local_lock(&mlock_pvec.lock);
+	pvec = this_cpu_ptr(&mlock_pvec.vec);
 
 	if (!folio_test_set_mlocked(folio)) {
 		int nr_pages = folio_nr_pages(folio);
@@ -236,7 +258,7 @@ void mlock_folio(struct folio *folio)
 	if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
 	    folio_test_large(folio) || lru_cache_disabled())
 		mlock_pagevec(pvec);
-	put_cpu_var(mlock_pvec);
+	local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -245,9 +267,11 @@ void mlock_folio(struct folio *folio)
  */
 void mlock_new_page(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+	struct pagevec *pvec;
 	int nr_pages = thp_nr_pages(page);
 
+	local_lock(&mlock_pvec.lock);
+	pvec = this_cpu_ptr(&mlock_pvec.vec);
 	SetPageMlocked(page);
 	mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
 	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
@@ -256,7 +280,7 @@ void mlock_new_page(struct page *page)
 	if (!pagevec_add(pvec, mlock_new(page)) ||
 	    PageHead(page) || lru_cache_disabled())
 		mlock_pagevec(pvec);
-	put_cpu_var(mlock_pvec);
+	local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -265,8 +289,10 @@ void mlock_new_page(struct page *page)
  */
 void munlock_page(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+	struct pagevec *pvec;
 
+	local_lock(&mlock_pvec.lock);
+	pvec = this_cpu_ptr(&mlock_pvec.vec);
 	/*
 	 * TestClearPageMlocked(page) must be left to __munlock_page(),
 	 * which will check whether the page is multiply mlocked.
@@ -276,7 +302,7 @@ void munlock_page(struct page *page)
 	if (!pagevec_add(pvec, page) ||
 	    PageHead(page) || lru_cache_disabled())
 		mlock_pagevec(pvec);
-	put_cpu_var(mlock_pvec);
+	local_unlock(&mlock_pvec.lock);
 }
 
 static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
--- a/mm/page_alloc.c~mm-munlock-protect-the-per-cpu-pagevec-by-a-local_lock_t
+++ a/mm/page_alloc.c
@@ -8367,6 +8367,7 @@ static int page_alloc_cpu_dead(unsigned
 	struct zone *zone;
 
 	lru_add_drain_cpu(cpu);
+	mlock_page_drain_remote(cpu);
 	drain_pages(cpu);
 
 	/*
--- a/mm/rmap.c~mm-munlock-protect-the-per-cpu-pagevec-by-a-local_lock_t
+++ a/mm/rmap.c
@@ -1683,7 +1683,7 @@ discard:
 		 */
 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_page_drain(smp_processor_id());
+			mlock_page_drain_local();
 		folio_put(folio);
 	}
 
@@ -1961,7 +1961,7 @@ static bool try_to_migrate_one(struct fo
 		 */
 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_page_drain(smp_processor_id());
+			mlock_page_drain_local();
 		folio_put(folio);
 	}
 
--- a/mm/swap.c~mm-munlock-protect-the-per-cpu-pagevec-by-a-local_lock_t
+++ a/mm/swap.c
@@ -624,7 +624,6 @@ void lru_add_drain_cpu(int cpu)
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
 	activate_page_drain(cpu);
-	mlock_page_drain(cpu);
 }
 
 /**
@@ -706,6 +705,7 @@ void lru_add_drain(void)
 	local_lock(&lru_pvecs.lock);
 	lru_add_drain_cpu(smp_processor_id());
 	local_unlock(&lru_pvecs.lock);
+	mlock_page_drain_local();
 }
 
 /*
@@ -720,6 +720,7 @@ static void lru_add_and_bh_lrus_drain(vo
 	lru_add_drain_cpu(smp_processor_id());
 	local_unlock(&lru_pvecs.lock);
 	invalidate_bh_lrus_cpu();
+	mlock_page_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
@@ -728,6 +729,7 @@ void lru_add_drain_cpu_zone(struct zone
 	lru_add_drain_cpu(smp_processor_id());
 	drain_local_pages(zone);
 	local_unlock(&lru_pvecs.lock);
+	mlock_page_drain_local();
 }
 
 #ifdef CONFIG_SMP
_
