From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, alex.shi@linux.alibaba.com,
	bsingharora@gmail.com, guro@fb.com, hannes@cmpxchg.org,
	hughd@google.com, iamjoonsoo.kim@lge.com, kirill@shutemov.name,
	linux-mm@kvack.org, mhocko@suse.com, mm-commits@vger.kernel.org,
	shakeelb@google.com, torvalds@linux-foundation.org
Subject: [patch 092/131] mm: memcontrol: switch to native NR_FILE_PAGES and NR_SHMEM counters
Date: Wed, 03 Jun 2020 16:01:54 -0700
Message-ID: <20200603230154.nf75Itrgi%akpm@linux-foundation.org>
In-Reply-To: <20200603155549.e041363450869eaae4c7f05b@linux-foundation.org>

From: Johannes Weiner <hannes@cmpxchg.org>
Subject: mm: memcontrol: switch to native NR_FILE_PAGES and NR_SHMEM counters

Memcg maintains private MEMCG_CACHE and NR_SHMEM counters.  This
divergence from the generic VM accounting adds unnecessary code overhead
and creates a dependency for memcg on page->mapping being set up at
charge time, so that page types can be told apart.
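
For context (condensed from the mm/memcontrol.c hunk below, not new
code), the pre-patch branch in mem_cgroup_charge_statistics()
illustrates that dependency: PageAnon() inspects page->mapping, so the
mapping must already be valid when the page is charged:

	/* pre-patch: telling anon and file pages apart at charge time */
	if (PageAnon(page))	/* reads page->mapping */
		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
	else {
		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
		if (PageSwapBacked(page))
			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
	}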

Convert the generic accounting sites to mod_lruvec_page_state() and
friends so that they also maintain the per-cgroup vmstat counters of
NR_FILE_PAGES and NR_SHMEM.  The page is already locked at these sites,
so page->mem_cgroup is stable; we only need to minimally tweak two
mem_cgroup_migrate() calls to ensure it's set up in time.
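
As a before/after sketch of one such site (unaccount_page_cache_page()
in the mm/filemap.c hunk below), the conversion swaps a node-only
update for one that also hits the page's per-cgroup lruvec counter:

	/* before: node-level counter only */
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);

	/* after: node counter plus page->mem_cgroup's lruvec counter */
	__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);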

Then replace MEMCG_CACHE with NR_FILE_PAGES and delete the private
NR_SHMEM accounting sites.
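
Concretely, the two tweaks hoist mem_cgroup_migrate() ahead of the
counter updates in replace_page_cache_page() and shmem_replace_page(),
so that new->mem_cgroup is already committed when the new page is
accounted.  Roughly, condensed from the mm/filemap.c hunk below:

	mem_cgroup_migrate(old, new);	/* commit new->mem_cgroup first */
	xas_lock_irqsave(&xas, flags);
	xas_store(&xas, new);
	...
	if (!PageHuge(new))
		__inc_lruvec_page_state(new, NR_FILE_PAGES);	/* needs it */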

Link: http://lkml.kernel.org/r/20200508183105.225460-10-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/memcontrol.h |    3 +--
 mm/filemap.c               |   17 +++++++++--------
 mm/khugepaged.c            |   16 +++++++++++-----
 mm/memcontrol.c            |   28 +++++++++++-----------------
 mm/migrate.c               |   15 +++++++++++----
 mm/shmem.c                 |   14 +++++++-------
 6 files changed, 50 insertions(+), 43 deletions(-)

--- a/include/linux/memcontrol.h~mm-memcontrol-switch-to-native-nr_file_pages-and-nr_shmem-counters
+++ a/include/linux/memcontrol.h
@@ -29,8 +29,7 @@ struct kmem_cache;
 
 /* Cgroup-specific page state, on top of universal node page state */
 enum memcg_stat_item {
-	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
-	MEMCG_RSS,
+	MEMCG_RSS = NR_VM_NODE_STAT_ITEMS,
 	MEMCG_RSS_HUGE,
 	MEMCG_SWAP,
 	MEMCG_SOCK,
--- a/mm/filemap.c~mm-memcontrol-switch-to-native-nr_file_pages-and-nr_shmem-counters
+++ a/mm/filemap.c
@@ -199,9 +199,9 @@ static void unaccount_page_cache_page(st
 
 	nr = hpage_nr_pages(page);
 
-	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
+	__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page)) {
-		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
+		__mod_lruvec_page_state(page, NR_SHMEM, -nr);
 		if (PageTransHuge(page))
 			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else if (PageTransHuge(page)) {
@@ -802,21 +802,22 @@ int replace_page_cache_page(struct page
 	new->mapping = mapping;
 	new->index = offset;
 
+	mem_cgroup_migrate(old, new);
+
 	xas_lock_irqsave(&xas, flags);
 	xas_store(&xas, new);
 
 	old->mapping = NULL;
 	/* hugetlb pages do not participate in page cache accounting. */
 	if (!PageHuge(old))
-		__dec_node_page_state(old, NR_FILE_PAGES);
+		__dec_lruvec_page_state(old, NR_FILE_PAGES);
 	if (!PageHuge(new))
-		__inc_node_page_state(new, NR_FILE_PAGES);
+		__inc_lruvec_page_state(new, NR_FILE_PAGES);
 	if (PageSwapBacked(old))
-		__dec_node_page_state(old, NR_SHMEM);
+		__dec_lruvec_page_state(old, NR_SHMEM);
 	if (PageSwapBacked(new))
-		__inc_node_page_state(new, NR_SHMEM);
+		__inc_lruvec_page_state(new, NR_SHMEM);
 	xas_unlock_irqrestore(&xas, flags);
-	mem_cgroup_migrate(old, new);
 	if (freepage)
 		freepage(old);
 	put_page(old);
@@ -867,7 +868,7 @@ static int __add_to_page_cache_locked(st
 
 		/* hugetlb pages do not participate in page cache accounting */
 		if (!huge)
-			__inc_node_page_state(page, NR_FILE_PAGES);
+			__inc_lruvec_page_state(page, NR_FILE_PAGES);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
--- a/mm/khugepaged.c~mm-memcontrol-switch-to-native-nr_file_pages-and-nr_shmem-counters
+++ a/mm/khugepaged.c
@@ -1844,12 +1844,18 @@ out_unlock:
 	}
 
 	if (nr_none) {
-		struct zone *zone = page_zone(new_page);
-
-		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+		struct lruvec *lruvec;
+		/*
+		 * XXX: We have started try_charge and pinned the
+		 * memcg, but the page isn't committed yet so we
+		 * cannot use mod_lruvec_page_state(). This hackery
+		 * will be cleaned up when we remove the page->mapping
+		 * dependency from memcg and fully charge the page above.
+		 */
+		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(new_page));
+		__mod_lruvec_state(lruvec, NR_FILE_PAGES, nr_none);
 		if (is_shmem)
-			__mod_node_page_state(zone->zone_pgdat,
-					      NR_SHMEM, nr_none);
+			__mod_lruvec_state(lruvec, NR_SHMEM, nr_none);
 	}
 
 xa_locked:
--- a/mm/memcontrol.c~mm-memcontrol-switch-to-native-nr_file_pages-and-nr_shmem-counters
+++ a/mm/memcontrol.c
@@ -842,11 +842,6 @@ static void mem_cgroup_charge_statistics
 	 */
 	if (PageAnon(page))
 		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
-	else {
-		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
-		if (PageSwapBacked(page))
-			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
-	}
 
 	if (abs(nr_pages) > 1) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
@@ -1392,7 +1387,7 @@ static char *memory_stat_format(struct m
 		       (u64)memcg_page_state(memcg, MEMCG_RSS) *
 		       PAGE_SIZE);
 	seq_buf_printf(&s, "file %llu\n",
-		       (u64)memcg_page_state(memcg, MEMCG_CACHE) *
+		       (u64)memcg_page_state(memcg, NR_FILE_PAGES) *
 		       PAGE_SIZE);
 	seq_buf_printf(&s, "kernel_stack %llu\n",
 		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
@@ -3357,7 +3352,7 @@ static unsigned long mem_cgroup_usage(st
 	unsigned long val;
 
 	if (mem_cgroup_is_root(memcg)) {
-		val = memcg_page_state(memcg, MEMCG_CACHE) +
+		val = memcg_page_state(memcg, NR_FILE_PAGES) +
 			memcg_page_state(memcg, MEMCG_RSS);
 		if (swap)
 			val += memcg_page_state(memcg, MEMCG_SWAP);
@@ -3828,7 +3823,7 @@ static int memcg_numa_stat_show(struct s
 #endif /* CONFIG_NUMA */
 
 static const unsigned int memcg1_stats[] = {
-	MEMCG_CACHE,
+	NR_FILE_PAGES,
 	MEMCG_RSS,
 	MEMCG_RSS_HUGE,
 	NR_SHMEM,
@@ -5461,6 +5456,14 @@ static int mem_cgroup_move_account(struc
 	lock_page_memcg(page);
 
 	if (!PageAnon(page)) {
+		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
+		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
+
+		if (PageSwapBacked(page)) {
+			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
+			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
+		}
+
 		if (page_mapped(page)) {
 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
@@ -6673,10 +6676,8 @@ struct uncharge_gather {
 	unsigned long nr_pages;
 	unsigned long pgpgout;
 	unsigned long nr_anon;
-	unsigned long nr_file;
 	unsigned long nr_kmem;
 	unsigned long nr_huge;
-	unsigned long nr_shmem;
 	struct page *dummy_page;
 };
 
@@ -6700,9 +6701,7 @@ static void uncharge_batch(const struct
 
 	local_irq_save(flags);
 	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
-	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
-	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
@@ -6743,11 +6742,6 @@ static void uncharge_page(struct page *p
 			ug->nr_huge += nr_pages;
 		if (PageAnon(page))
 			ug->nr_anon += nr_pages;
-		else {
-			ug->nr_file += nr_pages;
-			if (PageSwapBacked(page))
-				ug->nr_shmem += nr_pages;
-		}
 		ug->pgpgout++;
 	} else {
 		ug->nr_kmem += nr_pages;
--- a/mm/migrate.c~mm-memcontrol-switch-to-native-nr_file_pages-and-nr_shmem-counters
+++ a/mm/migrate.c
@@ -490,11 +490,18 @@ int migrate_page_move_mapping(struct add
 	 * are mapped to swap space.
 	 */
 	if (newzone != oldzone) {
-		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
-		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
+		struct lruvec *old_lruvec, *new_lruvec;
+		struct mem_cgroup *memcg;
+
+		memcg = page_memcg(page);
+		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
+		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
+
+		__dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
+		__inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
-			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
+			__dec_lruvec_state(old_lruvec, NR_SHMEM);
+			__inc_lruvec_state(new_lruvec, NR_SHMEM);
 		}
 		if (dirty && mapping_cap_account_dirty(mapping)) {
 			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
--- a/mm/shmem.c~mm-memcontrol-switch-to-native-nr_file_pages-and-nr_shmem-counters
+++ a/mm/shmem.c
@@ -653,8 +653,8 @@ next:
 			__inc_node_page_state(page, NR_SHMEM_THPS);
 		}
 		mapping->nrpages += nr;
-		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
-		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
+		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
+		__mod_lruvec_page_state(page, NR_SHMEM, nr);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
@@ -685,8 +685,8 @@ static void shmem_delete_from_page_cache
 	error = shmem_replace_entry(mapping, page->index, page, radswap);
 	page->mapping = NULL;
 	mapping->nrpages--;
-	__dec_node_page_state(page, NR_FILE_PAGES);
-	__dec_node_page_state(page, NR_SHMEM);
+	__dec_lruvec_page_state(page, NR_FILE_PAGES);
+	__dec_lruvec_page_state(page, NR_SHMEM);
 	xa_unlock_irq(&mapping->i_pages);
 	put_page(page);
 	BUG_ON(error);
@@ -1593,8 +1593,9 @@ static int shmem_replace_page(struct pag
 	xa_lock_irq(&swap_mapping->i_pages);
 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
 	if (!error) {
-		__inc_node_page_state(newpage, NR_FILE_PAGES);
-		__dec_node_page_state(oldpage, NR_FILE_PAGES);
+		mem_cgroup_migrate(oldpage, newpage);
+		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
+		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
 	}
 	xa_unlock_irq(&swap_mapping->i_pages);
 
@@ -1606,7 +1607,6 @@ static int shmem_replace_page(struct pag
 		 */
 		oldpage = newpage;
 	} else {
-		mem_cgroup_migrate(oldpage, newpage);
 		lru_cache_add_anon(newpage);
 		*pagep = newpage;
 	}
_
