From: Andrew Morton <akpm@linux-foundation.org>
To: aarcange@redhat.com, akpm@linux-foundation.org,
	colin.king@canonical.com, jhubbard@nvidia.com,
	kirill.shutemov@linux.intel.com, linux-mm@kvack.org,
	mike.kravetz@oracle.com, mm-commits@vger.kernel.org,
	rcampbell@nvidia.com, torvalds@linux-foundation.org,
	william.kucharski@oracle.com, yang.shi@linux.alibaba.com,
	ziy@nvidia.com
Subject: [patch 068/131] khugepaged: introduce 'max_ptes_shared' tunable
Date: Wed, 03 Jun 2020 16:00:30 -0700
Message-ID: <20200603230030.9ZAXhDHr5%akpm@linux-foundation.org>
In-Reply-To: <20200603155549.e041363450869eaae4c7f05b@linux-foundation.org>

From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: khugepaged: introduce 'max_ptes_shared' tunable

'max_ptes_shared' specifies how many pages may be shared across multiple
processes.  Exceeding this limit prevents the collapse::

	/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_shared

A higher value may increase the memory footprint for some workloads.

By default, at least half of the pages in the range must not be shared;
the initial value is HPAGE_PMD_NR / 2, i.e. 256 of the 512 PTEs covered
by a 2MB huge page on x86-64.
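
As a quick illustration (a hypothetical userspace sketch, not part of the
patch), the knob takes a plain decimal value; per the store handler added
below, anything above HPAGE_PMD_NR - 1 is rejected with -EINVAL:

	/*
	 * Hypothetical userspace sketch: forbid collapsing any range that
	 * contains a shared page by writing 0 to the new tunable.
	 * Requires root; the sysfs path is the one added by this patch.
	 */
	#include <stdio.h>

	int main(void)
	{
		const char *knob = "/sys/kernel/mm/transparent_hugepage/"
				   "khugepaged/max_ptes_shared";
		FILE *f = fopen(knob, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "0\n");
		return fclose(f) ? 1 : 0;
	}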

[colin.king@canonical.com: fix several spelling mistakes]
  Link: http://lkml.kernel.org/r/20200420084241.65433-1-colin.king@canonical.com
Link: http://lkml.kernel.org/r/20200416160026.16538-9-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Tested-by: Zi Yan <ziy@nvidia.com>
Acked-by: Yang Shi <yang.shi@linux.alibaba.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 Documentation/admin-guide/mm/transhuge.rst |    7 +
 include/trace/events/huge_memory.h         |    3 
 mm/khugepaged.c                            |   52 ++++++++++-
 tools/testing/selftests/vm/khugepaged.c    |   83 +++++++++++++++++++
 4 files changed, 140 insertions(+), 5 deletions(-)

--- a/Documentation/admin-guide/mm/transhuge.rst~khugepaged-introduce-max_ptes_shared-tunable
+++ a/Documentation/admin-guide/mm/transhuge.rst
@@ -220,6 +220,13 @@ memory. A lower value can prevent THPs f
 collapsed, resulting fewer pages being collapsed into
 THPs, and lower memory access performance.
 
+``max_ptes_shared`` specifies how many pages may be shared across multiple
+processes. Exceeding this limit prevents the collapse::
+
+	/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_shared
+
+A higher value may increase the memory footprint for some workloads.
+
 Boot parameter
 ==============
 
--- a/include/trace/events/huge_memory.h~khugepaged-introduce-max_ptes_shared-tunable
+++ a/include/trace/events/huge_memory.h
@@ -12,6 +12,8 @@
 	EM( SCAN_SUCCEED,		"succeeded")			\
 	EM( SCAN_PMD_NULL,		"pmd_null")			\
 	EM( SCAN_EXCEED_NONE_PTE,	"exceed_none_pte")		\
+	EM( SCAN_EXCEED_SWAP_PTE,	"exceed_swap_pte")		\
+	EM( SCAN_EXCEED_SHARED_PTE,	"exceed_shared_pte")		\
 	EM( SCAN_PTE_NON_PRESENT,	"pte_non_present")		\
 	EM( SCAN_PTE_UFFD_WP,		"pte_uffd_wp")			\
 	EM( SCAN_PAGE_RO,		"no_writable_page")		\
@@ -31,7 +33,6 @@
 	EM( SCAN_DEL_PAGE_LRU,		"could_not_delete_page_from_lru")\
 	EM( SCAN_ALLOC_HUGE_PAGE_FAIL,	"alloc_huge_page_failed")	\
 	EM( SCAN_CGROUP_CHARGE_FAIL,	"ccgroup_charge_failed")	\
-	EM( SCAN_EXCEED_SWAP_PTE,	"exceed_swap_pte")		\
 	EM( SCAN_TRUNCATED,		"truncated")			\
 	EMe(SCAN_PAGE_HAS_PRIVATE,	"page_has_private")		\
 
--- a/mm/khugepaged.c~khugepaged-introduce-max_ptes_shared-tunable
+++ a/mm/khugepaged.c
@@ -28,6 +28,8 @@ enum scan_result {
 	SCAN_SUCCEED,
 	SCAN_PMD_NULL,
 	SCAN_EXCEED_NONE_PTE,
+	SCAN_EXCEED_SWAP_PTE,
+	SCAN_EXCEED_SHARED_PTE,
 	SCAN_PTE_NON_PRESENT,
 	SCAN_PTE_UFFD_WP,
 	SCAN_PAGE_RO,
@@ -47,7 +49,6 @@ enum scan_result {
 	SCAN_DEL_PAGE_LRU,
 	SCAN_ALLOC_HUGE_PAGE_FAIL,
 	SCAN_CGROUP_CHARGE_FAIL,
-	SCAN_EXCEED_SWAP_PTE,
 	SCAN_TRUNCATED,
 	SCAN_PAGE_HAS_PRIVATE,
 };
@@ -72,6 +73,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepage
  */
 static unsigned int khugepaged_max_ptes_none __read_mostly;
 static unsigned int khugepaged_max_ptes_swap __read_mostly;
+static unsigned int khugepaged_max_ptes_shared __read_mostly;
 
 #define MM_SLOTS_HASH_BITS 10
 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
@@ -291,15 +293,43 @@ static struct kobj_attribute khugepaged_
 	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
 	       khugepaged_max_ptes_swap_store);
 
+static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
+					     struct kobj_attribute *attr,
+					     char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
+}
+
+static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
+					      struct kobj_attribute *attr,
+					      const char *buf, size_t count)
+{
+	int err;
+	unsigned long max_ptes_shared;
+
+	err  = kstrtoul(buf, 10, &max_ptes_shared);
+	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
+		return -EINVAL;
+
+	khugepaged_max_ptes_shared = max_ptes_shared;
+
+	return count;
+}
+
+static struct kobj_attribute khugepaged_max_ptes_shared_attr =
+	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
+	       khugepaged_max_ptes_shared_store);
+
 static struct attribute *khugepaged_attr[] = {
 	&khugepaged_defrag_attr.attr,
 	&khugepaged_max_ptes_none_attr.attr,
+	&khugepaged_max_ptes_swap_attr.attr,
+	&khugepaged_max_ptes_shared_attr.attr,
 	&pages_to_scan_attr.attr,
 	&pages_collapsed_attr.attr,
 	&full_scans_attr.attr,
 	&scan_sleep_millisecs_attr.attr,
 	&alloc_sleep_millisecs_attr.attr,
-	&khugepaged_max_ptes_swap_attr.attr,
 	NULL,
 };
 
@@ -359,6 +389,7 @@ int __init khugepaged_init(void)
 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
+	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
 
 	return 0;
 }
@@ -557,7 +588,7 @@ static int __collapse_huge_page_isolate(
 {
 	struct page *page = NULL;
 	pte_t *_pte;
-	int none_or_zero = 0, result = 0, referenced = 0;
+	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
 	bool writable = false;
 
 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
@@ -585,6 +616,12 @@ static int __collapse_huge_page_isolate(
 
 		VM_BUG_ON_PAGE(!PageAnon(page), page);
 
+		if (page_mapcount(page) > 1 &&
+				++shared > khugepaged_max_ptes_shared) {
+			result = SCAN_EXCEED_SHARED_PTE;
+			goto out;
+		}
+
 		if (PageCompound(page)) {
 			struct page *p;
 			page = compound_head(page);
@@ -1168,7 +1205,8 @@ static int khugepaged_scan_pmd(struct mm
 {
 	pmd_t *pmd;
 	pte_t *pte, *_pte;
-	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
+	int ret = 0, result = 0, referenced = 0;
+	int none_or_zero = 0, shared = 0;
 	struct page *page = NULL;
 	unsigned long _address;
 	spinlock_t *ptl;
@@ -1240,6 +1278,12 @@ static int khugepaged_scan_pmd(struct mm
 			goto out_unmap;
 		}
 
+		if (page_mapcount(page) > 1 &&
+				++shared > khugepaged_max_ptes_shared) {
+			result = SCAN_EXCEED_SHARED_PTE;
+			goto out_unmap;
+		}
+
 		page = compound_head(page);
 
 		/*
--- a/tools/testing/selftests/vm/khugepaged.c~khugepaged-introduce-max_ptes_shared-tunable
+++ a/tools/testing/selftests/vm/khugepaged.c
@@ -78,6 +78,7 @@ struct khugepaged_settings {
 	unsigned int scan_sleep_millisecs;
 	unsigned int max_ptes_none;
 	unsigned int max_ptes_swap;
+	unsigned int max_ptes_shared;
 	unsigned long pages_to_scan;
 };
 
@@ -277,6 +278,7 @@ static void write_settings(struct settin
 			khugepaged->scan_sleep_millisecs);
 	write_num("khugepaged/max_ptes_none", khugepaged->max_ptes_none);
 	write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap);
+	write_num("khugepaged/max_ptes_shared", khugepaged->max_ptes_shared);
 	write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan);
 }
 
@@ -313,6 +315,7 @@ static void save_settings(void)
 			read_num("khugepaged/scan_sleep_millisecs"),
 		.max_ptes_none = read_num("khugepaged/max_ptes_none"),
 		.max_ptes_swap = read_num("khugepaged/max_ptes_swap"),
+		.max_ptes_shared = read_num("khugepaged/max_ptes_shared"),
 		.pages_to_scan = read_num("khugepaged/pages_to_scan"),
 	};
 	success("OK");
@@ -896,12 +899,90 @@ static void collapse_fork_compound(void)
 			fail("Fail");
 		fill_memory(p, 0, page_size);
 
+		write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1);
 		if (wait_for_scan("Collapse PTE table full of compound pages in child", p))
 			fail("Timeout");
 		else if (check_huge(p))
 			success("OK");
 		else
 			fail("Fail");
+		write_num("khugepaged/max_ptes_shared",
+				default_settings.khugepaged.max_ptes_shared);
+
+		validate_memory(p, 0, hpage_pmd_size);
+		munmap(p, hpage_pmd_size);
+		exit(exit_status);
+	}
+
+	wait(&wstatus);
+	exit_status += WEXITSTATUS(wstatus);
+
+	printf("Check if parent still has huge page...");
+	if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, hpage_pmd_size);
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_max_ptes_shared()
+{
+	int max_ptes_shared = read_num("khugepaged/max_ptes_shared");
+	int wstatus;
+	void *p;
+
+	p = alloc_mapping();
+
+	printf("Allocate huge page...");
+	madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
+	fill_memory(p, 0, hpage_pmd_size);
+	if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+
+	printf("Share huge page over fork()...");
+	if (!fork()) {
+		/* Do not touch settings on child exit */
+		skip_settings_restore = true;
+		exit_status = 0;
+
+		if (check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
+
+		printf("Trigger CoW on page %d of %d...",
+				hpage_pmd_nr - max_ptes_shared - 1, hpage_pmd_nr);
+		fill_memory(p, 0, (hpage_pmd_nr - max_ptes_shared - 1) * page_size);
+		if (!check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
+
+		if (wait_for_scan("Do not collapse with max_ptes_shared exceeded", p))
+			fail("Timeout");
+		else if (!check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
+
+		printf("Trigger CoW on page %d of %d...",
+				hpage_pmd_nr - max_ptes_shared, hpage_pmd_nr);
+		fill_memory(p, 0, (hpage_pmd_nr - max_ptes_shared) * page_size);
+		if (!check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
+
+
+		if (wait_for_scan("Collapse with max_ptes_shared PTEs shared", p))
+			fail("Timeout");
+		else if (check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
 
 		validate_memory(p, 0, hpage_pmd_size);
 		munmap(p, hpage_pmd_size);
@@ -930,6 +1011,7 @@ int main(void)
 
 	default_settings.khugepaged.max_ptes_none = hpage_pmd_nr - 1;
 	default_settings.khugepaged.max_ptes_swap = hpage_pmd_nr / 8;
+	default_settings.khugepaged.max_ptes_shared = hpage_pmd_nr / 2;
 	default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8;
 
 	save_settings();
@@ -947,6 +1029,7 @@ int main(void)
 	collapse_compound_extreme();
 	collapse_fork();
 	collapse_fork_compound();
+	collapse_max_ptes_shared();
 
 	restore_settings(0);
 }
_
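
For readers skimming the diff, a minimal standalone model of the new
acceptance rule (an illustration only, not kernel code): a PTE counts as
"shared" when the page it maps has a mapcount greater than one, and the
scan bails out with SCAN_EXCEED_SHARED_PTE once more than max_ptes_shared
such PTEs have been seen:

	/*
	 * Standalone model of the check added to
	 * __collapse_huge_page_isolate() and khugepaged_scan_pmd() above;
	 * illustration only, not kernel code.
	 */
	#include <stdbool.h>
	#include <stddef.h>

	static bool range_may_collapse(const int *mapcounts, size_t nr_ptes,
				       unsigned int max_ptes_shared)
	{
		unsigned int shared = 0;
		size_t i;

		for (i = 0; i < nr_ptes; i++) {
			/* mapcounts[i] models page_mapcount() of the page
			 * mapped by PTE i; > 1 means the page is shared. */
			if (mapcounts[i] > 1 && ++shared > max_ptes_shared)
				return false;	/* SCAN_EXCEED_SHARED_PTE */
		}
		return true;
	}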
