From: Andrew Morton <akpm@linux-foundation.org>
To: aarcange@redhat.com, akpm@linux-foundation.org,
	aneesh.kumar@linux.ibm.com, colin.king@canonical.com,
	jhubbard@nvidia.com, kirill.shutemov@linux.intel.com,
	linux-mm@kvack.org, mike.kravetz@oracle.com,
	mm-commits@vger.kernel.org, rcampbell@nvidia.com,
	torvalds@linux-foundation.org, william.kucharski@oracle.com,
	yang.shi@linux.alibaba.com, ziy@nvidia.com
Subject: [patch 061/131] khugepaged: add self test
Date: Wed, 03 Jun 2020 16:00:06 -0700
Message-ID: <20200603230006.Owk2zlFRn%akpm@linux-foundation.org>
In-Reply-To: <20200603155549.e041363450869eaae4c7f05b@linux-foundation.org>

From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: khugepaged: add self test

Patch series "thp/khugepaged improvements and CoW semantics", v4.

The patchset adds a khugepaged selftest (anon-THP only for now), expands
the set of cases khugepaged can handle, and switches anon-THP
copy-on-write handling to 4k.

This patch (of 8):

The test checks whether khugepaged is able to recover huge pages where we
expect it to.  It only covers anon-THP for now.

Currently the test shows a few failures, which will be addressed by the
following patches.
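
For reference, the core pattern each test case automates looks roughly
like the minimal sketch below (not part of the patch: error handling,
sysfs tuning and VMA matching are elided, and a 2M PMD size is assumed
where the real test reads hpage_pmd_size from sysfs).  Note that the
test must run as root, since it rewrites the settings under
/sys/kernel/mm/transparent_hugepage/.

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2UL << 20;	/* assumed PMD-sized huge page */
		char line[256];
		FILE *fp;

		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		/* Make the range collapse-eligible, then populate it with 4k pages. */
		madvise(p, len, MADV_HUGEPAGE);
		memset(p, 1, len);

		/* A non-zero AnonHugePages: field in smaps means a THP is mapped. */
		fp = fopen("/proc/self/smaps", "r");
		if (!fp)
			return 1;
		while (fgets(line, sizeof(line), fp))
			if (!strncmp(line, "AnonHugePages:", 14))
				fputs(line, stdout);
		fclose(fp);
		return 0;
	}

The collapse_* test cases populate memory first and only then apply
MADV_HUGEPAGE and poll khugepaged/full_scans, ensuring the collapse is
performed by khugepaged rather than at fault time.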

[colin.king@canonical.com: fix several spelling mistakes]
  Link: http://lkml.kernel.org/r/20200420084241.65433-1-colin.king@canonical.com
[aneesh.kumar@linux.ibm.com: replace the usage of system(3) in the test]
  Link: http://lkml.kernel.org/r/20200429110727.89388-1-aneesh.kumar@linux.ibm.com
[kirill@shutemov.name: fixup for issues I've noticed]
  Link: http://lkml.kernel.org/r/20200429124816.jp272trghrzxx5j5@box
[jhubbard@nvidia.com: add khugepaged to .gitignore]
  Link: http://lkml.kernel.org/r/20200517002509.362401-1-jhubbard@nvidia.com
Link: http://lkml.kernel.org/r/20200416160026.16538-1-kirill.shutemov@linux.intel.com
Link: http://lkml.kernel.org/r/20200416160026.16538-2-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Tested-by: Zi Yan <ziy@nvidia.com>
Acked-by: Yang Shi <yang.shi@linux.alibaba.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 tools/testing/selftests/vm/.gitignore   |    1 
 tools/testing/selftests/vm/Makefile     |    1 
 tools/testing/selftests/vm/khugepaged.c |  965 ++++++++++++++++++++++
 3 files changed, 967 insertions(+)

--- a/tools/testing/selftests/vm/.gitignore~khugepaged-add-self-test
+++ a/tools/testing/selftests/vm/.gitignore
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 hugepage-mmap
 hugepage-shm
+khugepaged
 map_hugetlb
 map_populate
 thuge-gen
--- /dev/null
+++ a/tools/testing/selftests/vm/khugepaged.c
@@ -0,0 +1,965 @@
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/mman.h>
+#include <sys/wait.h>
+
+#ifndef MADV_PAGEOUT
+#define MADV_PAGEOUT 21
+#endif
+
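+/*
+ * Fixed, PMD-aligned address hint for the test mappings; alloc_mapping()
+ * fails the run if the kernel does not place a VMA exactly here.
+ */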
+#define BASE_ADDR ((void *)(1UL << 30))
+static unsigned long hpage_pmd_size;
+static unsigned long page_size;
+static int hpage_pmd_nr;
+
+#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/"
+#define PID_SMAPS "/proc/self/smaps"
+
+enum thp_enabled {
+	THP_ALWAYS,
+	THP_MADVISE,
+	THP_NEVER,
+};
+
+static const char *thp_enabled_strings[] = {
+	"always",
+	"madvise",
+	"never",
+	NULL
+};
+
+enum thp_defrag {
+	THP_DEFRAG_ALWAYS,
+	THP_DEFRAG_DEFER,
+	THP_DEFRAG_DEFER_MADVISE,
+	THP_DEFRAG_MADVISE,
+	THP_DEFRAG_NEVER,
+};
+
+static const char *thp_defrag_strings[] = {
+	"always",
+	"defer",
+	"defer+madvise",
+	"madvise",
+	"never",
+	NULL
+};
+
+enum shmem_enabled {
+	SHMEM_ALWAYS,
+	SHMEM_WITHIN_SIZE,
+	SHMEM_ADVISE,
+	SHMEM_NEVER,
+	SHMEM_DENY,
+	SHMEM_FORCE,
+};
+
+static const char *shmem_enabled_strings[] = {
+	"always",
+	"within_size",
+	"advise",
+	"never",
+	"deny",
+	"force",
+	NULL
+};
+
+struct khugepaged_settings {
+	bool defrag;
+	unsigned int alloc_sleep_millisecs;
+	unsigned int scan_sleep_millisecs;
+	unsigned int max_ptes_none;
+	unsigned int max_ptes_swap;
+	unsigned long pages_to_scan;
+};
+
+struct settings {
+	enum thp_enabled thp_enabled;
+	enum thp_defrag thp_defrag;
+	enum shmem_enabled shmem_enabled;
+	bool debug_cow;
+	bool use_zero_page;
+	struct khugepaged_settings khugepaged;
+};
+
+static struct settings default_settings = {
+	.thp_enabled = THP_MADVISE,
+	.thp_defrag = THP_DEFRAG_ALWAYS,
+	.shmem_enabled = SHMEM_NEVER,
+	.debug_cow = 0,
+	.use_zero_page = 0,
+	.khugepaged = {
+		.defrag = 1,
+		.alloc_sleep_millisecs = 10,
+		.scan_sleep_millisecs = 10,
+	},
+};
+
+static struct settings saved_settings;
+static bool skip_settings_restore;
+
+static int exit_status;
+
+static void success(const char *msg)
+{
+	printf(" \e[32m%s\e[0m\n", msg);
+}
+
+static void fail(const char *msg)
+{
+	printf(" \e[31m%s\e[0m\n", msg);
+	exit_status++;
+}
+
+static int read_file(const char *path, char *buf, size_t buflen)
+{
+	int fd;
+	ssize_t numread;
+
+	fd = open(path, O_RDONLY);
+	if (fd == -1)
+		return 0;
+
+	numread = read(fd, buf, buflen - 1);
+	if (numread < 1) {
+		close(fd);
+		return 0;
+	}
+
+	buf[numread] = '\0';
+	close(fd);
+
+	return (unsigned int) numread;
+}
+
+static int write_file(const char *path, const char *buf, size_t buflen)
+{
+	int fd;
+	ssize_t numwritten;
+
+	fd = open(path, O_WRONLY);
+	if (fd == -1)
+		return 0;
+
+	numwritten = write(fd, buf, buflen - 1);
+	close(fd);
+	if (numwritten < 1)
+		return 0;
+
+	return (unsigned int) numwritten;
+}
+
+static int read_string(const char *name, const char *strings[])
+{
+	char path[PATH_MAX];
+	char buf[256];
+	char *c;
+	int ret;
+
+	ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
+	if (ret >= PATH_MAX) {
+		printf("%s: Pathname is too long\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+
+	if (!read_file(path, buf, sizeof(buf))) {
+		perror(path);
+		exit(EXIT_FAILURE);
+	}
+
+	c = strchr(buf, '[');
+	if (!c) {
+		printf("%s: Parse failure\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+
+	c++;
+	memmove(buf, c, sizeof(buf) - (c - buf));
+
+	c = strchr(buf, ']');
+	if (!c) {
+		printf("%s: Parse failure\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+	*c = '\0';
+
+	ret = 0;
+	while (strings[ret]) {
+		if (!strcmp(strings[ret], buf))
+			return ret;
+		ret++;
+	}
+
+	printf("Failed to parse %s\n", name);
+	exit(EXIT_FAILURE);
+}
+
+static void write_string(const char *name, const char *val)
+{
+	char path[PATH_MAX];
+	int ret;
+
+	ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
+	if (ret >= PATH_MAX) {
+		printf("%s: Pathname is too long\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+
+	if (!write_file(path, val, strlen(val) + 1)) {
+		perror(path);
+		exit(EXIT_FAILURE);
+	}
+}
+
+static unsigned long read_num(const char *name)
+{
+	char path[PATH_MAX];
+	char buf[21];
+	int ret;
+
+	ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
+	if (ret >= PATH_MAX) {
+		printf("%s: Pathname is too long\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+
+	ret = read_file(path, buf, sizeof(buf));
+	if (!ret) {
+		perror("read_file(read_num)");
+		exit(EXIT_FAILURE);
+	}
+
+	return strtoul(buf, NULL, 10);
+}
+
+static void write_num(const char *name, unsigned long num)
+{
+	char path[PATH_MAX];
+	char buf[21];
+	int ret;
+
+	ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
+	if (ret >= PATH_MAX) {
+		printf("%s: Pathname is too long\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+
+	sprintf(buf, "%lu", num);
+	if (!write_file(path, buf, strlen(buf) + 1)) {
+		perror(path);
+		exit(EXIT_FAILURE);
+	}
+}
+
+static void write_settings(struct settings *settings)
+{
+	struct khugepaged_settings *khugepaged = &settings->khugepaged;
+
+	write_string("enabled", thp_enabled_strings[settings->thp_enabled]);
+	write_string("defrag", thp_defrag_strings[settings->thp_defrag]);
+	write_string("shmem_enabled",
+			shmem_enabled_strings[settings->shmem_enabled]);
+	write_num("debug_cow", settings->debug_cow);
+	write_num("use_zero_page", settings->use_zero_page);
+
+	write_num("khugepaged/defrag", khugepaged->defrag);
+	write_num("khugepaged/alloc_sleep_millisecs",
+			khugepaged->alloc_sleep_millisecs);
+	write_num("khugepaged/scan_sleep_millisecs",
+			khugepaged->scan_sleep_millisecs);
+	write_num("khugepaged/max_ptes_none", khugepaged->max_ptes_none);
+	write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap);
+	write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan);
+}
+
+static void restore_settings(int sig)
+{
+	if (skip_settings_restore)
+		goto out;
+
+	printf("Restore THP and khugepaged settings...");
+	write_settings(&saved_settings);
+	success("OK");
+	if (sig)
+		exit(EXIT_FAILURE);
+out:
+	exit(exit_status);
+}
+
+static void save_settings(void)
+{
+	printf("Save THP and khugepaged settings...");
+	saved_settings = (struct settings) {
+		.thp_enabled = read_string("enabled", thp_enabled_strings),
+		.thp_defrag = read_string("defrag", thp_defrag_strings),
+		.shmem_enabled =
+			read_string("shmem_enabled", shmem_enabled_strings),
+		.debug_cow = read_num("debug_cow"),
+		.use_zero_page = read_num("use_zero_page"),
+	};
+	saved_settings.khugepaged = (struct khugepaged_settings) {
+		.defrag = read_num("khugepaged/defrag"),
+		.alloc_sleep_millisecs =
+			read_num("khugepaged/alloc_sleep_millisecs"),
+		.scan_sleep_millisecs =
+			read_num("khugepaged/scan_sleep_millisecs"),
+		.max_ptes_none = read_num("khugepaged/max_ptes_none"),
+		.max_ptes_swap = read_num("khugepaged/max_ptes_swap"),
+		.pages_to_scan = read_num("khugepaged/pages_to_scan"),
+	};
+	success("OK");
+
+	signal(SIGTERM, restore_settings);
+	signal(SIGINT, restore_settings);
+	signal(SIGHUP, restore_settings);
+	signal(SIGQUIT, restore_settings);
+}
+
+static void adjust_settings(void)
+{
+	printf("Adjust settings...");
+	write_settings(&default_settings);
+	success("OK");
+}
+
+#define MAX_LINE_LENGTH 500
+
+static bool check_for_pattern(FILE *fp, char *pattern, char *buf)
+{
+	while (fgets(buf, MAX_LINE_LENGTH, fp) != NULL) {
+		if (!strncmp(buf, pattern, strlen(pattern)))
+			return true;
+	}
+	return false;
+}
+
+static bool check_huge(void *addr)
+{
+	bool thp = false;
+	int ret;
+	FILE *fp;
+	char buffer[MAX_LINE_LENGTH];
+	char addr_pattern[MAX_LINE_LENGTH];
+
+	ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
+		       (unsigned long) addr);
+	if (ret >= MAX_LINE_LENGTH) {
+		printf("%s: Pattern is too long\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+
+	fp = fopen(PID_SMAPS, "r");
+	if (!fp) {
+		printf("%s: Failed to open file %s\n", __func__, PID_SMAPS);
+		exit(EXIT_FAILURE);
+	}
+	if (!check_for_pattern(fp, addr_pattern, buffer))
+		goto err_out;
+
+	ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "AnonHugePages:%10ld kB",
+		       hpage_pmd_size >> 10);
+	if (ret >= MAX_LINE_LENGTH) {
+		printf("%s: Pattern is too long\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+	/*
+	 * Fetch the AnonHugePages: field of the same VMA block and check
+	 * that it reports the expected number of huge pages.
+	 */
+	if (!check_for_pattern(fp, "AnonHugePages:", buffer))
+		goto err_out;
+
+	if (strncmp(buffer, addr_pattern, strlen(addr_pattern)))
+		goto err_out;
+
+	thp = true;
+err_out:
+	fclose(fp);
+	return thp;
+}
+
+
+static bool check_swap(void *addr, unsigned long size)
+{
+	bool swap = false;
+	int ret;
+	FILE *fp;
+	char buffer[MAX_LINE_LENGTH];
+	char addr_pattern[MAX_LINE_LENGTH];
+
+	ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
+		       (unsigned long) addr);
+	if (ret >= MAX_LINE_LENGTH) {
+		printf("%s: Pattern is too long\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+
+	fp = fopen(PID_SMAPS, "r");
+	if (!fp) {
+		printf("%s: Failed to open file %s\n", __func__, PID_SMAPS);
+		exit(EXIT_FAILURE);
+	}
+	if (!check_for_pattern(fp, addr_pattern, buffer))
+		goto err_out;
+
+	ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "Swap:%19ld kB",
+		       size >> 10);
+	if (ret >= MAX_LINE_LENGTH) {
+		printf("%s: Pattern is too long\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+	/*
+	 * Fetch the Swap: field of the same VMA block and check that it
+	 * reports the expected amount of swapped-out memory.
+	 */
+	if (!check_for_pattern(fp, "Swap:", buffer))
+		goto err_out;
+
+	if (strncmp(buffer, addr_pattern, strlen(addr_pattern)))
+		goto err_out;
+
+	swap = true;
+err_out:
+	fclose(fp);
+	return swap;
+}
+
+static void *alloc_mapping(void)
+{
+	void *p;
+
+	p = mmap(BASE_ADDR, hpage_pmd_size, PROT_READ | PROT_WRITE,
+			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	if (p != BASE_ADDR) {
+		printf("Failed to allocate VMA at %p\n", BASE_ADDR);
+		exit(EXIT_FAILURE);
+	}
+
+	return p;
+}
+
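+/*
+ * Stamp the first int of every page with an index-derived marker so that
+ * validate_memory() can detect lost or corrupted pages later.
+ */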
+static void fill_memory(int *p, unsigned long start, unsigned long end)
+{
+	int i;
+
+	for (i = start / page_size; i < end / page_size; i++)
+		p[i * page_size / sizeof(*p)] = i + 0xdead0000;
+}
+
+static void validate_memory(int *p, unsigned long start, unsigned long end)
+{
+	int i;
+
+	for (i = start / page_size; i < end / page_size; i++) {
+		if (p[i * page_size / sizeof(*p)] != i + 0xdead0000) {
+			printf("Page %d is corrupted: %#x\n",
+					i, p[i * page_size / sizeof(*p)]);
+			exit(EXIT_FAILURE);
+		}
+	}
+}
+
+#define TICK 500000
+static bool wait_for_scan(const char *msg, char *p)
+{
+	int full_scans;
+	int timeout = 6; /* 3 seconds */
+
+	/* Sanity check */
+	if (check_huge(p)) {
+		printf("Unexpected huge page\n");
+		exit(EXIT_FAILURE);
+	}
+
+	madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
+
+	/* Wait until khugepaged has completed two more full scans */
+	full_scans = read_num("khugepaged/full_scans") + 2;
+
+	printf("%s...", msg);
+	while (timeout--) {
+		if (check_huge(p))
+			break;
+		if (read_num("khugepaged/full_scans") >= full_scans)
+			break;
+		printf(".");
+		usleep(TICK);
+	}
+
+	madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
+
+	return timeout == -1;
+}
+
+static void alloc_at_fault(void)
+{
+	struct settings settings = default_settings;
+	char *p;
+
+	settings.thp_enabled = THP_ALWAYS;
+	write_settings(&settings);
+
+	p = alloc_mapping();
+	*p = 1;
+	printf("Allocate huge page on fault...");
+	if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+
+	write_settings(&default_settings);
+
+	madvise(p, page_size, MADV_DONTNEED);
+	printf("Split huge PMD on MADV_DONTNEED...");
+	if (!check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_full(void)
+{
+	void *p;
+
+	p = alloc_mapping();
+	fill_memory(p, 0, hpage_pmd_size);
+	if (wait_for_scan("Collapse fully populated PTE table", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, hpage_pmd_size);
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_empty(void)
+{
+	void *p;
+
+	p = alloc_mapping();
+	if (wait_for_scan("Do not collapse empty PTE table", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		fail("Fail");
+	else
+		success("OK");
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_single_pte_entry(void)
+{
+	void *p;
+
+	p = alloc_mapping();
+	fill_memory(p, 0, page_size);
+	if (wait_for_scan("Collapse PTE table with single PTE entry present", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, page_size);
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_max_ptes_none(void)
+{
+	int max_ptes_none = hpage_pmd_nr / 2;
+	struct settings settings = default_settings;
+	void *p;
+
+	settings.khugepaged.max_ptes_none = max_ptes_none;
+	write_settings(&settings);
+
+	p = alloc_mapping();
+
+	fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
+	if (wait_for_scan("Do not collapse with max_ptes_none exceeded", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		fail("Fail");
+	else
+		success("OK");
+	validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
+
+	fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size);
+	if (wait_for_scan("Collapse with max_ptes_none PTEs empty", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size);
+
+	munmap(p, hpage_pmd_size);
+	write_settings(&default_settings);
+}
+
+static void collapse_swapin_single_pte(void)
+{
+	void *p;
+	p = alloc_mapping();
+	fill_memory(p, 0, hpage_pmd_size);
+
+	printf("Swapout one page...");
+	if (madvise(p, page_size, MADV_PAGEOUT)) {
+		perror("madvise(MADV_PAGEOUT)");
+		exit(EXIT_FAILURE);
+	}
+	if (check_swap(p, page_size)) {
+		success("OK");
+	} else {
+		fail("Fail");
+		goto out;
+	}
+
+	if (wait_for_scan("Collapse with swapping in single PTE entry", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, hpage_pmd_size);
+out:
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_max_ptes_swap(void)
+{
+	int max_ptes_swap = read_num("khugepaged/max_ptes_swap");
+	void *p;
+
+	p = alloc_mapping();
+
+	fill_memory(p, 0, hpage_pmd_size);
+	printf("Swapout %d of %d pages...", max_ptes_swap + 1, hpage_pmd_nr);
+	if (madvise(p, (max_ptes_swap + 1) * page_size, MADV_PAGEOUT)) {
+		perror("madvise(MADV_PAGEOUT)");
+		exit(EXIT_FAILURE);
+	}
+	if (check_swap(p, (max_ptes_swap + 1) * page_size)) {
+		success("OK");
+	} else {
+		fail("Fail");
+		goto out;
+	}
+
+	if (wait_for_scan("Do not collapse with max_ptes_swap exceeded", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		fail("Fail");
+	else
+		success("OK");
+	validate_memory(p, 0, hpage_pmd_size);
+
+	fill_memory(p, 0, hpage_pmd_size);
+	printf("Swapout %d of %d pages...", max_ptes_swap, hpage_pmd_nr);
+	if (madvise(p, max_ptes_swap * page_size, MADV_PAGEOUT)) {
+		perror("madvise(MADV_PAGEOUT)");
+		exit(EXIT_FAILURE);
+	}
+	if (check_swap(p, max_ptes_swap * page_size)) {
+		success("OK");
+	} else {
+		fail("Fail");
+		goto out;
+	}
+
+	if (wait_for_scan("Collapse with max_ptes_swap pages swapped out", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, hpage_pmd_size);
+out:
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_single_pte_entry_compound(void)
+{
+	void *p;
+
+	p = alloc_mapping();
+
+	printf("Allocate huge page...");
+	madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
+	fill_memory(p, 0, hpage_pmd_size);
+	if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
+
+	printf("Split huge page leaving single PTE mapping compound page...");
+	madvise(p + page_size, hpage_pmd_size - page_size, MADV_DONTNEED);
+	if (!check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+
+	if (wait_for_scan("Collapse PTE table with single PTE mapping compound page", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, page_size);
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_full_of_compound(void)
+{
+	void *p;
+
+	p = alloc_mapping();
+
+	printf("Allocate huge page...");
+	madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
+	fill_memory(p, 0, hpage_pmd_size);
+	if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+
+	printf("Split huge page leaving single PTE page table full of compound pages...");
+	madvise(p, page_size, MADV_NOHUGEPAGE);
+	madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
+	if (!check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+
+	if (wait_for_scan("Collapse PTE table full of compound pages", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, hpage_pmd_size);
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_compound_extreme(void)
+{
+	void *p;
+	int i;
+
+	p = alloc_mapping();
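+
+	/*
+	 * Build the PTE page table one entry at a time: each iteration
+	 * allocates a fresh THP at BASE_ADDR and, with a pair of mremap()
+	 * calls, keeps only its first small page, appending it below the
+	 * pages collected so far.  The finished table has every entry
+	 * mapping a page of a different compound page.
+	 */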
+	for (i = 0; i < hpage_pmd_nr; i++) {
+		printf("\rConstruct PTE page table full of different PTE-mapped compound pages %3d/%d...",
+				i + 1, hpage_pmd_nr);
+
+		madvise(BASE_ADDR, hpage_pmd_size, MADV_HUGEPAGE);
+		fill_memory(BASE_ADDR, 0, hpage_pmd_size);
+		if (!check_huge(BASE_ADDR)) {
+			printf("Failed to allocate huge page\n");
+			exit(EXIT_FAILURE);
+		}
+		madvise(BASE_ADDR, hpage_pmd_size, MADV_NOHUGEPAGE);
+
+		p = mremap(BASE_ADDR - i * page_size,
+				i * page_size + hpage_pmd_size,
+				(i + 1) * page_size,
+				MREMAP_MAYMOVE | MREMAP_FIXED,
+				BASE_ADDR + 2 * hpage_pmd_size);
+		if (p == MAP_FAILED) {
+			perror("mremap+unmap");
+			exit(EXIT_FAILURE);
+		}
+
+		p = mremap(BASE_ADDR + 2 * hpage_pmd_size,
+				(i + 1) * page_size,
+				(i + 1) * page_size + hpage_pmd_size,
+				MREMAP_MAYMOVE | MREMAP_FIXED,
+				BASE_ADDR - (i + 1) * page_size);
+		if (p == MAP_FAILED) {
+			perror("mremap+alloc");
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	munmap(BASE_ADDR, hpage_pmd_size);
+	fill_memory(p, 0, hpage_pmd_size);
+	if (!check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+
+	if (wait_for_scan("Collapse PTE table full of different compound pages", p))
+		fail("Timeout");
+	else if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+
+	validate_memory(p, 0, hpage_pmd_size);
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_fork(void)
+{
+	int wstatus;
+	void *p;
+
+	p = alloc_mapping();
+
+	printf("Allocate small page...");
+	fill_memory(p, 0, page_size);
+	if (!check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+
+	printf("Share small page over fork()...");
+	if (!fork()) {
+		/* Do not touch settings on child exit */
+		skip_settings_restore = true;
+		exit_status = 0;
+
+		if (!check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
+
+		fill_memory(p, page_size, 2 * page_size);
+
+		if (wait_for_scan("Collapse PTE table with single page shared with parent process", p))
+			fail("Timeout");
+		else if (check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
+
+		validate_memory(p, 0, page_size);
+		munmap(p, hpage_pmd_size);
+		exit(exit_status);
+	}
+
+	wait(&wstatus);
+	exit_status += WEXITSTATUS(wstatus);
+
+	printf("Check if parent still has small page...");
+	if (!check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, page_size);
+	munmap(p, hpage_pmd_size);
+}
+
+static void collapse_fork_compound(void)
+{
+	int wstatus;
+	void *p;
+
+	p = alloc_mapping();
+
+	printf("Allocate huge page...");
+	madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
+	fill_memory(p, 0, hpage_pmd_size);
+	if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+
+	printf("Share huge page over fork()...");
+	if (!fork()) {
+		/* Do not touch settings on child exit */
+		skip_settings_restore = true;
+		exit_status = 0;
+
+		if (check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
+
+		printf("Split huge page PMD in child process...");
+		madvise(p, page_size, MADV_NOHUGEPAGE);
+		madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
+		if (!check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
+		fill_memory(p, 0, page_size);
+
+		if (wait_for_scan("Collapse PTE table full of compound pages in child", p))
+			fail("Timeout");
+		else if (check_huge(p))
+			success("OK");
+		else
+			fail("Fail");
+
+		validate_memory(p, 0, hpage_pmd_size);
+		munmap(p, hpage_pmd_size);
+		exit(exit_status);
+	}
+
+	wait(&wstatus);
+	exit_status += WEXITSTATUS(wstatus);
+
+	printf("Check if parent still has huge page...");
+	if (check_huge(p))
+		success("OK");
+	else
+		fail("Fail");
+	validate_memory(p, 0, hpage_pmd_size);
+	munmap(p, hpage_pmd_size);
+}
+
+int main(void)
+{
+	setbuf(stdout, NULL);
+
+	page_size = getpagesize();
+	hpage_pmd_size = read_num("hpage_pmd_size");
+	hpage_pmd_nr = hpage_pmd_size / page_size;
+
+	default_settings.khugepaged.max_ptes_none = hpage_pmd_nr - 1;
+	default_settings.khugepaged.max_ptes_swap = hpage_pmd_nr / 8;
+	default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8;
+
+	save_settings();
+	adjust_settings();
+
+	alloc_at_fault();
+	collapse_full();
+	collapse_empty();
+	collapse_single_pte_entry();
+	collapse_max_ptes_none();
+	collapse_swapin_single_pte();
+	collapse_max_ptes_swap();
+	collapse_single_pte_entry_compound();
+	collapse_full_of_compound();
+	collapse_compound_extreme();
+	collapse_fork();
+	collapse_fork_compound();
+
+	restore_settings(0);
+}
--- a/tools/testing/selftests/vm/Makefile~khugepaged-add-self-test
+++ a/tools/testing/selftests/vm/Makefile
@@ -20,6 +20,7 @@ TEST_GEN_FILES += on-fault-limit
 TEST_GEN_FILES += thuge-gen
 TEST_GEN_FILES += transhuge-stress
 TEST_GEN_FILES += userfaultfd
+TEST_GEN_FILES += khugepaged
 
 ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64))
 TEST_GEN_FILES += va_128TBswitch
_


Thread overview: 179+ messages
2020-06-03 22:55 incoming Andrew Morton
2020-06-03 22:56 ` [patch 001/131] mm/slub: fix a memory leak in sysfs_slab_add() Andrew Morton
2020-06-03 22:56 ` [patch 002/131] mm/memcg: optimize memory.numa_stat like memory.stat Andrew Morton
2020-06-03 22:56 ` [patch 003/131] mm/gup: move __get_user_pages_fast() down a few lines in gup.c Andrew Morton
2020-06-04  1:51   ` John Hubbard
2020-06-03 22:56 ` [patch 004/131] mm/gup: refactor and de-duplicate gup_fast() code Andrew Morton
2020-06-03 22:56   ` Andrew Morton
2020-06-04  2:19   ` Linus Torvalds
2020-06-04  3:19     ` Linus Torvalds
2020-06-04  4:31       ` Linus Torvalds
2020-06-04  5:18         ` John Hubbard
2020-06-03 22:56 ` [patch 005/131] mm/gup: introduce pin_user_pages_fast_only() Andrew Morton
2020-06-03 22:56 ` [patch 006/131] drm/i915: convert get_user_pages() --> pin_user_pages() Andrew Morton
2020-06-03 22:56 ` [patch 007/131] mm/gup: might_lock_read(mmap_sem) in get_user_pages_fast() Andrew Morton
2020-06-03 22:56 ` [patch 008/131] kasan: stop tests being eliminated as dead code with FORTIFY_SOURCE Andrew Morton
2020-06-03 22:56 ` [patch 009/131] string.h: fix incompatibility between FORTIFY_SOURCE and KASAN Andrew Morton
2020-06-03 22:56 ` [patch 010/131] mm: clarify __GFP_MEMALLOC usage Andrew Morton
2020-06-03 22:56 ` [patch 011/131] mm: memblock: replace dereferences of memblock_region.nid with API calls Andrew Morton
2020-06-03 22:56   ` Andrew Morton
2020-06-03 22:56 ` [patch 012/131] mm: make early_pfn_to_nid() and related defintions close to each other Andrew Morton
2020-06-03 22:56   ` Andrew Morton
2020-06-03 22:57 ` [patch 013/131] mm: remove CONFIG_HAVE_MEMBLOCK_NODE_MAP option Andrew Morton
2020-06-03 22:57   ` Andrew Morton
2020-06-03 22:57 ` [patch 014/131] mm: free_area_init: use maximal zone PFNs rather than zone sizes Andrew Morton
2020-06-03 22:57 ` [patch 015/131] mm: use free_area_init() instead of free_area_init_nodes() Andrew Morton
2020-06-03 22:57   ` Andrew Morton
2020-06-03 22:57 ` [patch 016/131] alpha: simplify detection of memory zone boundaries Andrew Morton
2020-06-03 22:57 ` [patch 017/131] arm: " Andrew Morton
2020-06-03 22:57   ` Andrew Morton
2020-06-03 22:57 ` [patch 018/131] arm64: simplify detection of memory zone boundaries for UMA configs Andrew Morton
2020-06-03 22:57   ` Andrew Morton
2020-06-03 22:57 ` [patch 019/131] csky: simplify detection of memory zone boundaries Andrew Morton
2020-06-03 22:57 ` [patch 020/131] m68k: mm: " Andrew Morton
2020-06-03 22:57   ` Andrew Morton
2020-06-03 22:57 ` [patch 021/131] parisc: " Andrew Morton
2020-06-03 22:57   ` Andrew Morton
2020-06-03 22:57 ` [patch 022/131] sparc32: " Andrew Morton
2020-06-03 22:57   ` Andrew Morton
2020-06-03 22:57 ` [patch 023/131] unicore32: " Andrew Morton
2020-06-03 22:57 ` [patch 024/131] xtensa: " Andrew Morton
2020-06-03 22:57 ` [patch 025/131] mm: memmap_init: iterate over memblock regions rather that check each PFN Andrew Morton
2020-06-03 22:57   ` Andrew Morton
2020-06-03 22:57 ` [patch 026/131] mm: remove early_pfn_in_nid() and CONFIG_NODES_SPAN_OTHER_NODES Andrew Morton
2020-06-03 22:57   ` Andrew Morton
2020-06-03 22:58 ` [patch 027/131] mm: free_area_init: allow defining max_zone_pfn in descending order Andrew Morton
2020-06-03 22:58   ` Andrew Morton
2020-06-03 22:58 ` [patch 028/131] mm: rename free_area_init_node() to free_area_init_memoryless_node() Andrew Morton
2020-06-03 22:58 ` [patch 029/131] mm: clean up free_area_init_node() and its helpers Andrew Morton
2020-06-03 22:58 ` [patch 030/131] mm: simplify find_min_pfn_with_active_regions() Andrew Morton
2020-06-03 22:58   ` Andrew Morton
2020-06-03 22:58 ` [patch 031/131] docs/vm: update memory-models documentation Andrew Morton
2020-06-03 22:58 ` [patch 032/131] mm/page_alloc.c: bad_[reason|flags] is not necessary when PageHWPoison Andrew Morton
2020-06-03 22:58 ` [patch 033/131] mm/page_alloc.c: bad_flags is not necessary for bad_page() Andrew Morton
2020-06-03 22:58 ` [patch 034/131] mm/page_alloc.c: rename free_pages_check_bad() to check_free_page_bad() Andrew Morton
2020-06-03 22:58 ` [patch 035/131] mm/page_alloc.c: rename free_pages_check() to check_free_page() Andrew Morton
2020-06-03 22:58 ` [patch 036/131] mm/page_alloc.c: extract check_[new|free]_page_bad() common part to page_bad_reason() Andrew Morton
2020-06-03 22:58   ` Andrew Morton
2020-06-03 22:58 ` [patch 037/131] mm,page_alloc,cma: conditionally prefer cma pageblocks for movable allocations Andrew Morton
2020-06-03 22:58 ` [patch 038/131] mm/page_alloc.c: remove unused free_bootmem_with_active_regions Andrew Morton
2020-06-03 22:58   ` Andrew Morton
2020-06-03 22:58 ` [patch 039/131] mm/page_alloc.c: only tune sysctl_lowmem_reserve_ratio value once when changing it Andrew Morton
2020-06-03 22:58 ` [patch 040/131] mm/page_alloc.c: clear out zone->lowmem_reserve[] if the zone is empty Andrew Morton
2020-06-03 22:58 ` [patch 041/131] mm/vmstat.c: do not show lowmem reserve protection information of empty zone Andrew Morton
2020-06-03 22:58   ` Andrew Morton
2020-06-03 22:58 ` [patch 042/131] mm/page_alloc: use ac->high_zoneidx for classzone_idx Andrew Morton
2020-06-03 22:59 ` [patch 043/131] mm/page_alloc: integrate classzone_idx and high_zoneidx Andrew Morton
2020-06-03 22:59 ` [patch 044/131] mm/page_alloc.c: use NODE_MASK_NONE in build_zonelists() Andrew Morton
2020-06-03 22:59 ` [patch 045/131] mm: rename gfpflags_to_migratetype to gfp_migratetype for same convention Andrew Morton
2020-06-03 22:59 ` [patch 046/131] mm/page_alloc.c: reset numa stats for boot pagesets Andrew Morton
2020-06-03 22:59 ` [patch 047/131] mm, page_alloc: reset the zone->watermark_boost early Andrew Morton
2020-06-03 22:59 ` [patch 048/131] mm/page_alloc: restrict and formalize compound_page_dtors[] Andrew Morton
2020-06-03 22:59 ` [patch 049/131] mm/pagealloc.c: call touch_nmi_watchdog() on max order boundaries in deferred init Andrew Morton
2020-06-03 22:59 ` [patch 050/131] mm: initialize deferred pages with interrupts enabled Andrew Morton
2020-06-03 22:59   ` Andrew Morton
2020-06-03 22:59 ` [patch 051/131] mm: call cond_resched() from deferred_init_memmap() Andrew Morton
2020-06-03 22:59 ` [patch 052/131] padata: remove exit routine Andrew Morton
2020-06-03 22:59 ` [patch 053/131] padata: initialize earlier Andrew Morton
2020-06-03 22:59 ` [patch 054/131] padata: allocate work structures for parallel jobs from a pool Andrew Morton
2020-06-03 22:59 ` [patch 055/131] padata: add basic support for multithreaded jobs Andrew Morton
2020-06-03 22:59 ` [patch 056/131] mm: don't track number of pages during deferred initialization Andrew Morton
2020-06-03 22:59 ` [patch 057/131] mm: parallelize deferred_init_memmap() Andrew Morton
2020-06-03 22:59 ` [patch 058/131] mm: make deferred init's max threads arch-specific Andrew Morton
2020-06-03 22:59 ` [patch 059/131] padata: document multithreaded jobs Andrew Morton
2020-06-03 23:00 ` [patch 060/131] mm/page_alloc.c: add missing newline Andrew Morton
2020-06-03 23:00 ` Andrew Morton [this message]
2020-06-03 23:00 ` [patch 062/131] khugepaged: do not stop collapse if less than half PTEs are referenced Andrew Morton
2020-06-03 23:00 ` [patch 063/131] khugepaged: drain all LRU caches before scanning pages Andrew Morton
2020-06-03 23:00 ` [patch 064/131] khugepaged: drain LRU add pagevec after swapin Andrew Morton
2020-06-03 23:00 ` [patch 065/131] khugepaged: allow to collapse a page shared across fork Andrew Morton
2020-06-03 23:00 ` [patch 066/131] khugepaged: allow to collapse PTE-mapped compound pages Andrew Morton
2020-06-03 23:00 ` [patch 067/131] thp: change CoW semantics for anon-THP Andrew Morton
2020-06-03 23:00 ` [patch 068/131] khugepaged: introduce 'max_ptes_shared' tunable Andrew Morton
2020-06-03 23:00 ` [patch 069/131] hugetlbfs: add arch_hugetlb_valid_size Andrew Morton
2020-06-03 23:00 ` [patch 070/131] hugetlbfs: move hugepagesz= parsing to arch independent code Andrew Morton
2020-06-03 23:00   ` Andrew Morton
2020-06-03 23:00 ` [patch 071/131] hugetlbfs: remove hugetlb_add_hstate() warning for existing hstate Andrew Morton
2020-06-03 23:00 ` [patch 072/131] hugetlbfs: clean up command line processing Andrew Morton
2020-06-03 23:00 ` [patch 073/131] hugetlbfs: fix changes to " Andrew Morton
2020-06-03 23:00   ` Andrew Morton
2020-06-03 23:00 ` [patch 074/131] mm/hugetlb: avoid unnecessary check on pud and pmd entry in huge_pte_offset Andrew Morton
2020-06-03 23:00   ` Andrew Morton
2020-06-03 23:00 ` [patch 075/131] arm64/mm: drop __HAVE_ARCH_HUGE_PTEP_GET Andrew Morton
2020-06-03 23:00   ` Andrew Morton
2020-06-03 23:01 ` [patch 076/131] mm/hugetlb: define a generic fallback for is_hugepage_only_range() Andrew Morton
2020-06-03 23:01   ` Andrew Morton
2020-06-03 23:01 ` [patch 077/131] mm/hugetlb: define a generic fallback for arch_clear_hugepage_flags() Andrew Morton
2020-06-03 23:01   ` Andrew Morton
2020-06-03 23:01 ` [patch 078/131] mm: simplify calling a compound page destructor Andrew Morton
2020-06-03 23:01 ` [patch 079/131] mm/vmscan.c: use update_lru_size() in update_lru_sizes() Andrew Morton
2020-06-03 23:01 ` [patch 080/131] mm/vmscan: count layzfree pages and fix nr_isolated_* mismatch Andrew Morton
2020-06-03 23:01 ` [patch 081/131] mm/vmscan.c: change prototype for shrink_page_list Andrew Morton
2020-06-03 23:01 ` [patch 082/131] mm/vmscan: update the comment of should_continue_reclaim() Andrew Morton
2020-06-03 23:01 ` [patch 083/131] mm: fix NUMA node file count error in replace_page_cache() Andrew Morton
2020-06-03 23:01 ` [patch 084/131] mm: memcontrol: fix stat-corrupting race in charge moving Andrew Morton
2020-06-03 23:01 ` [patch 085/131] mm: memcontrol: drop @compound parameter from memcg charging API Andrew Morton
2020-06-03 23:01 ` [patch 086/131] mm: shmem: remove rare optimization when swapin races with hole punching Andrew Morton
2020-06-03 23:01 ` [patch 087/131] mm: memcontrol: move out cgroup swaprate throttling Andrew Morton
2020-06-03 23:01 ` [patch 088/131] mm: memcontrol: convert page cache to a new mem_cgroup_charge() API Andrew Morton
2020-06-03 23:01 ` [patch 089/131] mm: memcontrol: prepare uncharging for removal of private page type counters Andrew Morton
2020-06-03 23:01 ` [patch 090/131] mm: memcontrol: prepare move_account " Andrew Morton
2020-06-03 23:01 ` [patch 091/131] mm: memcontrol: prepare cgroup vmstat infrastructure for native anon counters Andrew Morton
2020-06-03 23:01 ` [patch 092/131] mm: memcontrol: switch to native NR_FILE_PAGES and NR_SHMEM counters Andrew Morton
2020-06-03 23:01 ` [patch 093/131] mm: memcontrol: switch to native NR_ANON_MAPPED counter Andrew Morton
2020-06-03 23:02 ` [patch 094/131] mm: memcontrol: switch to native NR_ANON_THPS counter Andrew Morton
2020-06-03 23:02 ` [patch 095/131] mm: memcontrol: convert anon and file-thp to new mem_cgroup_charge() API Andrew Morton
2020-06-03 23:02 ` [patch 096/131] mm: memcontrol: drop unused try/commit/cancel charge API Andrew Morton
2020-06-03 23:02 ` [patch 097/131] mm: memcontrol: prepare swap controller setup for integration Andrew Morton
2020-06-03 23:02 ` [patch 098/131] mm: memcontrol: make swap tracking an integral part of memory control Andrew Morton
2020-06-03 23:02   ` Andrew Morton
2020-06-03 23:02 ` [patch 099/131] mm: memcontrol: charge swapin pages on instantiation Andrew Morton
2020-06-03 23:02 ` [patch 100/131] mm: memcontrol: document the new swap control behavior Andrew Morton
2020-06-03 23:02   ` Andrew Morton
2020-06-03 23:02 ` [patch 101/131] mm: memcontrol: delete unused lrucare handling Andrew Morton
2020-06-03 23:02 ` [patch 102/131] mm: memcontrol: update page->mem_cgroup stability rules Andrew Morton
2020-06-03 23:02 ` [patch 103/131] mm: fix LRU balancing effect of new transparent huge pages Andrew Morton
2020-06-03 23:02   ` Andrew Morton
2020-06-03 23:02 ` [patch 104/131] mm: keep separate anon and file statistics on page reclaim activity Andrew Morton
2020-06-03 23:02   ` Andrew Morton
2020-06-03 23:02 ` [patch 105/131] mm: allow swappiness that prefers reclaiming anon over the file workingset Andrew Morton
2020-06-03 23:02 ` [patch 106/131] mm: fold and remove lru_cache_add_anon() and lru_cache_add_file() Andrew Morton
2020-06-03 23:02 ` [patch 107/131] mm: workingset: let cache workingset challenge anon Andrew Morton
2020-06-03 23:02 ` [patch 108/131] mm: remove use-once cache bias from LRU balancing Andrew Morton
2020-06-03 23:02 ` [patch 109/131] mm: vmscan: drop unnecessary div0 avoidance rounding in get_scan_count() Andrew Morton
2020-06-03 23:02 ` [patch 110/131] mm: base LRU balancing on an explicit cost model Andrew Morton
2020-06-03 23:02 ` [patch 111/131] mm: deactivations shouldn't bias the LRU balance Andrew Morton
2020-06-03 23:03 ` [patch 112/131] mm: only count actual rotations as LRU reclaim cost Andrew Morton
2020-06-03 23:03 ` [patch 113/131] mm: balance LRU lists based on relative thrashing Andrew Morton
2020-06-03 23:03   ` Andrew Morton
2020-06-09  9:15   ` Alex Shi
2020-06-09 14:45     ` Johannes Weiner
2020-06-10  5:23       ` Joonsoo Kim
2020-06-11  3:28         ` Alex Shi
2020-06-03 23:03 ` [patch 114/131] mm: vmscan: determine anon/file pressure balance at the reclaim root Andrew Morton
2020-06-03 23:03 ` [patch 115/131] mm: vmscan: reclaim writepage is IO cost Andrew Morton
2020-06-03 23:03 ` [patch 116/131] mm: vmscan: limit the range of LRU type balancing Andrew Morton
2020-06-03 23:03 ` [patch 117/131] mm: swap: fix vmstats for huge pages Andrew Morton
2020-06-03 23:03 ` [patch 118/131] mm: swap: memcg: fix memcg stats " Andrew Morton
2020-06-03 23:03 ` [patch 119/131] tools/vm/page_owner_sort.c: filter out unneeded line Andrew Morton
2020-06-03 23:03 ` [patch 120/131] mm, mempolicy: fix up gup usage in lookup_node Andrew Morton
2020-06-03 23:03 ` [patch 121/131] include/linux/memblock.h: fix minor typo and unclear comment Andrew Morton
2020-06-03 23:03 ` [patch 122/131] sparc32: register memory occupied by kernel as memblock.memory Andrew Morton
2020-06-03 23:03 ` [patch 123/131] hugetlbfs: get unmapped area below TASK_UNMAPPED_BASE for hugetlbfs Andrew Morton
2020-06-03 23:03 ` [patch 124/131] mm: thp: don't need to drain lru cache when splitting and mlocking THP Andrew Morton
2020-06-03 23:03   ` Andrew Morton
2020-06-03 23:03 ` [patch 125/131] powerpc/mm: drop platform defined pmd_mknotpresent() Andrew Morton
2020-06-03 23:03 ` [patch 126/131] mm/thp: rename pmd_mknotpresent() as pmd_mkinvalid() Andrew Morton
2020-06-03 23:03 ` [patch 127/131] drivers/base/memory.c: cache memory blocks in xarray to accelerate lookup Andrew Morton
2020-06-03 23:03 ` [patch 128/131] mm: add DEBUG_WX support Andrew Morton
2020-06-03 23:03 ` [patch 129/131] riscv: support DEBUG_WX Andrew Morton
2020-06-03 23:03 ` [patch 130/131] x86: mm: use ARCH_HAS_DEBUG_WX instead of arch defined Andrew Morton
2020-06-03 23:03   ` Andrew Morton
2020-06-03 23:04 ` [patch 131/131] arm64: " Andrew Morton
2020-06-03 23:04   ` Andrew Morton
2020-06-04  0:00 ` + lib-test-get_count_order-long-in-test_bitopsc-fix.patch added to -mm tree Andrew Morton
2020-06-04  0:54 ` mmotm 2020-06-03-17-54 uploaded Andrew Morton
2020-06-04 18:03 ` + mm-vmalloc-fix-a-typo-in-comment.patch added to -mm tree Andrew Morton
2020-06-04 19:59 ` + memory_hotplug-disable-the-functionality-for-32b.patch " Andrew Morton
2020-06-04 21:30 ` + mm-utilc-remove-the-vm_warn_once-for-vm_committed_as-underflow-check.patch " Andrew Morton
2020-06-04 21:39 ` [folded-merged] mm-page_alloc-skip-waternark_boost-for-atomic-order-0-allocations-fix.patch removed from " Andrew Morton
