All of lore.kernel.org
 help / color / mirror / Atom feed
From: Alistair Popple <apopple@nvidia.com>
To: linux-mm@kvack.org, cgroups@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, jgg@nvidia.com,
	jhubbard@nvidia.com, tjmercier@google.com, hannes@cmpxchg.org,
	surenb@google.com, mkoutny@suse.com, daniel@ffwll.ch,
	"Daniel P . Berrange" <berrange@redhat.com>,
	Alex Williamson <alex.williamson@redhat.com>,
	Alistair Popple <apopple@nvidia.com>,
	Shuah Khan <shuah@kernel.org>,
	linux-kselftest@vger.kernel.org
Subject: [PATCH 19/19] selftests/vm: Add pins-cgroup selftest for mlock/mmap
Date: Mon,  6 Feb 2023 18:47:56 +1100	[thread overview]
Message-ID: <2bd6038b4a1571631e7797ce0f47a133f52acd9c.1675669136.git-series.apopple@nvidia.com> (raw)
In-Reply-To: <cover.c238416f0e82377b449846dbb2459ae9d7030c8e.1675669136.git-series.apopple@nvidia.com>

Add some basic tests of mlock/mmap cgroup accounting for pinned
memory.

Signed-off-by: Alistair Popple <apopple@nvidia.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: linux-kselftest@vger.kernel.org
Cc: cgroups@vger.kernel.org
---
 MAINTAINERS                              |   1 +-
 tools/testing/selftests/vm/Makefile      |   1 +-
 tools/testing/selftests/vm/pins-cgroup.c | 271 ++++++++++++++++++++++++-
 3 files changed, 273 insertions(+)
 create mode 100644 tools/testing/selftests/vm/pins-cgroup.c

diff --git a/MAINTAINERS b/MAINTAINERS
index f8526e2..4c4eed9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5387,6 +5387,7 @@ L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/pins_cgroup.c
+F:	tools/testing/selftests/vm/pins-cgroup.c
 
 CORETEMP HARDWARE MONITORING DRIVER
 M:	Fenghua Yu <fenghua.yu@intel.com>
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 89c14e4..0653720 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -56,6 +56,7 @@ TEST_GEN_PROGS += soft-dirty
 TEST_GEN_PROGS += split_huge_page_test
 TEST_GEN_FILES += ksm_tests
 TEST_GEN_PROGS += ksm_functional_tests
+TEST_GEN_PROGS += pins-cgroup
 
 ifeq ($(MACHINE),x86_64)
 CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_program.c -m32)
diff --git a/tools/testing/selftests/vm/pins-cgroup.c b/tools/testing/selftests/vm/pins-cgroup.c
new file mode 100644
index 0000000..c2eabc2
--- /dev/null
+++ b/tools/testing/selftests/vm/pins-cgroup.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kselftest_harness.h"
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/capability.h>
+#include <unistd.h>
+
+#define CGROUP_TEMP "/sys/fs/cgroup/pins_XXXXXX"
+#define PINS_MAX (-1UL)
+
+FIXTURE(pins_cg)
+{
+	char *cg_path;
+	long page_size;
+};
+
+static char *cgroup_new(void)
+{
+	char *cg;
+
+	cg = malloc(sizeof(CGROUP_TEMP));
+	strcpy(cg, CGROUP_TEMP);
+	if (!mkdtemp(cg)) {
+		perror("Failed to create cgroup");
+		return NULL;
+	}
+
+	return cg;
+}
+
+static int cgroup_add_proc(char *cg, pid_t pid)
+{
+	char *cg_proc;
+	FILE *f;
+	int ret = 0;
+
+	if (asprintf(&cg_proc, "%s/cgroup.procs", cg) < 0)
+		return -1;
+
+	f = fopen(cg_proc, "w");
+	free(cg_proc);
+	if (!f)
+		return -1;
+
+	if (fprintf(f, "%ld\n", (long) pid) < 0)
+		ret = -1;
+
+	fclose(f);
+	return ret;
+}
+
+static int cgroup_set_limit(char *cg, unsigned long limit)
+{
+	char *cg_pins_max;
+	FILE *f;
+	int ret = 0;
+
+	if (asprintf(&cg_pins_max, "%s/pins.max", cg) < 0)
+		return -1;
+
+	f = fopen(cg_pins_max, "w");
+	free(cg_pins_max);
+	if (!f)
+		return -1;
+
+	if (limit != PINS_MAX) {
+		if (fprintf(f, "%ld\n", limit) < 0)
+			ret = -1;
+	} else {
+		if (fprintf(f, "max\n") < 0)
+			ret = -1;
+	}
+
+	fclose(f);
+	return ret;
+}
+
+FIXTURE_SETUP(pins_cg)
+{
+	char *cg_subtree_control;
+	FILE *f;
+
+	if (asprintf(&cg_subtree_control,
+			"/sys/fs/cgroup/cgroup.subtree_control") < 0)
+		return;
+
+	f = fopen(cg_subtree_control, "w");
+	free(cg_subtree_control);
+	if (!f)
+		return;
+
+	fprintf(f, "+pins\n");
+	fclose(f);
+
+	self->cg_path = cgroup_new();
+	self->page_size = sysconf(_SC_PAGE_SIZE);
+}
+
+FIXTURE_TEARDOWN(pins_cg)
+{
+	cgroup_add_proc("/sys/fs/cgroup", getpid());
+
+	rmdir(self->cg_path);
+	free(self->cg_path);
+}
+
+static long cgroup_pins(char *cg)
+{
+	long pin_count;
+	char *cg_pins_current;
+	FILE *f;
+	int ret;
+
+	if (asprintf(&cg_pins_current, "%s/pins.current", cg) < 0)
+		return -1;
+
+	f = fopen(cg_pins_current, "r");
+	if (!f) {
+		printf("Can't open %s\n", cg_pins_current);
+		getchar();
+		free(cg_pins_current);
+		return -2;
+	}
+
+	free(cg_pins_current);
+
+	if (fscanf(f, "%ld", &pin_count) == EOF)
+		ret = -3;
+	else
+		ret = pin_count;
+
+	fclose(f);
+	return ret;
+}
+
+static int set_rlim_memlock(unsigned long size)
+{
+	struct rlimit rlim_memlock = {
+		.rlim_cur = size,
+		.rlim_max = size,
+	};
+	cap_t cap;
+	cap_value_t capability[1] = { CAP_IPC_LOCK };
+
+	/*
+	 * Many of the rlimit checks are skipped if a process has
+	 * CAP_IPC_LOCK. As this test should be run as root we need to
+	 * explicitly drop it.
+	 */
+	cap = cap_get_proc();
+	if (!cap)
+		return -1;
+	if (cap_set_flag(cap, CAP_EFFECTIVE, 1, capability, CAP_CLEAR))
+		return -1;
+	if (cap_set_proc(cap))
+		return -1;
+	return setrlimit(RLIMIT_MEMLOCK, &rlim_memlock);
+}
+
+TEST_F(pins_cg, basic)
+{
+	pid_t child_pid;
+	long page_size = self->page_size;
+	char *p;
+
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	p = mmap(NULL, 32*page_size, PROT_READ | PROT_WRITE,
+		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	ASSERT_NE(p, MAP_FAILED);
+
+	ASSERT_EQ(cgroup_pins(self->cg_path), 0);
+	memset(p, 0, 16*page_size);
+	ASSERT_EQ(mlock(p, page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 1);
+	ASSERT_EQ(mlock(p + page_size, page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(mlock(p, page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(mlock(p, 4*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 4);
+	ASSERT_EQ(munlock(p + 2*page_size, 2*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(cgroup_set_limit(self->cg_path, 8), 0);
+	ASSERT_EQ(mlock(p, 16*page_size), -1);
+	ASSERT_EQ(errno, ENOMEM);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(cgroup_set_limit(self->cg_path, PINS_MAX), 0);
+
+	/* check mremap() a locked region correctly accounts locked pages */
+	ASSERT_EQ(mlock(p, 32*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+	p = mremap(p, 32*page_size, 64*page_size, MREMAP_MAYMOVE);
+	ASSERT_NE(p, MAP_FAILED);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 64);
+	ASSERT_EQ(munmap(p + 32*page_size, 32*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+	p = mremap(p, 32*page_size, 32*page_size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
+	ASSERT_NE(p, MAP_FAILED);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+	ASSERT_EQ(munlock(p, 32*page_size), 0);
+
+	/* mremap() a locked region should fail if limit exceeded */
+	ASSERT_EQ(set_rlim_memlock(32*page_size), 0);
+	ASSERT_EQ(mlock(p, 32*page_size), 0);
+	ASSERT_EQ(mremap(p, 32*page_size, 64*page_size, 0), MAP_FAILED);
+	ASSERT_EQ(munlock(p, 32*page_size), 0);
+
+	/* Exceeds rlimit, expected to fail */
+	ASSERT_EQ(set_rlim_memlock(16*page_size), 0);
+	ASSERT_EQ(mlock(p, 32*page_size), -1);
+	ASSERT_EQ(errno, ENOMEM);
+
+	/* memory in the child isn't locked so shouldn't increase pin_cg count */
+	ASSERT_EQ(mlock(p, 16*page_size), 0);
+	child_pid = fork();
+	if (!child_pid) {
+		ASSERT_EQ(cgroup_pins(self->cg_path), 16);
+		ASSERT_EQ(mlock(p, 16*page_size), 0);
+		ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+		return;
+
+	}
+	waitpid(child_pid, NULL, 0);
+
+	/* check that child exit uncharged the pins */
+	ASSERT_EQ(cgroup_pins(self->cg_path), 16);
+}
+
+TEST_F(pins_cg, mmap)
+{
+	char *p;
+
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	p = mmap(NULL, 4*self->page_size, PROT_READ | PROT_WRITE,
+		MAP_ANONYMOUS | MAP_PRIVATE | MAP_LOCKED, -1, 0);
+	ASSERT_NE(p, MAP_FAILED);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 4);
+}
+
+/*
+ * Test moving to a different cgroup.
+ */
+TEST_F(pins_cg, move_cg)
+{
+	char *p, *new_cg;
+
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	p = mmap(NULL, 16*self->page_size, PROT_READ | PROT_WRITE,
+		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	ASSERT_NE(p, MAP_FAILED);
+	memset(p, 0, 16*self->page_size);
+	ASSERT_EQ(mlock(p, 16*self->page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 16);
+	ASSERT_NE(new_cg = cgroup_new(), NULL);
+	ASSERT_EQ(cgroup_add_proc(new_cg, getpid()), 0);
+	ASSERT_EQ(cgroup_pins(new_cg), 16);
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	rmdir(new_cg);
+}
+TEST_HARNESS_MAIN
-- 
git-series 0.9.1

WARNING: multiple messages have this Message-ID (diff)
From: Alistair Popple <apopple-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
To: linux-mm-Bw31MaZKKs3YtjvyW6yDsg@public.gmane.org,
	cgroups-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Cc: linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	jgg-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org,
	jhubbard-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org,
	tjmercier-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org,
	hannes-druUgvl0LCNAfugRpC6u6w@public.gmane.org,
	surenb-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org,
	mkoutny-IBi9RG/b67k@public.gmane.org,
	daniel-/w4YWyX8dFk@public.gmane.org,
	"Daniel P . Berrange"
	<berrange-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>,
	Alex Williamson
	<alex.williamson-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>,
	Alistair Popple <apopple-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>,
	Shuah Khan <shuah-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>,
	linux-kselftest-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Subject: [PATCH 19/19] selftests/vm: Add pins-cgroup selftest for mlock/mmap
Date: Mon,  6 Feb 2023 18:47:56 +1100	[thread overview]
Message-ID: <2bd6038b4a1571631e7797ce0f47a133f52acd9c.1675669136.git-series.apopple@nvidia.com> (raw)
In-Reply-To: <cover.c238416f0e82377b449846dbb2459ae9d7030c8e.1675669136.git-series.apopple-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>

Add some basic tests of mlock/mmap cgroup accounting for pinned
memory.

Signed-off-by: Alistair Popple <apopple-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
Cc: Shuah Khan <shuah-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
Cc: linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Cc: linux-mm-Bw31MaZKKs3YtjvyW6yDsg@public.gmane.org
Cc: linux-kselftest-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Cc: cgroups-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
---
 MAINTAINERS                              |   1 +-
 tools/testing/selftests/vm/Makefile      |   1 +-
 tools/testing/selftests/vm/pins-cgroup.c | 271 ++++++++++++++++++++++++-
 3 files changed, 273 insertions(+)
 create mode 100644 tools/testing/selftests/vm/pins-cgroup.c

diff --git a/MAINTAINERS b/MAINTAINERS
index f8526e2..4c4eed9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5387,6 +5387,7 @@ L:	cgroups-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
 L:	linux-mm-Bw31MaZKKs3YtjvyW6yDsg@public.gmane.org
 S:	Maintained
 F:	mm/pins_cgroup.c
+F:	tools/testing/selftests/vm/pins-cgroup.c
 
 CORETEMP HARDWARE MONITORING DRIVER
 M:	Fenghua Yu <fenghua.yu-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 89c14e4..0653720 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -56,6 +56,7 @@ TEST_GEN_PROGS += soft-dirty
 TEST_GEN_PROGS += split_huge_page_test
 TEST_GEN_FILES += ksm_tests
 TEST_GEN_PROGS += ksm_functional_tests
+TEST_GEN_PROGS += pins-cgroup
 
 ifeq ($(MACHINE),x86_64)
 CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_program.c -m32)
diff --git a/tools/testing/selftests/vm/pins-cgroup.c b/tools/testing/selftests/vm/pins-cgroup.c
new file mode 100644
index 0000000..c2eabc2
--- /dev/null
+++ b/tools/testing/selftests/vm/pins-cgroup.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kselftest_harness.h"
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/capability.h>
+#include <unistd.h>
+
+#define CGROUP_TEMP "/sys/fs/cgroup/pins_XXXXXX"
+#define PINS_MAX (-1UL)
+
+FIXTURE(pins_cg)
+{
+	char *cg_path;
+	long page_size;
+};
+
+static char *cgroup_new(void)
+{
+	char *cg;
+
+	cg = malloc(sizeof(CGROUP_TEMP));
+	strcpy(cg, CGROUP_TEMP);
+	if (!mkdtemp(cg)) {
+		perror("Failed to create cgroup");
+		return NULL;
+	}
+
+	return cg;
+}
+
+static int cgroup_add_proc(char *cg, pid_t pid)
+{
+	char *cg_proc;
+	FILE *f;
+	int ret = 0;
+
+	if (asprintf(&cg_proc, "%s/cgroup.procs", cg) < 0)
+		return -1;
+
+	f = fopen(cg_proc, "w");
+	free(cg_proc);
+	if (!f)
+		return -1;
+
+	if (fprintf(f, "%ld\n", (long) pid) < 0)
+		ret = -1;
+
+	fclose(f);
+	return ret;
+}
+
+static int cgroup_set_limit(char *cg, unsigned long limit)
+{
+	char *cg_pins_max;
+	FILE *f;
+	int ret = 0;
+
+	if (asprintf(&cg_pins_max, "%s/pins.max", cg) < 0)
+		return -1;
+
+	f = fopen(cg_pins_max, "w");
+	free(cg_pins_max);
+	if (!f)
+		return -1;
+
+	if (limit != PINS_MAX) {
+		if (fprintf(f, "%ld\n", limit) < 0)
+			ret = -1;
+	} else {
+		if (fprintf(f, "max\n") < 0)
+			ret = -1;
+	}
+
+	fclose(f);
+	return ret;
+}
+
+FIXTURE_SETUP(pins_cg)
+{
+	char *cg_subtree_control;
+	FILE *f;
+
+	if (asprintf(&cg_subtree_control,
+			"/sys/fs/cgroup/cgroup.subtree_control") < 0)
+		return;
+
+	f = fopen(cg_subtree_control, "w");
+	free(cg_subtree_control);
+	if (!f)
+		return;
+
+	fprintf(f, "+pins\n");
+	fclose(f);
+
+	self->cg_path = cgroup_new();
+	self->page_size = sysconf(_SC_PAGE_SIZE);
+}
+
+FIXTURE_TEARDOWN(pins_cg)
+{
+	cgroup_add_proc("/sys/fs/cgroup", getpid());
+
+	rmdir(self->cg_path);
+	free(self->cg_path);
+}
+
+static long cgroup_pins(char *cg)
+{
+	long pin_count;
+	char *cg_pins_current;
+	FILE *f;
+	int ret;
+
+	if (asprintf(&cg_pins_current, "%s/pins.current", cg) < 0)
+		return -1;
+
+	f = fopen(cg_pins_current, "r");
+	if (!f) {
+		printf("Can't open %s\n", cg_pins_current);
+		getchar();
+		free(cg_pins_current);
+		return -2;
+	}
+
+	free(cg_pins_current);
+
+	if (fscanf(f, "%ld", &pin_count) == EOF)
+		ret = -3;
+	else
+		ret = pin_count;
+
+	fclose(f);
+	return ret;
+}
+
+static int set_rlim_memlock(unsigned long size)
+{
+	struct rlimit rlim_memlock = {
+		.rlim_cur = size,
+		.rlim_max = size,
+	};
+	cap_t cap;
+	cap_value_t capability[1] = { CAP_IPC_LOCK };
+
+	/*
+	 * Many of the rlimit checks are skipped if a process has
+	 * CAP_IPC_LOCK. As this test should be run as root we need to
+	 * explicitly drop it.
+	 */
+	cap = cap_get_proc();
+	if (!cap)
+		return -1;
+	if (cap_set_flag(cap, CAP_EFFECTIVE, 1, capability, CAP_CLEAR))
+		return -1;
+	if (cap_set_proc(cap))
+		return -1;
+	return setrlimit(RLIMIT_MEMLOCK, &rlim_memlock);
+}
+
+TEST_F(pins_cg, basic)
+{
+	pid_t child_pid;
+	long page_size = self->page_size;
+	char *p;
+
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	p = mmap(NULL, 32*page_size, PROT_READ | PROT_WRITE,
+		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	ASSERT_NE(p, MAP_FAILED);
+
+	ASSERT_EQ(cgroup_pins(self->cg_path), 0);
+	memset(p, 0, 16*page_size);
+	ASSERT_EQ(mlock(p, page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 1);
+	ASSERT_EQ(mlock(p + page_size, page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(mlock(p, page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(mlock(p, 4*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 4);
+	ASSERT_EQ(munlock(p + 2*page_size, 2*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(cgroup_set_limit(self->cg_path, 8), 0);
+	ASSERT_EQ(mlock(p, 16*page_size), -1);
+	ASSERT_EQ(errno, ENOMEM);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(cgroup_set_limit(self->cg_path, PINS_MAX), 0);
+
+	/* check mremap() a locked region correctly accounts locked pages */
+	ASSERT_EQ(mlock(p, 32*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+	p = mremap(p, 32*page_size, 64*page_size, MREMAP_MAYMOVE);
+	ASSERT_NE(p, MAP_FAILED);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 64);
+	ASSERT_EQ(munmap(p + 32*page_size, 32*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+	p = mremap(p, 32*page_size, 32*page_size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
+	ASSERT_NE(p, MAP_FAILED);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+	ASSERT_EQ(munlock(p, 32*page_size), 0);
+
+	/* mremap() a locked region should fail if limit exceeded */
+	ASSERT_EQ(set_rlim_memlock(32*page_size), 0);
+	ASSERT_EQ(mlock(p, 32*page_size), 0);
+	ASSERT_EQ(mremap(p, 32*page_size, 64*page_size, 0), MAP_FAILED);
+	ASSERT_EQ(munlock(p, 32*page_size), 0);
+
+	/* Exceeds rlimit, expected to fail */
+	ASSERT_EQ(set_rlim_memlock(16*page_size), 0);
+	ASSERT_EQ(mlock(p, 32*page_size), -1);
+	ASSERT_EQ(errno, ENOMEM);
+
+	/* memory in the child isn't locked so shouldn't increase pin_cg count */
+	ASSERT_EQ(mlock(p, 16*page_size), 0);
+	child_pid = fork();
+	if (!child_pid) {
+		ASSERT_EQ(cgroup_pins(self->cg_path), 16);
+		ASSERT_EQ(mlock(p, 16*page_size), 0);
+		ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+		return;
+
+	}
+	waitpid(child_pid, NULL, 0);
+
+	/* check that child exit uncharged the pins */
+	ASSERT_EQ(cgroup_pins(self->cg_path), 16);
+}
+
+TEST_F(pins_cg, mmap)
+{
+	char *p;
+
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	p = mmap(NULL, 4*self->page_size, PROT_READ | PROT_WRITE,
+		MAP_ANONYMOUS | MAP_PRIVATE | MAP_LOCKED, -1, 0);
+	ASSERT_NE(p, MAP_FAILED);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 4);
+}
+
+/*
+ * Test moving to a different cgroup.
+ */
+TEST_F(pins_cg, move_cg)
+{
+	char *p, *new_cg;
+
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	p = mmap(NULL, 16*self->page_size, PROT_READ | PROT_WRITE,
+		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	ASSERT_NE(p, MAP_FAILED);
+	memset(p, 0, 16*self->page_size);
+	ASSERT_EQ(mlock(p, 16*self->page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 16);
+	ASSERT_NE(new_cg = cgroup_new(), NULL);
+	ASSERT_EQ(cgroup_add_proc(new_cg, getpid()), 0);
+	ASSERT_EQ(cgroup_pins(new_cg), 16);
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	rmdir(new_cg);
+}
+TEST_HARNESS_MAIN
-- 
git-series 0.9.1

  parent reply	other threads:[~2023-02-06  7:52 UTC|newest]

Thread overview: 128+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-02-06  7:47 [PATCH 00/19] mm: Introduce a cgroup to limit the amount of locked and pinned memory Alistair Popple
2023-02-06  7:47 ` Alistair Popple
2023-02-06  7:47 ` [PATCH 01/19] mm: Introduce vm_account Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 02/19] drivers/vhost: Convert to use vm_account Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 03/19] drivers/vdpa: Convert vdpa to use the new vm_structure Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 04/19] infiniband/umem: Convert to use vm_account Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 05/19] RMDA/siw: " Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-12 17:32   ` Bernard Metzler
2023-02-06  7:47 ` [PATCH 06/19] RDMA/usnic: convert " Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 07/19] vfio/type1: Charge pinned pages to pinned_vm instead of locked_vm Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 08/19] vfio/spapr_tce: Convert accounting to pinned_vm Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 09/19] io_uring: convert to use vm_account Alistair Popple
2023-02-06 15:29   ` Jens Axboe
2023-02-06 15:29     ` Jens Axboe
2023-02-07  1:03     ` Alistair Popple
2023-02-07  1:03       ` Alistair Popple
2023-02-07 14:28       ` Jens Axboe
2023-02-07 14:55         ` Jason Gunthorpe
2023-02-07 14:55           ` Jason Gunthorpe
2023-02-07 17:05           ` Jens Axboe
2023-02-07 17:05             ` Jens Axboe
2023-02-13 11:30             ` Alistair Popple
2023-02-13 11:30               ` Alistair Popple
2023-02-06  7:47 ` [PATCH 10/19] net: skb: Switch to using vm_account Alistair Popple
2023-02-06  7:47 ` [PATCH 11/19] xdp: convert to use vm_account Alistair Popple
2023-02-06  7:47 ` [PATCH 12/19] kvm/book3s_64_vio: Convert account_locked_vm() to vm_account_pinned() Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 13/19] fpga: dfl: afu: convert to use vm_account Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 14/19] mm: Introduce a cgroup for pinned memory Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06 21:01   ` Yosry Ahmed
2023-02-06 21:01     ` Yosry Ahmed
2023-02-06 21:14   ` Tejun Heo
2023-02-06 21:14     ` Tejun Heo
2023-02-06 22:32     ` Yosry Ahmed
2023-02-06 22:32       ` Yosry Ahmed
2023-02-06 22:36       ` Tejun Heo
2023-02-06 22:39         ` Yosry Ahmed
2023-02-06 22:39           ` Yosry Ahmed
2023-02-06 23:25           ` Tejun Heo
2023-02-06 23:25             ` Tejun Heo
2023-02-06 23:34             ` Yosry Ahmed
2023-02-06 23:34               ` Yosry Ahmed
2023-02-06 23:40             ` Jason Gunthorpe
2023-02-06 23:40               ` Jason Gunthorpe
2023-02-07  0:32               ` Tejun Heo
2023-02-07  0:32                 ` Tejun Heo
2023-02-07 12:19                 ` Jason Gunthorpe
2023-02-07 12:19                   ` Jason Gunthorpe
2023-02-15 19:00                 ` Michal Hocko
2023-02-15 19:00                   ` Michal Hocko
2023-02-15 19:07                   ` Jason Gunthorpe
2023-02-15 19:07                     ` Jason Gunthorpe
2023-02-16  8:04                     ` Michal Hocko
2023-02-16  8:04                       ` Michal Hocko
2023-02-16 12:45                       ` Jason Gunthorpe
2023-02-16 12:45                         ` Jason Gunthorpe
2023-02-21 16:51                         ` Tejun Heo
2023-02-21 16:51                           ` Tejun Heo
2023-02-21 17:25                           ` Jason Gunthorpe
2023-02-21 17:29                             ` Tejun Heo
2023-02-21 17:29                               ` Tejun Heo
2023-02-21 17:51                               ` Jason Gunthorpe
2023-02-21 17:51                                 ` Jason Gunthorpe
2023-02-21 18:07                                 ` Tejun Heo
2023-02-21 18:07                                   ` Tejun Heo
2023-02-21 19:26                                   ` Jason Gunthorpe
2023-02-21 19:26                                     ` Jason Gunthorpe
2023-02-21 19:45                                     ` Tejun Heo
2023-02-21 19:45                                       ` Tejun Heo
2023-02-21 19:49                                       ` Tejun Heo
2023-02-21 19:49                                         ` Tejun Heo
2023-02-21 19:57                                       ` Jason Gunthorpe
2023-02-22 11:38                                         ` Alistair Popple
2023-02-22 11:38                                           ` Alistair Popple
2023-02-22 12:57                                           ` Jason Gunthorpe
2023-02-22 12:57                                             ` Jason Gunthorpe
2023-02-22 22:59                                             ` Alistair Popple
2023-02-22 22:59                                               ` Alistair Popple
2023-02-23  0:05                                               ` Christoph Hellwig
2023-02-23  0:35                                                 ` Alistair Popple
2023-02-23  0:35                                                   ` Alistair Popple
2023-02-23  1:53                                               ` Jason Gunthorpe
2023-02-23  1:53                                                 ` Jason Gunthorpe
2023-02-23  9:12                                                 ` Daniel P. Berrangé
2023-02-23 17:31                                                   ` Jason Gunthorpe
2023-02-23 17:31                                                     ` Jason Gunthorpe
2023-02-23 17:18                                                 ` T.J. Mercier
2023-02-23 17:28                                                   ` Jason Gunthorpe
2023-02-23 17:28                                                     ` Jason Gunthorpe
2023-02-23 18:03                                                     ` Yosry Ahmed
2023-02-23 18:10                                                       ` Jason Gunthorpe
2023-02-23 18:10                                                         ` Jason Gunthorpe
2023-02-23 18:14                                                         ` Yosry Ahmed
2023-02-23 18:14                                                           ` Yosry Ahmed
2023-02-23 18:15                                                         ` Tejun Heo
2023-02-23 18:17                                                           ` Jason Gunthorpe
2023-02-23 18:17                                                             ` Jason Gunthorpe
2023-02-23 18:22                                                             ` Tejun Heo
2023-02-23 18:22                                                               ` Tejun Heo
2023-02-07  1:00           ` Waiman Long
2023-02-07  1:00             ` Waiman Long
2023-02-07  1:03             ` Tejun Heo
2023-02-07  1:50               ` Alistair Popple
2023-02-07  1:50                 ` Alistair Popple
2023-02-06  7:47 ` [PATCH 15/19] mm/util: Extend vm_account to charge pages against the pin cgroup Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 16/19] mm/util: Refactor account_locked_vm Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 17/19] mm: Convert mmap and mlock to use account_locked_vm Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06  7:47 ` [PATCH 18/19] mm/mmap: Charge locked memory to pins cgroup Alistair Popple
2023-02-06  7:47   ` Alistair Popple
2023-02-06 21:12   ` Yosry Ahmed
2023-02-06  7:47 ` Alistair Popple [this message]
2023-02-06  7:47   ` [PATCH 19/19] selftests/vm: Add pins-cgroup selftest for mlock/mmap Alistair Popple
2023-02-16 11:01 ` [PATCH 00/19] mm: Introduce a cgroup to limit the amount of locked and pinned memory David Hildenbrand
2023-02-16 11:01   ` David Hildenbrand

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=2bd6038b4a1571631e7797ce0f47a133f52acd9c.1675669136.git-series.apopple@nvidia.com \
    --to=apopple@nvidia.com \
    --cc=alex.williamson@redhat.com \
    --cc=berrange@redhat.com \
    --cc=cgroups@vger.kernel.org \
    --cc=daniel@ffwll.ch \
    --cc=hannes@cmpxchg.org \
    --cc=jgg@nvidia.com \
    --cc=jhubbard@nvidia.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-kselftest@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mkoutny@suse.com \
    --cc=shuah@kernel.org \
    --cc=surenb@google.com \
    --cc=tjmercier@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.