* [LTP] [PATCH v3] Migrating the libhugetlbfs/testcases/stack_grow_into_huge.c test.
@ 2024-04-07 10:24 Samir Mulani
  2024-04-12 11:28 ` Cyril Hrubis
  0 siblings, 1 reply; 2+ messages in thread
From: Samir Mulani @ 2024-04-07 10:24 UTC (permalink / raw)
  To: ltp; +Cc: Samir Mulani, rpalethorpe

Test Description: On PowerPC, the address space is divided into segments.
These segments can contain either huge pages or normal pages, but not
both. All segments are initially set up to map normal pages. When a huge
page mapping is created within a set of empty segments, they are
"enabled" for huge pages at that time. Once enabled for huge pages, they
cannot be used again for normal pages for the remaining lifetime of the
process.

If the segment immediately preceding the segment containing the stack is
converted to huge pages and the stack is made to grow into this
preceding segment, some kernels may attempt to map normal pages into the
huge-page-only segment -- resulting in bugs.

Signed-off-by: Samir Mulani <samir@linux.vnet.ibm.com>
---
v3:
--Addressed the following review comments:
  1. Restricted the test case to the PowerPC architecture; on other architectures it is skipped with a message.
  2. Added a check of the mmap() return value; on an ENOMEM failure the test exits with TCONF, reporting that the system does not have enough memory.
  3. Replaced SAFE_MMAP() with a direct mmap() call.
  4. Replaced the MAP_FIXED flag with MAP_FIXED_NOREPLACE.
  5. Ran make check and fixed the reported issues.
---
 runtest/hugetlb                               |   1 +
 testcases/kernel/mem/.gitignore               |   1 +
 .../kernel/mem/hugetlb/hugemmap/hugemmap34.c  | 142 ++++++++++++++++++
 3 files changed, 144 insertions(+)
 create mode 100644 testcases/kernel/mem/hugetlb/hugemmap/hugemmap34.c

diff --git a/runtest/hugetlb b/runtest/hugetlb
index 299c07ac9..0c812c780 100644
--- a/runtest/hugetlb
+++ b/runtest/hugetlb
@@ -35,6 +35,7 @@ hugemmap29 hugemmap29
 hugemmap30 hugemmap30
 hugemmap31 hugemmap31
 hugemmap32 hugemmap32
+hugemmap34 hugemmap34
 hugemmap05_1 hugemmap05 -m
 hugemmap05_2 hugemmap05 -s
 hugemmap05_3 hugemmap05 -s -m
diff --git a/testcases/kernel/mem/.gitignore b/testcases/kernel/mem/.gitignore
index c96fe8bfc..828c62776 100644
--- a/testcases/kernel/mem/.gitignore
+++ b/testcases/kernel/mem/.gitignore
@@ -34,6 +34,7 @@
 /hugetlb/hugemmap/hugemmap30
 /hugetlb/hugemmap/hugemmap31
 /hugetlb/hugemmap/hugemmap32
+/hugetlb/hugemmap/hugemmap34
 /hugetlb/hugeshmat/hugeshmat01
 /hugetlb/hugeshmat/hugeshmat02
 /hugetlb/hugeshmat/hugeshmat03
diff --git a/testcases/kernel/mem/hugetlb/hugemmap/hugemmap34.c b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap34.c
new file mode 100644
index 000000000..a69f7e42c
--- /dev/null
+++ b/testcases/kernel/mem/hugetlb/hugemmap/hugemmap34.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005-2006 IBM Corporation
+ * Author: David Gibson & Adam Litke
+ */
+
+/*\
+ * [Description]
+ *
+ * Test Name: stack_grow_into_huge
+ *
+ * On PowerPC, the address space is divided into segments.  These segments can
+ * contain either huge pages or normal pages, but not both.  All segments are
+ * initially set up to map normal pages.  When a huge page mapping is created
+ * within a set of empty segments, they are "enabled" for huge pages at that
+ * time.  Once enabled for huge pages, they cannot be used again for normal
+ * pages for the remaining lifetime of the process.
+ *
+ * If the segment immediately preceding the segment containing the stack is
+ * converted to huge pages and the stack is made to grow into this
+ * preceding segment, some kernels may attempt to map normal pages into the
+ * huge-page-only segment -- resulting in bugs.
+ */
+
+#include "hugetlb.h"
+#include <errno.h>
+
+#if defined(__powerpc__) || defined(__powerpc64__)
+
+#ifdef __LP64__
+#define STACK_ALLOCATION_SIZE	(256*1024*1024)
+#else
+#define STACK_ALLOCATION_SIZE	(16*1024*1024)
+#endif
+#define PALIGN(p, a) ((void *)LTP_ALIGN((unsigned long)(p), (a)))
+#define MNTPOINT "hugetlbfs/"
+static int fd = -1;
+static unsigned long long hpage_size;
+static int page_size;
+
+
+void do_child(void *stop_address)
+{
+	struct rlimit r;
+	volatile int *x;
+
+	/* A corefile from this process is not interesting and
+	 * limiting its size can save a lot of time. '1' is a special
+	 * value that also aborts dumping via a pipe, which by default
+	 * sets the limit to RLIM_INFINITY.
+	 */
+	r.rlim_cur = 1;
+	r.rlim_max = 1;
+	SAFE_SETRLIMIT(RLIMIT_CORE, &r);
+
+	do {
+		x = alloca(STACK_ALLOCATION_SIZE);
+		*x = 1;
+	} while ((void *)x >= stop_address);
+}
+
+static void run_test(void)
+{
+	int pid, status;
+	void *stack_address, *mmap_address, *heap_address, *map;
+
+	stack_address = alloca(0);
+	heap_address = sbrk(0);
+
+	/*
+	 * paranoia: start mapping two hugepages below the start of the stack,
+	 * in case the alignment would cause us to map over something if we
+	 * only used a gap of one hugepage.
+	 */
+	mmap_address = PALIGN(stack_address - 2 * hpage_size, hpage_size);
+	do {
+		map = mmap(mmap_address, hpage_size, PROT_READ|PROT_WRITE,
+				MAP_SHARED | MAP_FIXED_NOREPLACE, fd, 0);
+		if (map == MAP_FAILED) {
+			if (errno == ENOMEM) {
+				tst_res(TCONF, "Not enough memory in the system to do mmap");
+				exit(-1);
+			}
+		}
+		mmap_address -= hpage_size;
+		/*
+		 * if we get all the way down to the heap, stop trying
+		 */
+	} while (mmap_address <= heap_address);
+	pid = SAFE_FORK();
+	if (pid == 0)
+		do_child(mmap_address);
+
+	SAFE_WAITPID(pid, &status, 0);
+	if (WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV)
+		tst_res(TPASS, "Child killed by %s as expected", tst_strsig(SIGSEGV));
+	else
+		tst_res(TFAIL, "Child: %s", tst_strstatus(status));
+}
+
+void setup(void)
+{
+	struct rlimit r;
+
+	page_size = getpagesize();
+	hpage_size = tst_get_hugepage_size();
+	/*
+	 * Setting the stack size to unlimited.
+	 */
+	r.rlim_cur = RLIM_INFINITY;
+	r.rlim_max = RLIM_INFINITY;
+	SAFE_SETRLIMIT(RLIMIT_STACK, &r);
+	SAFE_GETRLIMIT(RLIMIT_STACK, &r);
+	if (r.rlim_cur != RLIM_INFINITY)
+		tst_brk(TCONF, "Stack rlimit must be 'unlimited'");
+	fd = tst_creat_unlinked(MNTPOINT, 0);
+}
+
+void cleanup(void)
+{
+	if (fd > 0)
+		SAFE_CLOSE(fd);
+}
+
+static struct tst_test test = {
+	.tags = (struct tst_tag[]) {
+		{"linux-git", "0d59a01bc461"},
+		{}
+	},
+	.needs_root = 1,
+	.mntpoint = MNTPOINT,
+	.needs_hugetlbfs = 1,
+	.needs_tmpdir = 1,
+	.setup = setup,
+	.cleanup = cleanup,
+	.test_all = run_test,
+	.hugepages = {1, TST_NEEDS},
+	.forks_child = 1,
+};
+#else
+TST_TEST_TCONF("stack_grow_into_huge is not supported on this architecture");
+#endif
-- 
2.43.0




* Re: [LTP] [PATCH v3] Migrating the libhugetlbfs/testcases/stack_grow_into_huge.c test.
  2024-04-07 10:24 [LTP] [PATCH v3] Migrating the libhugetlbfs/testcases/stack_grow_into_huge.c test Samir Mulani
@ 2024-04-12 11:28 ` Cyril Hrubis
  0 siblings, 0 replies; 2+ messages in thread
From: Cyril Hrubis @ 2024-04-12 11:28 UTC (permalink / raw)
  To: Samir Mulani; +Cc: rpalethorpe, ltp

Hi!
> +#if defined(__powerpc__) || defined(__powerpc64__)

We have a supported_archs array in the tst_test structure now, so instead
of ifdefing out the whole test we should add:

	.supported_archs = {"ppc", "ppc64", NULL},
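
For illustration, an untested sketch of how that would look (the field
takes a NULL-terminated array of strings, so current LTP code spells it
as a compound literal):

	static struct tst_test test = {
		/* ... existing fields unchanged ... */
		.supported_archs = (const char *const []){"ppc", "ppc64", NULL},
	};

With that, the #if defined(__powerpc__) guard and the trailing
TST_TEST_TCONF() branch can be dropped entirely; the test library then
reports TCONF on all other architectures by itself.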

> +	do {
> +		map = mmap(mmap_address, hpage_size, PROT_READ|PROT_WRITE,
> +				MAP_SHARED | MAP_FIXED_NOREPLACE, fd, 0);
> +		if (map == MAP_FAILED) {
> +			if (errno == ENOMEM) {
> +				tst_res(TCONF, "Not enough memory in the system to do mmap");
> +				exit(-1);

This should be just return;
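
I.e. roughly (a sketch):

	if (map == MAP_FAILED) {
		if (errno == ENOMEM) {
			tst_res(TCONF, "Not enough memory in the system to do mmap");
			return;
		}
	}

run_test() is the test function, so returning from it is enough; the
test library takes care of the reporting and cleanup, there is no need
to call exit().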

-- 
Cyril Hrubis
chrubis@suse.cz

