From: Dmitry Safonov <dsafonov@virtuozzo.com>
To: <linux-kernel@vger.kernel.org>
Cc: <0x7f454c46@gmail.com>, Dmitry Safonov <dsafonov@virtuozzo.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
	Andy Lutomirski <luto@kernel.org>, Borislav Petkov <bp@suse.de>,
	<x86@kernel.org>, <linux-mm@kvack.org>
Subject: [PATCHv4 2/5] x86/mm: introduce mmap{,_legacy}_base
Date: Mon, 30 Jan 2017 15:04:29 +0300	[thread overview]
Message-ID: <20170130120432.6716-3-dsafonov@virtuozzo.com> (raw)
In-Reply-To: <20170130120432.6716-1-dsafonov@virtuozzo.com>

Make it possible to calculate mmap bases for any specified task_size by
parametrizing mmap_base() with task_size and adding a mmap_legacy_base()
helper. This is needed to correctly choose the base address for mmap()
in both 32-bit and 64-bit syscalls.

In the following patch these helpers will be used to compute:
- mmap{,_legacy}_base for 64-bit mmap()
- mmap_compat{,_legacy}_base for 32-bit mmap()

Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
---
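(Not part of the patch, just an illustration of the intent: now that
mmap_base() and mmap_legacy_base() take an explicit task_size, a follow-up
patch could derive the compat bases from the 32-bit task size in the same
way. A minimal sketch, with the legacy/topdown selection elided; the
mmap_compat_* fields and compat_rnd below are assumptions about that later
patch, nothing introduced here:

	/* Native bases keep using the full TASK_SIZE, as before. */
	mm->mmap_legacy_base = mmap_legacy_base(random_factor, TASK_SIZE);
	mm->mmap_base = mmap_base(random_factor, TASK_SIZE);

	/*
	 * Sketch only: compat bases would be computed from the 32-bit
	 * task size. mmap_compat{,_legacy}_base and compat_rnd are
	 * illustrative names, not added by this patch.
	 */
	mm->mmap_compat_legacy_base =
		mmap_legacy_base(compat_rnd, IA32_PAGE_OFFSET);
	mm->mmap_compat_base = mmap_base(compat_rnd, IA32_PAGE_OFFSET);

The 64-bit layout is unchanged by the parametrization, since the native
callers still pass TASK_SIZE.)
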
 arch/x86/include/asm/elf.h       |  4 +++-
 arch/x86/include/asm/processor.h |  3 ++-
 arch/x86/mm/mmap.c               | 32 ++++++++++++++++++++------------
 3 files changed, 25 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index e7f155c3045e..120b4f3d8a6a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -286,6 +286,7 @@ do {									\
 
 #ifdef CONFIG_X86_32
 
+#define STACK_RND_MASK_MODE(native) (0x7ff)
 #define STACK_RND_MASK (0x7ff)
 
 #define ARCH_DLINFO		ARCH_DLINFO_IA32
@@ -295,7 +296,8 @@ do {									\
 #else /* CONFIG_X86_32 */
 
 /* 1GB for 64bit, 8MB for 32bit */
-#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
+#define STACK_RND_MASK_MODE(native) ((native) ? 0x3fffff : 0x7ff)
+#define STACK_RND_MASK STACK_RND_MASK_MODE(!test_thread_flag(TIF_ADDR32))
 
 #define ARCH_DLINFO							\
 do {									\
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 1be64da0384e..52086e65b422 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -862,7 +862,8 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
+#define _TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
+#define TASK_UNMAPPED_BASE	_TASK_UNMAPPED_BASE(TASK_SIZE)
 
 #define KSTK_EIP(task)		(task_pt_regs(task)->ip)
 
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 42063e787717..98be520fd270 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -35,12 +35,14 @@ struct va_alignment __read_mostly va_align = {
 	.flags = -1,
 };
 
-static unsigned long stack_maxrandom_size(void)
+static unsigned long stack_maxrandom_size(unsigned long task_size)
 {
 	unsigned long max = 0;
 	if ((current->flags & PF_RANDOMIZE) &&
 		!(current->personality & ADDR_NO_RANDOMIZE)) {
-		max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
+		max = (-1UL);
+		max &= STACK_RND_MASK_MODE(task_size == TASK_SIZE_MAX);
+		max <<= PAGE_SHIFT;
 	}
 
 	return max;
@@ -51,8 +53,8 @@ static unsigned long stack_maxrandom_size(void)
  *
  * Leave an at least ~128 MB hole with possible stack randomization.
  */
-#define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
-#define MAX_GAP (TASK_SIZE/6*5)
+#define MIN_GAP(task_size) (128*1024*1024UL + stack_maxrandom_size(task_size))
+#define MAX_GAP(task_size) (task_size/6*5)
 
 static int mmap_is_legacy(void)
 {
@@ -88,16 +90,22 @@ unsigned long arch_mmap_rnd(void)
 	return arch_native_rnd();
 }
 
-static unsigned long mmap_base(unsigned long rnd)
+static unsigned long mmap_base(unsigned long rnd, unsigned long task_size)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
 
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
+	if (gap < MIN_GAP(task_size))
+		gap = MIN_GAP(task_size);
+	else if (gap > MAX_GAP(task_size))
+		gap = MAX_GAP(task_size);
 
-	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+	return PAGE_ALIGN(task_size - gap - rnd);
+}
+
+static unsigned long mmap_legacy_base(unsigned long rnd,
+		unsigned long task_size)
+{
+	return _TASK_UNMAPPED_BASE(task_size) + rnd;
 }
 
 /*
@@ -111,13 +119,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (current->flags & PF_RANDOMIZE)
 		random_factor = arch_mmap_rnd();
 
-	mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor;
+	mm->mmap_legacy_base = mmap_legacy_base(random_factor, TASK_SIZE);
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = mm->mmap_legacy_base;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
-		mm->mmap_base = mmap_base(random_factor);
+		mm->mmap_base = mmap_base(random_factor, TASK_SIZE);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
-- 
2.11.0
