From: Ilya Smith <blackzert@gmail.com>
To: rth@twiddle.net, ink@jurassic.park.msu.ru, mattst88@gmail.com,
	vgupta@synopsys.com, linux@armlinux.org.uk, tony.luck@intel.com,
	fenghua.yu@intel.com, jhogan@kernel.org, ralf@linux-mips.org,
	jejb@parisc-linux.org, deller@gmx.de, benh@kernel.crashing.org,
	paulus@samba.org, mpe@ellerman.id.au, schwidefsky@de.ibm.com,
	heiko.carstens@de.ibm.com, ysato@users.sourceforge.jp,
	dalias@libc.org, davem@davemloft.net, tglx@linutronix.de,
	mingo@redhat.com, hpa@zytor.com, x86@kernel.org,
	nyc@holomorphy.com, viro@zeniv.linux.org.uk, arnd@arndb.de,
	blackzert@gmail.com, gregkh@linuxfoundation.org,
	deepa.kernel@gmail.com, mhocko@suse.com, hughd@google.com,
	kstewart@linuxfoundation.org, pombredanne@nexb.com,
	akpm@linux-foundation.org, steve.capper@arm.com,
	punit.agrawal@arm.com, paul.burton@mips.com,
	aneesh.kumar@linux.vnet.ibm.com, npiggin@gmail.com,
	keescook@chromium.org, bhsharma@redhat.com, riel@redhat.com,
	nitin.m.gupta@oracle.com, kirill.shutemov@linux.intel.com,
	dan.j.williams@intel.com, jack@suse.cz,
	ross.zwisler@linux.intel.com, jglisse@redhat.com,
	willy@infradead.org, aarcange@redhat.com, oleg@redhat.com,
	linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-snps-arc@lists.infradead.org,
	linux-arm-kernel@lists.infradead.org, linux-ia64@vger.kernel.org,
	linux-metag@vger.kernel.org, linux-mips@linux-mips.org,
	linux-parisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	linux-s390@vger.kernel.org, linux-sh@vger.kernel.org,
	sparclinux@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC PATCH v2 1/2] Randomization of address chosen by mmap.
Date: Thu, 22 Mar 2018 19:36:37 +0300	[thread overview]
Message-ID: <1521736598-12812-2-git-send-email-blackzert@gmail.com> (raw)
In-Reply-To: <1521736598-12812-1-git-send-email-blackzert@gmail.com>
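
Add unmapped_area_random() and use it from vm_unmapped_area() when the
task has PF_RANDOMIZE set and randomize_va_space is greater than 3.
Instead of returning the lowest (or highest) suitable gap, the new
helper picks a random page-aligned address between the requested
limits, walks the VMA rbtree towards that address to find the nearest
gap large enough for the request, and then places the mapping at a
random page offset inside that gap. The randomize_va_space declaration
is moved earlier in mm.h so the new check in vm_unmapped_area() can use
it.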

Signed-off-by: Ilya Smith <blackzert@gmail.com>
---
 include/linux/mm.h |  16 ++++--
 mm/mmap.c          | 164 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 175 insertions(+), 5 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index ad06d42..c716257 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/page_ref.h>
 #include <linux/memremap.h>
+#include <linux/sched.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -2253,6 +2254,13 @@ struct vm_unmapped_area_info {
 	unsigned long align_offset;
 };
 
+#ifndef CONFIG_MMU
+#define randomize_va_space 0
+#else
+extern int randomize_va_space;
+#endif
+
+extern unsigned long unmapped_area_random(struct vm_unmapped_area_info *info);
 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
 extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
 
@@ -2268,6 +2276,9 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
 static inline unsigned long
 vm_unmapped_area(struct vm_unmapped_area_info *info)
 {
+	/* TODO: decide how 32-bit processes should be handled here */
+	if ((current->flags & PF_RANDOMIZE) && randomize_va_space > 3)
+		return unmapped_area_random(info);
 	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
 		return unmapped_area_topdown(info);
 	else
@@ -2529,11 +2540,6 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
 void drop_slab(void);
 void drop_slab_node(int nid);
 
-#ifndef CONFIG_MMU
-#define randomize_va_space 0
-#else
-extern int randomize_va_space;
-#endif
 
 const char * arch_vma_name(struct vm_area_struct *vma);
 void print_vma_addr(char *prefix, unsigned long rip);
diff --git a/mm/mmap.c b/mm/mmap.c
index 9efdc021..ba9cebb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -45,6 +45,7 @@
 #include <linux/moduleparam.h>
 #include <linux/pkeys.h>
 #include <linux/oom.h>
+#include <linux/random.h>
 
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
@@ -1780,6 +1781,169 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	return error;
 }
 
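+/*
+ * unmapped_area_random() - randomized search for a free gap.
+ *
+ * Pick a random page-aligned address between info->low_limit and
+ * info->high_limit, walk the VMA rbtree towards it to find the
+ * closest gap that can hold the request, then return a randomly
+ * chosen, suitably aligned address inside that gap.
+ */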
+unsigned long unmapped_area_random(struct vm_unmapped_area_info *info)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = NULL;
+	struct vm_area_struct *visited_vma = NULL;
+	unsigned long entropy[2];
+	unsigned long length, low_limit, high_limit, gap_start, gap_end;
+	unsigned long addr = 0;
+
+	/*
+	 * Get PRNG entropy: entropy[0] picks the starting search point,
+	 * entropy[1] the placement inside the chosen gap.
+	 */
+	prandom_bytes(&entropy, sizeof(entropy));
+	/* Clamp to mmap_min_addr to avoid an -EPERM result */
+	info->low_limit = max(info->low_limit, mmap_min_addr);
+
+	/* Adjust search length to account for worst case alignment overhead */
+	length = info->length + info->align_mask;
+	if (length < info->length)
+		return -ENOMEM;
+
+	/*
+	 * Adjust search limits by the desired length.
+	 * See implementation comment at top of unmapped_area().
+	 */
+	gap_end = info->high_limit;
+	if (gap_end < length)
+		return -ENOMEM;
+	high_limit = gap_end - length;
+
+	low_limit = info->low_limit + info->align_mask;
+	if (low_limit >= high_limit)
+		return -ENOMEM;
+
+	/* Choose random addr in limit range */
+	addr = entropy[0] % ((high_limit - low_limit) >> PAGE_SHIFT);
+	addr = low_limit + (addr << PAGE_SHIFT);
+	addr += (info->align_offset - addr) & info->align_mask;
+
+	/* Check if rbtree root looks promising */
+	if (RB_EMPTY_ROOT(&mm->mm_rb))
+		return -ENOMEM;
+
+	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+	if (vma->rb_subtree_gap < length)
+		return -ENOMEM;
+	/* Use the randomly chosen address to find the closest suitable gap */
+	while (true) {
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+		gap_end = vm_start_gap(vma);
+		if (gap_end < low_limit)
+			break;
+		if (addr < vm_start_gap(vma)) {
+			/* random said check left */
+			if (vma->vm_rb.rb_left) {
+				struct vm_area_struct *left =
+					rb_entry(vma->vm_rb.rb_left,
+						 struct vm_area_struct, vm_rb);
+				if (addr <= vm_start_gap(left) &&
+				    left->rb_subtree_gap >= length) {
+					vma = left;
+					continue;
+				}
+			}
+		} else if (addr >= vm_end_gap(vma)) {
+			/* random said check right */
+			if (vma->vm_rb.rb_right) {
+				struct vm_area_struct *right =
+				rb_entry(vma->vm_rb.rb_right,
+					 struct vm_area_struct, vm_rb);
+				/* descend right if its subtree has a large enough gap */
+				if (right->rb_subtree_gap >= length) {
+					vma = right;
+					continue;
+				}
+			}
+		}
+		if (gap_start < low_limit) {
+			if (gap_end <= low_limit)
+				break;
+			gap_start = low_limit;
+		} else if (gap_end > info->high_limit) {
+			if (gap_start >= info->high_limit)
+				break;
+			gap_end = info->high_limit;
+		}
+		if (gap_end > gap_start &&
+		    gap_end - gap_start >= length)
+			goto found;
+		visited_vma = vma;
+		break;
+	}
+	/* No suitable gap near the random address: search the rest of the tree */
+	while (true) {
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+
+		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
+			struct vm_area_struct *right =
+				rb_entry(vma->vm_rb.rb_right,
+					 struct vm_area_struct, vm_rb);
+			if (right->rb_subtree_gap >= length &&
+			    right != visited_vma) {
+				vma = right;
+				continue;
+			}
+		}
+
+check_current:
+		/* Check if current node has a suitable gap */
+		gap_end = vm_start_gap(vma);
+		if (gap_end <= low_limit)
+			goto go_back;
+
+		if (gap_start < low_limit)
+			gap_start = low_limit;
+
+		if (gap_start <= high_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
+			goto found;
+
+		/* Visit left subtree if it looks promising */
+		if (vma->vm_rb.rb_left) {
+			struct vm_area_struct *left =
+				rb_entry(vma->vm_rb.rb_left,
+					 struct vm_area_struct, vm_rb);
+			if (left->rb_subtree_gap >= length &&
+			    vm_end_gap(left) > low_limit &&
+				left != visited_vma) {
+				vma = left;
+				continue;
+			}
+		}
+go_back:
+		/* Go back up the rbtree to find next candidate node */
+		while (true) {
+			struct rb_node *prev = &vma->vm_rb;
+
+			if (!rb_parent(prev))
+				return -ENOMEM;
+			visited_vma = vma;
+			vma = rb_entry(rb_parent(prev),
+				       struct vm_area_struct, vm_rb);
+			if (prev == vma->vm_rb.rb_right) {
+				gap_start = vma->vm_prev ?
+					vm_end_gap(vma->vm_prev) : low_limit;
+				goto check_current;
+			}
+		}
+	}
+found:
+	/* We found a suitable gap. Clip it with the original high_limit. */
+	if (gap_end > info->high_limit)
+		gap_end = info->high_limit;
+	gap_end -= info->length;
+	gap_end -= (gap_end - info->align_offset) & info->align_mask;
+	/* only one suitable page */
+	if (gap_end == gap_start)
+		return gap_start;
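+	/*
+	 * Randomize placement inside the gap, limited to 0x10000 pages
+	 * (256MB with 4KB pages) below the highest usable address.
+	 */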
+	addr = entropy[1] % (min((gap_end - gap_start) >> PAGE_SHIFT,
+							 0x10000UL));
+	addr = gap_end - (addr << PAGE_SHIFT);
+	addr += (info->align_offset - addr) & info->align_mask;
+	return addr;
+}
+
 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
 	/*
-- 
2.7.4

Thread overview:
2018-03-22 16:36 [RFC PATCH v2 0/2] Randomization of address chosen by mmap Ilya Smith
2018-03-22 16:36 ` Ilya Smith [this message]
2018-03-22 20:53   ` Andrew Morton
2018-03-23 17:43     ` Ilya Smith
2018-03-22 16:36 ` [RFC PATCH v2 2/2] Architecture defined limit on memory region random shift Ilya Smith
2018-03-22 20:54   ` Andrew Morton
2018-03-23 17:48     ` Ilya Smith
2018-03-23 17:49     ` Ilya Smith
2018-03-22 20:57 ` [RFC PATCH v2 0/2] Randomization of address chosen by mmap Andrew Morton
2018-03-23 17:25   ` Ilya Smith
2018-03-23 12:48 ` Matthew Wilcox
2018-03-23 17:55   ` Ilya Smith
2018-03-26  8:46     ` Michal Hocko
2018-03-26 19:45       ` Ilya Smith
2018-03-27  7:24         ` Michal Hocko
2018-03-27 13:51           ` Ilya Smith
2018-03-27 14:38             ` Michal Hocko
2018-03-28 18:47               ` Ilya Smith
2018-03-27 22:16             ` Theodore Y. Ts'o
2018-03-27 23:58               ` Rich Felker
2018-03-28 18:48               ` Ilya Smith
2018-03-27 22:53             ` Kees Cook
2018-03-27 23:49               ` Matthew Wilcox
2018-03-27 23:57                 ` Kees Cook
2018-03-28  0:00                 ` Rich Felker
2018-03-28 21:07                   ` Luck, Tony
2018-04-03  0:11                     ` Ilya Smith
2018-03-28 21:07                 ` Ilya Smith
2018-03-23 18:00   ` Rich Felker
2018-03-23 19:06     ` Matthew Wilcox
2018-03-23 19:16       ` Rich Felker
2018-03-23 19:29         ` Matthew Wilcox
2018-03-23 19:35           ` Rich Felker
2018-03-28  4:50       ` Rob Landley
2018-03-30  7:55 ` Pavel Machek
2018-03-30  9:07   ` Ilya Smith
2018-03-30  9:57     ` Pavel Machek
2018-03-30 11:10       ` Ilya Smith
2018-03-30 13:33   ` Rich Felker
