From: Kees Cook <keescook@chromium.org>
To: linux-kernel@vger.kernel.org
Cc: Kees Cook <keescook@chromium.org>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	David Windsor <dave@nullcore.net>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	Andrew Morton <akpm@linux-foundation.org>,
	Andy Lutomirski <luto@kernel.org>,
	Christoph Hellwig <hch@infradead.org>,
	Christoph Lameter <cl@linux.com>,
	"David S. Miller" <davem@davemloft.net>,
	Laura Abbott <labbott@redhat.com>,
	Mark Rutland <mark.rutland@arm.com>,
	"Martin K. Petersen" <martin.petersen@oracle.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Christian Borntraeger <borntraeger@de.ibm.com>,
	Christoffer Dall <christoffer.dall@linaro.org>,
	Dave Kleikamp <dave.kleikamp@oracle.com>, Jan Kara <jack@suse.cz>,
	Luis de Bethencourt <luisbg@kernel.org>,
	Marc Zyngier <marc.zyngier@arm.com>,
	Rik van Riel <riel@redhat.com>,
	Matthew Garrett <mjg59@google.com>,
	linux-fsdevel@vger.kernel.org, linux-arch@vger.kernel.org,
	netdev@vger.kernel.org, linux-mm@kvack.org,
	kernel-hardening@lists.openwall.com
Subject: [PATCH 03/38] usercopy: Include offset in hardened usercopy report
Date: Wed, 10 Jan 2018 18:02:35 -0800	[thread overview]
Message-ID: <1515636190-24061-4-git-send-email-keescook@chromium.org> (raw)
In-Reply-To: <1515636190-24061-1-git-send-email-keescook@chromium.org>

This refactors the hardened usercopy code so that failure reporting
happens within the checking functions themselves instead of at the top
level. This simplifies the return value handling and allows more detail,
including the offset of the bad access within the object, to be included
in the report. Having the offset makes hardened usercopy bugs much easier
to understand.
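
To illustrate the shape of the change, here is a minimal sketch distilled
from the mm/slub.c hunk below (not a literal excerpt; object_size is the
usable size computed earlier in that function). Each checker goes from
handing a cache name back up for __check_object_size() to report, to
reporting and aborting on the spot with the offending offset:

	/* Before: on failure, return the cache name; the caller reports. */
	if (offset > object_size || n > object_size - offset)
		return s->name;

	/* After: report directly, including the offset into the object. */
	if (offset > object_size || n > object_size - offset)
		usercopy_abort("SLUB object", s->name, to_user, offset, n);

The resulting report can then pinpoint where in the object the bad copy
starts, along these lines (illustrative cache name and numbers; the
message format is the one usercopy_abort() gained earlier in this series):

	Kernel memory exposure attempt detected from SLUB object
	'kmalloc-64' (offset 48, size 32)!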

Signed-off-by: Kees Cook <keescook@chromium.org>
---
 include/linux/slab.h | 12 +++----
 mm/slab.c            |  8 ++---
 mm/slub.c            | 14 ++++----
 mm/usercopy.c        | 95 +++++++++++++++++++++++-----------------------------
 4 files changed, 57 insertions(+), 72 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 50697a1d6621..2dbeccdcb76b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -167,15 +167,11 @@ void kzfree(const void *);
 size_t ksize(const void *);
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page);
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			bool to_user);
 #else
-static inline const char *__check_heap_object(const void *ptr,
-					      unsigned long n,
-					      struct page *page)
-{
-	return NULL;
-}
+static inline void __check_heap_object(const void *ptr, unsigned long n,
+				       struct page *page, bool to_user) { }
 #endif
 
 /*
diff --git a/mm/slab.c b/mm/slab.c
index 183e996dde5f..b2beb2cc15e2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4397,8 +4397,8 @@ module_init(slab_proc_init);
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page)
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user)
 {
 	struct kmem_cache *cachep;
 	unsigned int objnr;
@@ -4414,9 +4414,9 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 
 	/* Allow address range falling entirely within object size. */
 	if (offset <= cachep->object_size && n <= cachep->object_size - offset)
-		return NULL;
+		return;
 
-	return cachep->name;
+	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
 
diff --git a/mm/slub.c b/mm/slub.c
index cfd56e5a35fb..bcd22332300a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3818,8 +3818,8 @@ EXPORT_SYMBOL(__kmalloc_node);
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page)
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user)
 {
 	struct kmem_cache *s;
 	unsigned long offset;
@@ -3831,7 +3831,8 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 
 	/* Reject impossible pointers. */
 	if (ptr < page_address(page))
-		return s->name;
+		usercopy_abort("SLUB object not in SLUB page?!", NULL,
+			       to_user, 0, n);
 
 	/* Find offset within object. */
 	offset = (ptr - page_address(page)) % s->size;
@@ -3839,15 +3840,16 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 	/* Adjust for redzone and reject if within the redzone. */
 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
 		if (offset < s->red_left_pad)
-			return s->name;
+			usercopy_abort("SLUB object in left red zone",
+				       s->name, to_user, offset, n);
 		offset -= s->red_left_pad;
 	}
 
 	/* Allow address range falling entirely within object size. */
 	if (offset <= object_size && n <= object_size - offset)
-		return NULL;
+		return;
 
-	return s->name;
+	usercopy_abort("SLUB object", s->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
 
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 8006baa4caac..a562dd094ace 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -86,10 +86,10 @@ void __noreturn usercopy_abort(const char *name, const char *detail,
 }
 
 /* Returns true if any portion of [ptr,ptr+n) over laps with [low,high). */
-static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
-		     unsigned long high)
+static bool overlaps(const unsigned long ptr, unsigned long n,
+		     unsigned long low, unsigned long high)
 {
-	unsigned long check_low = (uintptr_t)ptr;
+	const unsigned long check_low = ptr;
 	unsigned long check_high = check_low + n;
 
 	/* Does not overlap if entirely above or entirely below. */
@@ -100,15 +100,15 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
 }
 
 /* Is this address range in the kernel text area? */
-static inline const char *check_kernel_text_object(const void *ptr,
-						   unsigned long n)
+static inline void check_kernel_text_object(const unsigned long ptr,
+					    unsigned long n, bool to_user)
 {
 	unsigned long textlow = (unsigned long)_stext;
 	unsigned long texthigh = (unsigned long)_etext;
 	unsigned long textlow_linear, texthigh_linear;
 
 	if (overlaps(ptr, n, textlow, texthigh))
-		return "<kernel text>";
+		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);
 
 	/*
 	 * Some architectures have virtual memory mappings with a secondary
@@ -121,32 +121,30 @@ static inline const char *check_kernel_text_object(const void *ptr,
 	textlow_linear = (unsigned long)lm_alias(textlow);
 	/* No different mapping: we're done. */
 	if (textlow_linear == textlow)
-		return NULL;
+		return;
 
 	/* Check the secondary mapping... */
 	texthigh_linear = (unsigned long)lm_alias(texthigh);
 	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
-		return "<linear kernel text>";
-
-	return NULL;
+		usercopy_abort("linear kernel text", NULL, to_user,
+			       ptr - textlow_linear, n);
 }
 
-static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
+				       bool to_user)
 {
 	/* Reject if object wraps past end of memory. */
-	if ((unsigned long)ptr + n < (unsigned long)ptr)
-		return "<wrapped address>";
+	if (ptr + n < ptr)
+		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
 
 	/* Reject if NULL or ZERO-allocation. */
 	if (ZERO_OR_NULL_PTR(ptr))
-		return "<null>";
-
-	return NULL;
+		usercopy_abort("null address", NULL, to_user, ptr, n);
 }
 
 /* Checks for allocs that are marked in some way as spanning multiple pages. */
-static inline const char *check_page_span(const void *ptr, unsigned long n,
-					  struct page *page, bool to_user)
+static inline void check_page_span(const void *ptr, unsigned long n,
+				   struct page *page, bool to_user)
 {
 #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
@@ -163,28 +161,28 @@ static inline const char *check_page_span(const void *ptr, unsigned long n,
 	if (ptr >= (const void *)__start_rodata &&
 	    end <= (const void *)__end_rodata) {
 		if (!to_user)
-			return "<rodata>";
-		return NULL;
+			usercopy_abort("rodata", NULL, to_user, 0, n);
+		return;
 	}
 
 	/* Allow kernel data region (if not marked as Reserved). */
 	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
-		return NULL;
+		return;
 
 	/* Allow kernel bss region (if not marked as Reserved). */
 	if (ptr >= (const void *)__bss_start &&
 	    end <= (const void *)__bss_stop)
-		return NULL;
+		return;
 
 	/* Is the object wholly within one base page? */
 	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
-		return NULL;
+		return;
 
 	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
-		return NULL;
+		return;
 
 	/*
 	 * Reject if range is entirely either Reserved (i.e. special or
@@ -194,36 +192,37 @@ static inline const char *check_page_span(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		return "<spans multiple pages>";
+		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			return "<spans Reserved and non-Reserved pages>";
+			usercopy_abort("spans Reserved and non-Reserved pages",
+				       NULL, to_user, 0, n);
 		if (is_cma && !is_migrate_cma_page(page))
-			return "<spans CMA and non-CMA pages>";
+			usercopy_abort("spans CMA and non-CMA pages", NULL,
+				       to_user, 0, n);
 	}
 #endif
-
-	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+static inline void check_heap_object(const void *ptr, unsigned long n,
+				     bool to_user)
 {
 	struct page *page;
 
 	if (!virt_addr_valid(ptr))
-		return NULL;
+		return;
 
 	page = virt_to_head_page(ptr);
 
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
-	/* Verify object does not incorrectly span multiple pages. */
-	return check_page_span(ptr, n, page, to_user);
+	if (PageSlab(page)) {
+		/* Check slab allocator for flags and size. */
+		__check_heap_object(ptr, n, page, to_user);
+	} else {
+		/* Verify object does not incorrectly span multiple pages. */
+		check_page_span(ptr, n, page, to_user);
+	}
 }
 
 /*
@@ -234,21 +233,15 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
  */
 void __check_object_size(const void *ptr, unsigned long n, bool to_user)
 {
-	const char *err;
-
 	/* Skip all tests if size is zero. */
 	if (!n)
 		return;
 
 	/* Check for invalid addresses. */
-	err = check_bogus_address(ptr, n);
-	if (err)
-		goto report;
+	check_bogus_address((const unsigned long)ptr, n, to_user);
 
 	/* Check for bad heap object. */
-	err = check_heap_object(ptr, n, to_user);
-	if (err)
-		goto report;
+	check_heap_object(ptr, n, to_user);
 
 	/* Check for bad stack object. */
 	switch (check_stack_object(ptr, n)) {
@@ -264,16 +257,10 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
 		 */
 		return;
 	default:
-		err = "<process stack>";
-		goto report;
+		usercopy_abort("process stack", NULL, to_user, 0, n);
 	}
 
 	/* Check for object in kernel to avoid text exposure. */
-	err = check_kernel_text_object(ptr, n);
-	if (!err)
-		return;
-
-report:
-	usercopy_abort(err, NULL, to_user, 0, n);
+	check_kernel_text_object((const unsigned long)ptr, n, to_user);
 }
 EXPORT_SYMBOL(__check_object_size);
-- 
2.7.4

  parent reply	other threads:[~2018-01-11  2:17 UTC|newest]

Thread overview: 320+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-01-11  2:02 [PATCH v5 00/38] Hardened usercopy whitelisting Kees Cook
2018-01-11  2:02 ` [kernel-hardening] " Kees Cook
2018-01-11  2:02 ` Kees Cook
2018-01-11  2:02 ` Kees Cook
2018-01-11  2:02 ` [PATCH 01/38] usercopy: Remove pointer from overflow report Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 02/38] usercopy: Enhance and rename report_usercopy() Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11 17:06   ` Christopher Lameter
2018-01-11 17:06     ` [kernel-hardening] " Christopher Lameter
2018-01-11 17:06     ` Christopher Lameter
2018-01-11 17:06     ` Christopher Lameter
2018-01-11 17:06     ` Christopher Lameter
2018-01-14 20:57     ` Kees Cook
2018-01-14 20:57       ` [kernel-hardening] " Kees Cook
2018-01-14 20:57       ` Kees Cook
2018-01-14 20:57       ` Kees Cook
2018-01-14 20:57       ` Kees Cook
2018-01-11  2:02 ` Kees Cook [this message]
2018-01-11  2:02   ` [kernel-hardening] [PATCH 03/38] usercopy: Include offset in hardened usercopy report Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 04/38] lkdtm/usercopy: Adjust test to include an offset to check reporting Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 05/38] stddef.h: Introduce sizeof_field() Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 06/38] usercopy: Prepare for usercopy whitelisting Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 07/38] usercopy: WARN() on slab cache usercopy region violations Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 08/38] usercopy: Allow strict enforcement of whitelists Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 09/38] usercopy: Mark kmalloc caches as usercopy caches Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2019-11-12  7:17   ` [kernel-hardening] " Jiri Slaby
2019-11-12  7:17     ` Jiri Slaby
2019-11-12 21:21     ` Kees Cook
2019-11-12 21:21       ` Kees Cook
2019-11-14 21:27       ` Kees Cook
2019-11-14 21:27         ` Kees Cook
2020-01-23  8:14         ` Jiri Slaby
2020-01-23  8:14           ` Jiri Slaby
2020-01-27 23:19           ` Kees Cook
2020-01-27 23:19             ` Kees Cook
2020-01-28  7:58             ` Christian Borntraeger
2020-01-28  7:58               ` Christian Borntraeger
2020-01-28 23:01               ` Kees Cook
2020-01-28 23:01                 ` Kees Cook
2020-01-29  9:26                 ` Ursula Braun
2020-01-29  9:26                   ` Ursula Braun
2020-01-29 16:43                 ` Christopher Lameter
2020-01-29 16:43                   ` Christopher Lameter
2020-01-29 16:43                   ` Christopher Lameter
2020-01-29 17:07                   ` Christian Borntraeger
2020-01-29 17:07                     ` Christian Borntraeger
2020-01-29 17:09                     ` Christoph Hellwig
2020-01-29 17:09                       ` Christoph Hellwig
2020-01-29 17:19                       ` Christian Borntraeger
2020-01-29 17:19                         ` Christian Borntraeger
2020-01-30 19:23                         ` Kees Cook
2020-01-30 19:23                           ` Kees Cook
2020-01-31 12:03                           ` Jann Horn
2020-01-31 12:03                             ` Jann Horn
2020-01-31 12:03                             ` Jann Horn
2020-02-01 17:56                             ` Kees Cook
2020-02-01 17:56                               ` Kees Cook
2020-02-01 19:27                               ` Jann Horn
2020-02-01 19:27                                 ` Jann Horn
2020-02-01 19:27                                 ` Jann Horn
2020-02-03  7:46                                 ` Matthew Wilcox
2020-02-03  7:46                                   ` Matthew Wilcox
2020-02-03 17:41                                   ` Christoph Hellwig
2020-02-03 17:41                                     ` Christoph Hellwig
2020-02-03 17:20                               ` Christopher Lameter
2020-02-03 17:20                                 ` Christopher Lameter
2020-02-03 17:20                                 ` Christopher Lameter
2020-04-07  8:00                             ` Vlastimil Babka
2020-04-07  8:00                               ` Vlastimil Babka
2020-04-07 11:05                               ` Christian Borntraeger
2020-04-07 11:05                                 ` Christian Borntraeger
2020-04-20  7:53                               ` Jiri Slaby
2020-04-20  7:53                                 ` Jiri Slaby
2020-04-20 17:43                                 ` Kees Cook
2020-04-20 17:43                                   ` Kees Cook
2020-02-03 17:38                           ` Christoph Hellwig
2020-02-03 17:38                             ` Christoph Hellwig
2020-02-03 17:36                         ` Christoph Hellwig
2020-02-03 17:36                           ` Christoph Hellwig
2018-01-11  2:02 ` [PATCH 10/38] dcache: Define usercopy region in dentry_cache slab cache Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 11/38] vfs: Define usercopy region in names_cache slab caches Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 12/38] vfs: Copy struct mount.mnt_id to userspace using put_user() Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 13/38] ext4: Define usercopy region in ext4_inode_cache slab cache Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11 17:01   ` Theodore Ts'o
2018-01-11 17:01     ` [kernel-hardening] " Theodore Ts'o
2018-01-11 17:01     ` Theodore Ts'o
2018-01-11 17:01     ` Theodore Ts'o
2018-01-11 17:01     ` Theodore Ts'o
2018-01-11 23:05     ` Kees Cook
2018-01-11 23:05     ` Kees Cook
2018-01-11 23:05       ` [kernel-hardening] " Kees Cook
2018-01-11 23:05       ` Kees Cook
2018-01-11 23:05       ` Kees Cook
2018-01-14 22:34       ` Matthew Wilcox
2018-01-14 22:34         ` [kernel-hardening] " Matthew Wilcox
2018-01-14 22:34         ` Matthew Wilcox
2018-01-14 22:34         ` Matthew Wilcox
2018-01-14 22:34         ` Matthew Wilcox
2018-01-11 23:05     ` Kees Cook
2018-01-11 23:05     ` Kees Cook
2018-01-11  2:02 ` [PATCH 14/38] ext2: Define usercopy region in ext2_inode_cache " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 15/38] jfs: Define usercopy region in jfs_ip " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 16/38] befs: Define usercopy region in befs_inode_cache " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 17/38] exofs: Define usercopy region in exofs_inode_cache " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 18/38] orangefs: Define usercopy region in orangefs_inode_cache " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02   ` Kees Cook
2018-01-11  2:02 ` [PATCH 19/38] ufs: Define usercopy region in ufs_inode_cache " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02 ` [PATCH 20/38] vxfs: Define usercopy region in vxfs_inode " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02 ` [PATCH 21/38] cifs: Define usercopy region in cifs_request " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02 ` [PATCH 22/38] scsi: Define usercopy region in scsi_sense_cache " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02 ` [PATCH 23/38] net: Define usercopy region in struct proto " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02 ` [PATCH 24/38] ip: Define usercopy region in IP " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02 ` [PATCH 25/38] caif: Define usercopy region in caif " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02 ` [PATCH 26/38] sctp: Define usercopy region in SCTP " Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-11  2:02 ` [PATCH 27/38] sctp: Copy struct sctp_sock.autoclose to userspace using put_user() Kees Cook
2018-01-11  2:02   ` [kernel-hardening] " Kees Cook
2018-01-18 21:31   ` [kernel-hardening] " Laura Abbott
2018-01-18 21:31     ` Laura Abbott
2018-01-18 21:36     ` [kernel-hardening] " Kees Cook
2018-01-18 21:36       ` Kees Cook
2018-01-11  2:03 ` [PATCH 28/38] net: Restrict unwhitelisted proto caches to size 0 Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-11  2:03 ` [PATCH 29/38] fork: Define usercopy region in mm_struct slab caches Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-11  2:03 ` [PATCH 30/38] fork: Define usercopy region in thread_stack " Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-11  2:03 ` [PATCH 31/38] fork: Provide usercopy whitelisting for task_struct Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-11  2:03 ` [PATCH 32/38] x86: Implement thread_struct whitelist for hardened usercopy Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-11  2:03 ` [PATCH 33/38] arm64: " Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-15 12:24   ` Dave P Martin
2018-01-15 12:24     ` [kernel-hardening] " Dave P Martin
2018-01-15 20:06     ` Kees Cook
2018-01-15 20:06       ` [kernel-hardening] " Kees Cook
2018-01-16 12:33       ` Dave Martin
2018-01-16 12:33         ` [kernel-hardening] " Dave Martin
2018-03-26 16:22       ` Dave Martin
2018-03-26 17:41         ` Kees Cook
2018-03-27 12:32           ` Dave Martin
2018-01-11  2:03 ` [PATCH 34/38] arm: " Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-11 10:24   ` Russell King - ARM Linux
2018-01-11 10:24     ` [kernel-hardening] " Russell King - ARM Linux
2018-01-11 23:21     ` Kees Cook
2018-01-11 23:21       ` [kernel-hardening] " Kees Cook
2018-01-11  2:03 ` [PATCH 35/38] kvm: whitelist struct kvm_vcpu_arch Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-11  2:03 ` [PATCH 36/38] kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-11  2:03 ` [PATCH 37/38] usercopy: Restrict non-usercopy caches to size 0 Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-01-11  2:03 ` [PATCH 38/38] lkdtm: Update usercopy tests for whitelisting Kees Cook
2018-01-11  2:03   ` [kernel-hardening] " Kees Cook
2018-02-19 16:55 ` [PATCH] signals: Move put_compat_sigset to compat.h to silence hardened usercopy Matt Redfearn
2018-02-19 23:55   ` Kees Cook
2018-03-02 21:40     ` James Hogan

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
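
  For example, a minimal way to open the saved mbox from the command
  line and reply-to-all from there (assuming mutt as the mail client;
  the file path is illustrative):

    mutt -f /path/to/thread.mbox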

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1515636190-24061-4-git-send-email-keescook@chromium.org \
    --to=keescook@chromium.org \
    --cc=akpm@linux-foundation.org \
    --cc=borntraeger@de.ibm.com \
    --cc=christoffer.dall@linaro.org \
    --cc=cl@linux.com \
    --cc=dave.kleikamp@oracle.com \
    --cc=dave@nullcore.net \
    --cc=davem@davemloft.net \
    --cc=hch@infradead.org \
    --cc=jack@suse.cz \
    --cc=kernel-hardening@lists.openwall.com \
    --cc=labbott@redhat.com \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=luisbg@kernel.org \
    --cc=luto@kernel.org \
    --cc=marc.zyngier@arm.com \
    --cc=mark.rutland@arm.com \
    --cc=martin.petersen@oracle.com \
    --cc=mjg59@google.com \
    --cc=netdev@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=riel@redhat.com \
    --cc=torvalds@linux-foundation.org \
    --cc=viro@zeniv.linux.org.uk \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
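
  As a sketch, a mailto: link carrying the In-Reply-To header for this
  message might look like the following (the recipient address and
  message-ID are taken from this thread; header support via mailto:
  varies by mail client):

    mailto:keescook@chromium.org?In-Reply-To=%3C1515636190-24061-4-git-send-email-keescook@chromium.org%3E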

Be sure your reply has a Subject: header at the top and a blank line
before the message body.