From: Kees Cook <keescook@chromium.org>
To: kernel-hardening@lists.openwall.com
Cc: Kees Cook <keescook@chromium.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Andy Lutomirski <luto@kernel.org>,
	Hoeun Ryu <hoeun.ryu@gmail.com>, PaX Team <pageexec@freemail.hu>,
	Emese Revfy <re.emese@gmail.com>,
	Russell King <linux@armlinux.org.uk>,
	x86@kernel.org, linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org
Subject: [RFC v2][PATCH 01/11] Introduce rare_write() infrastructure
Date: Wed, 29 Mar 2017 11:15:53 -0700	[thread overview]
Message-ID: <1490811363-93944-2-git-send-email-keescook@chromium.org> (raw)
In-Reply-To: <1490811363-93944-1-git-send-email-keescook@chromium.org>

Several types of data storage exist in the kernel: read-write data (.data,
.bss), read-only data (.rodata), and RO-after-init. This introduces the
infrastructure for another type: write-rarely, which is intended for data
that is either only rarely modified or especially security-sensitive. The
goal is to further reduce the internal attack surface of the kernel by
making this storage read-only when "at rest". This makes it much harder
for an attacker with a kernel-write flaw to subvert the data, since the
memory contents cannot be changed directly.

This work is heavily based on PaX and grsecurity's pax_{open,close}_kernel
API, its __read_only annotations, its constify plugin, and the work done
to identify sensitive structures that should be moved from .data into
.rodata. This builds the initial infrastructure to support these kinds
of changes, though the API and naming have been adjusted in places for
clarity and maintainability.

Variables declared with the __wr_rare annotation will be moved to the
.rodata section if an architecture supports CONFIG_HAVE_ARCH_RARE_WRITE.
To change these variables, either the single rare_write() macro can be
used, or multiple __rare_write() calls can be wrapped in a matching pair
of rare_write_begin() and rare_write_end() calls. These macros expand
into the arch-specific functions that perform the actions needed to
write to otherwise read-only memory.
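
For illustration only (this example is not part of the patch, and the
variable and function names are made up), a write-rarely variable might
be declared and updated like this:

  static unsigned long sensitive_limit __wr_rare = 100;
  static unsigned long sensitive_floor __wr_rare = 10;

  void set_limit(unsigned long new_limit)
  {
          /* Single update: open, write, and close in one macro. */
          rare_write(sensitive_limit, new_limit);
  }

  void set_range(unsigned long floor, unsigned long limit)
  {
          /* Batched updates share a single open/close window. */
          rare_write_begin();
          __rare_write(sensitive_floor, floor);
          __rare_write(sensitive_limit, limit);
          rare_write_end();
  }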

As detailed in the Kconfig help, the arch-specific helpers have several
requirements to make them sensible/safe for use by the kernel: they must
not allow non-current CPUs to write the memory area, they must run with
preemption disabled to avoid accidentally leaving memory writable, and
they must be inline to avoid becoming desirable ROP targets for attackers.
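
For reference, here is a rough, purely illustrative sketch of how an
architecture might satisfy these requirements, assuming an x86-style
CR0.WP toggle (this is not the code from the later arch patches in this
series; it assumes read_cr0()/write_cr0() from <asm/special_insns.h>
and X86_CR0_WP from <asm/processor-flags.h>):

  static __always_inline void __arch_rare_write_begin(void)
  {
          unsigned long cr0;

          preempt_disable();
          cr0 = read_cr0();
          /* Validate state: writes must not already be enabled. */
          BUG_ON(!(cr0 & X86_CR0_WP));
          /* Allow this CPU to write to read-only pages. */
          write_cr0(cr0 & ~X86_CR0_WP);
  }

  static __always_inline void __arch_rare_write_end(void)
  {
          unsigned long cr0 = read_cr0();

          /* Validate state: writes must currently be enabled. */
          BUG_ON(cr0 & X86_CR0_WP);
          /* Restore write protection before allowing preemption again. */
          write_cr0(cr0 | X86_CR0_WP);
          preempt_enable_no_resched();
  }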

Signed-off-by: Kees Cook <keescook@chromium.org>
---
 arch/Kconfig             | 25 +++++++++++++++++++++++++
 include/linux/compiler.h | 32 ++++++++++++++++++++++++++++++++
 include/linux/preempt.h  |  6 ++++--
 3 files changed, 61 insertions(+), 2 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index cd211a14a88f..5ebf62500b99 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -847,4 +847,29 @@ config STRICT_MODULE_RWX
 config ARCH_WANT_RELAX_ORDER
 	bool
 
+config HAVE_ARCH_RARE_WRITE
+	def_bool n
+	help
+	  An arch should select this option if it has defined the functions
+	  __arch_rare_write_begin() and __arch_rare_write_end() to
+	  respectively enable and disable writing to read-only memory. The
+	  routines must meet the following requirements:
+	  - read-only memory writing must only be available on the current
+	    CPU (to make sure other CPUs can't race to make changes too).
+	  - the routines must be declared inline (to discourage ROP use).
+	  - the routines must not be preemptible (likely they will call
+	    preempt_disable() and preempt_enable_no_resched() respectively).
+	  - the routines must validate expected state (e.g. when enabling
+	    writes, BUG() if writes are already enabled).
+
+config HAVE_ARCH_RARE_WRITE_MEMCPY
+	def_bool n
+	depends on HAVE_ARCH_RARE_WRITE
+	help
+	  An arch should select this option if a special accessor is needed
+	  to write to otherwise read-only memory, defined by the function
+	  __arch_rare_write_memcpy(). Without this, the write-rarely
+	  infrastructure will just attempt to write directly to the memory
+	  using a const-ignoring assignment.
+
 source "kernel/gcov/Kconfig"
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f8110051188f..274bd03cfe9e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -336,6 +336,38 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 	__u.__val;					\
 })
 
+/*
+ * Build "write rarely" infrastructure for flipping memory r/w
+ * on a per-CPU basis.
+ */
+#ifndef CONFIG_HAVE_ARCH_RARE_WRITE
+# define __wr_rare
+# define __wr_rare_type
+# define __rare_write(__var, __val)	(__var = (__val))
+# define rare_write_begin()		do { } while (0)
+# define rare_write_end()		do { } while (0)
+#else
+# define __wr_rare			__ro_after_init
+# define __wr_rare_type			const
+# ifdef CONFIG_HAVE_ARCH_RARE_WRITE_MEMCPY
+#  define __rare_write_n(dst, src, len)	({			\
+		BUILD_BUG_ON(!__builtin_constant_p(len));	\
+		__arch_rare_write_memcpy((dst), (src), (len));	\
+	})
+#  define __rare_write(var, val)  ({ typeof(var) __v = (val); __rare_write_n(&(var), &__v, sizeof(var)); })
+# else
+#  define __rare_write(var, val)  ((*(typeof((typeof(var))0) *)&(var)) = (val))
+# endif
+# define rare_write_begin()	__arch_rare_write_begin()
+# define rare_write_end()	__arch_rare_write_end()
+#endif
+#define rare_write(__var, __val) ({			\
+	rare_write_begin();				\
+	__rare_write(__var, __val);			\
+	rare_write_end();				\
+	__var;						\
+})
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index cae461224948..4fc97aaa22ea 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -258,10 +258,12 @@ do { \
 /*
  * Modules have no business playing preemption tricks.
  */
-#undef sched_preempt_enable_no_resched
-#undef preempt_enable_no_resched
 #undef preempt_enable_no_resched_notrace
 #undef preempt_check_resched
+#ifndef CONFIG_HAVE_ARCH_RARE_WRITE
+#undef sched_preempt_enable_no_resched
+#undef preempt_enable_no_resched
+#endif
 #endif
 
 #define preempt_set_need_resched() \
-- 
2.7.4

Thread overview:
2017-03-29 18:15 [RFC v2] Introduce rare_write() infrastructure Kees Cook
2017-03-29 18:15 ` [RFC v2][PATCH 01/11] " Kees Cook [this message]
2017-03-29 18:23   ` Kees Cook
2017-03-30  7:44     ` Ho-Eun Ryu
2017-03-30 17:02       ` Kees Cook
2017-04-07  8:09   ` Ho-Eun Ryu
2017-04-07 20:38     ` Kees Cook
2017-03-29 18:15 ` [RFC v2][PATCH 02/11] lkdtm: add test for " Kees Cook
2017-03-30  9:34   ` Ian Campbell
2017-03-30 16:16     ` Kees Cook
2017-03-29 18:15 ` [RFC v2][PATCH 03/11] net: switch sock_diag handlers to rare_write() Kees Cook
2017-03-29 18:15 ` [RFC v2][PATCH 04/11] x86: Implement __arch_rare_write_begin/unmap() Kees Cook
2017-03-29 22:38   ` Andy Lutomirski
2017-03-30  1:41     ` Kees Cook
2017-04-05 23:57       ` Andy Lutomirski
2017-04-06  0:14         ` Kees Cook
2017-04-06 15:59           ` Andy Lutomirski
2017-04-07  8:34             ` Mathias Krause
2017-04-07  9:46               ` Thomas Gleixner
2017-04-07 10:51                 ` Mathias Krause
2017-04-07 13:14                   ` Thomas Gleixner
2017-04-07 13:30                     ` Mathias Krause
2017-04-07 16:14                       ` Andy Lutomirski
2017-04-07 16:22                         ` Mark Rutland
2017-04-07 19:58                         ` PaX Team
2017-04-08  4:58                           ` Andy Lutomirski
2017-04-09 12:47                             ` PaX Team
2017-04-10  0:10                               ` Andy Lutomirski
2017-04-10 10:42                                 ` PaX Team
2017-04-10 16:01                                   ` Andy Lutomirski
2017-04-07 20:44                         ` Thomas Gleixner
2017-04-07 21:20                           ` Kees Cook
2017-04-08  4:12                             ` Daniel Micay
2017-04-08  4:13                               ` Daniel Micay
2017-04-08  4:21                         ` Daniel Micay
2017-04-08  5:07                           ` Andy Lutomirski
2017-04-08  7:33                             ` Daniel Micay
2017-04-08 15:20                               ` Andy Lutomirski
2017-04-09 10:53                                 ` Ingo Molnar
2017-04-10 10:22                                 ` Mark Rutland
2017-04-09 20:24                             ` PaX Team
2017-04-10  0:31                               ` Andy Lutomirski
2017-04-10 19:47                                 ` PaX Team
2017-04-10 20:27                                   ` Andy Lutomirski
2017-04-10 20:13                               ` Kees Cook
2017-04-10 20:17                                 ` Andy Lutomirski
2017-04-07 19:25                       ` Thomas Gleixner
2017-04-07 14:45                   ` Peter Zijlstra
2017-04-10 10:29                     ` Mark Rutland
2017-04-07 19:52                 ` PaX Team
2017-04-10  8:26                   ` Thomas Gleixner
2017-04-10 19:55                     ` PaX Team
2017-04-07  9:37   ` Peter Zijlstra
2017-03-29 18:15 ` [RFC v2][PATCH 05/11] ARM: mm: dump: Add domain to output Kees Cook
2017-03-29 18:15 ` [RFC v2][PATCH 06/11] ARM: domains: Extract common USER domain init Kees Cook
2017-03-29 18:15 ` [RFC v2][PATCH 07/11] ARM: mm: set DOMAIN_WR_RARE for rodata Kees Cook
2017-03-29 18:16 ` [RFC v2][PATCH 08/11] ARM: Implement __arch_rare_write_begin/end() Kees Cook
2017-04-07  9:36   ` Peter Zijlstra
2017-03-29 18:16 ` [RFC v2][PATCH 09/11] list: add rare_write() list helpers Kees Cook
2017-03-29 18:16 ` [RFC v2][PATCH 10/11] gcc-plugins: Add constify plugin Kees Cook
2017-03-29 18:16 ` [RFC v2][PATCH 11/11] cgroups: force all struct cftype const Kees Cook
2017-03-29 19:00 ` [RFC v2] Introduce rare_write() infrastructure Russell King - ARM Linux
2017-03-29 19:14   ` Kees Cook