From: Yu-cheng Yu <yu-cheng.yu@intel.com>
To: x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org,
	linux-mm@kvack.org, linux-arch@vger.kernel.org,
	linux-api@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>,
	Andy Lutomirski <luto@kernel.org>,
	Balbir Singh <bsingharora@gmail.com>,
	Borislav Petkov <bp@alien8.de>,
	Cyrill Gorcunov <gorcunov@gmail.com>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Eugene Syromiatnikov <esyr@redhat.com>,
	Florian Weimer <fweimer@redhat.com>,
	"H.J. Lu" <hjl.tools@gmail.com>, Jann Horn <jannh@google.com>,
	Jonathan Corbet <corbet@lwn.net>,
	Kees Cook <keescook@chromium.org>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Nadav Amit <nadav.amit@gmail.com>,
	Oleg Nesterov <oleg@redhat.com>, Pavel Machek <pavel@ucw.cz>,
	Peter Zijlstra <peterz@infradead.org>,
	Randy Dunlap <rdunlap@infradead.org>,
	"Ravi V. Shankar" <ravi.v.shankar@intel.com>,
	Vedvyas Shanbhogue <vedvyas.shanbhogue@intel.com>,
	Dave Martin <Dave.Martin@arm.com>,
	Weijiang Yang <weijiang.yang@intel.com>,
	Pengfei Xu <pengfei.xu@intel.com>,
	Haitao Huang <haitao.huang@intel.com>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Subject: [PATCH v27 25/31] x86/cet/shstk: Introduce shadow stack token setup/verify routines
Date: Fri, 21 May 2021 15:12:05 -0700
Message-ID: <20210521221211.29077-26-yu-cheng.yu@intel.com>
In-Reply-To: <20210521221211.29077-1-yu-cheng.yu@intel.com>

A shadow stack restore token marks a restore point of the shadow stack.  The
address recorded in a token must point directly above the token and must lie
within the same shadow stack.  This distinguishes a token from the other
entries on the shadow stack, which point into the executable code area.
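
For illustration only (nothing below is part of this patch; the macro and
helper names are made up), the token encoding used by the code further down
can be sketched as:

/*
 * Sketch of the restore token encoding; see create_rstor_token() and
 * shstk_check_rstor_token() in the patch below for the real logic.
 *
 * A token occupies one 8-byte, 8-byte-aligned slot.  Its value is the shadow
 * stack pointer sitting directly above the slot, with bit 0 set for a 64-bit
 * shadow stack and bit 1 reserved as a "busy" flag that must be clear in a
 * valid token.
 */
#define TOKEN_MODE_64BIT	BIT(0)	/* illustrative name */
#define TOKEN_BUSY		BIT(1)	/* illustrative name */

static u64 encode_rstor_token(u64 ssp, bool ia32)
{
	u64 token = ssp;		/* address directly above the token */

	if (!ia32)
		token |= TOKEN_MODE_64BIT;
	return token;
}

static bool token_points_above_itself(u64 token, u64 token_addr)
{
	/* The recorded SSP must sit directly above the token slot. */
	return (ALIGN_DOWN(token & ~3ULL, 8) - 8) == token_addr;
}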

The restore token can be used as extra protection for signal handling.  To
deliver a signal, create a shadow stack restore token and put the token and
the signal restorer address on the shadow stack.  In sigreturn, verify the
token and restore the shadow stack pointer from it.
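
A rough sketch of how the two routines introduced here could be called from
the signal code (the actual callers arrive with the signal patch later in
this series; 'ia32' and 'restorer' are placeholders for values the signal
setup code already has):

/* Signal delivery (sketch): push token + restorer onto the shadow stack. */
unsigned long new_ssp;
int err;

err = shstk_setup_rstor_token(ia32, restorer, &new_ssp);
if (err)
	return err;
/* new_ssp becomes the handler's shadow stack pointer. */

/*
 * sigreturn (sketch): verify the token on the current shadow stack and
 * recover the interrupted shadow stack pointer.
 */
unsigned long old_ssp;

err = shstk_check_rstor_token(ia32, &old_ssp);
if (err)
	return err;
/* old_ssp is then written back as the user shadow stack pointer. */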

Introduce token setup and verify routines.  Also introduce WRUSS, a
kernel-mode instruction that writes directly to the user shadow stack.  It
is used to construct the user signal stack as described above.
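
As a hedged illustration of why WRUSS is needed (the wrapper name comes from
this patch; the calling context and 'slot'/'token' are made up): shadow stack
pages are not writable by ordinary stores, so pushing the token from the
kernel has to go through the WRUSS wrappers rather than put_user():

/*
 * Illustrative only: a regular put_user() to a shadow stack page would
 * fault, so the 8-byte token is written with wrussq via the wrapper.
 */
u64 __user *slot = (u64 __user *)(ssp - 8);	/* hypothetical token slot */

if (write_user_shstk_64(slot, token))
	return -EFAULT;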

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Cc: Kees Cook <keescook@chromium.org>
---
v27:
- For shstk_check_rstor_token(), use the current shadow stack pointer
  instead of an input parameter.
- In response to comments, fix/simplify a few syntax/format issues.

v25:
- Update inline assembly syntax, use %[].
- Change token address from (unsigned long) to (u64/u32 __user *).
- Change -EPERM to -EFAULT.

 arch/x86/include/asm/cet.h           |   7 ++
 arch/x86/include/asm/special_insns.h |  30 ++++++
 arch/x86/kernel/shstk.c              | 133 +++++++++++++++++++++++++++
 3 files changed, 170 insertions(+)

diff --git a/arch/x86/include/asm/cet.h b/arch/x86/include/asm/cet.h
index 4314a41ab3c9..aa533700ba31 100644
--- a/arch/x86/include/asm/cet.h
+++ b/arch/x86/include/asm/cet.h
@@ -21,6 +21,9 @@ int shstk_alloc_thread_stack(struct task_struct *p, unsigned long clone_flags,
 			     unsigned long stack_size);
 void shstk_free(struct task_struct *p);
 void shstk_disable(void);
+int shstk_setup_rstor_token(bool ia32, unsigned long restorer,
+			    unsigned long *new_ssp);
+int shstk_check_rstor_token(bool ia32, unsigned long *new_ssp);
 #else
 static inline int shstk_setup(void) { return 0; }
 static inline int shstk_alloc_thread_stack(struct task_struct *p,
@@ -28,6 +31,10 @@ static inline int shstk_alloc_thread_stack(struct task_struct *p,
 					   unsigned long stack_size) { return 0; }
 static inline void shstk_free(struct task_struct *p) {}
 static inline void shstk_disable(void) {}
+static inline int shstk_setup_rstor_token(bool ia32, unsigned long restorer,
+					  unsigned long *new_ssp) { return 0; }
+static inline int shstk_check_rstor_token(bool ia32,
+					  unsigned long *new_ssp) { return 0; }
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 2acd6cb62328..5b48c91fa8d4 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -234,6 +234,36 @@ static inline void clwb(volatile void *__p)
 		: [pax] "a" (p));
 }
 
+#ifdef CONFIG_X86_SHADOW_STACK
+static inline int write_user_shstk_32(u32 __user *addr, u32 val)
+{
+	if (WARN_ONCE(!IS_ENABLED(CONFIG_IA32_EMULATION) &&
+		      !IS_ENABLED(CONFIG_X86_X32),
+		      "%s used but not supported.\n", __func__)) {
+		return -EFAULT;
+	}
+
+	asm_volatile_goto("1: wrussd %[val], (%[addr])\n"
+			  _ASM_EXTABLE(1b, %l[fail])
+			  :: [addr] "r" (addr), [val] "r" (val)
+			  :: fail);
+	return 0;
+fail:
+	return -EFAULT;
+}
+
+static inline int write_user_shstk_64(u64 __user *addr, u64 val)
+{
+	asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
+			  _ASM_EXTABLE(1b, %l[fail])
+			  :: [addr] "r" (addr), [val] "r" (val)
+			  :: fail);
+	return 0;
+fail:
+	return -EFAULT;
+}
+#endif /* CONFIG_X86_SHADOW_STACK */
+
 #define nop() asm volatile ("nop")
 
 static inline void serialize(void)
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index 8e5f772181b9..61ec300c1a97 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -20,6 +20,7 @@
 #include <asm/fpu/xstate.h>
 #include <asm/fpu/types.h>
 #include <asm/cet.h>
+#include <asm/special_insns.h>
 
 static void start_update_msrs(void)
 {
@@ -181,3 +182,135 @@ void shstk_disable(void)
 
 	shstk_free(current);
 }
+
+static unsigned long get_user_shstk_addr(void)
+{
+	struct fpu *fpu = &current->thread.fpu;
+	unsigned long ssp = 0;
+
+	fpregs_lock();
+
+	if (fpregs_state_valid(fpu, smp_processor_id())) {
+		rdmsrl(MSR_IA32_PL3_SSP, ssp);
+	} else {
+		struct cet_user_state *p;
+
+		p = get_xsave_addr(&fpu->state.xsave, XFEATURE_CET_USER);
+		if (p)
+			ssp = p->user_ssp;
+	}
+
+	fpregs_unlock();
+
+	return ssp;
+}
+
+/*
+ * Create a restore token on the shadow stack.  A token is always 8-byte
+ * and aligned to 8.
+ */
+static int create_rstor_token(bool ia32, unsigned long ssp,
+			       unsigned long *token_addr)
+{
+	unsigned long addr;
+
+	/* Aligned to 8 is aligned to 4, so test 8 first */
+	if ((!ia32 && !IS_ALIGNED(ssp, 8)) || !IS_ALIGNED(ssp, 4))
+		return -EINVAL;
+
+	addr = ALIGN_DOWN(ssp, 8) - 8;
+
+	/* Is the token for 64-bit? */
+	if (!ia32)
+		ssp |= BIT(0);
+
+	if (write_user_shstk_64((u64 __user *)addr, (u64)ssp))
+		return -EFAULT;
+
+	*token_addr = addr;
+
+	return 0;
+}
+
+/*
+ * Create a restore token on shadow stack, and then push the user-mode
+ * function return address.
+ */
+int shstk_setup_rstor_token(bool ia32, unsigned long ret_addr,
+			    unsigned long *new_ssp)
+{
+	struct thread_shstk *shstk = &current->thread.shstk;
+	unsigned long ssp, token_addr;
+	int err;
+
+	if (!shstk->size)
+		return 0;
+
+	if (!ret_addr)
+		return -EINVAL;
+
+	ssp = get_user_shstk_addr();
+	if (!ssp)
+		return -EINVAL;
+
+	err = create_rstor_token(ia32, ssp, &token_addr);
+	if (err)
+		return err;
+
+	if (ia32) {
+		ssp = token_addr - sizeof(u32);
+		err = write_user_shstk_32((u32 __user *)ssp, (u32)ret_addr);
+	} else {
+		ssp = token_addr - sizeof(u64);
+		err = write_user_shstk_64((u64 __user *)ssp, (u64)ret_addr);
+	}
+
+	if (!err)
+		*new_ssp = ssp;
+
+	return err;
+}
+
+/*
+ * Verify token_addr points to a valid token, and then set *new_ssp
+ * according to the token.
+ */
+int shstk_check_rstor_token(bool proc32, unsigned long *new_ssp)
+{
+	unsigned long token_addr;
+	unsigned long token;
+	bool shstk32;
+
+	token_addr = get_user_shstk_addr();
+
+	if (get_user(token, (unsigned long __user *)token_addr))
+		return -EFAULT;
+
+	/* Is mode flag correct? */
+	shstk32 = !(token & BIT(0));
+	if (proc32 ^ shstk32)
+		return -EINVAL;
+
+	/* Is busy flag set? */
+	if (token & BIT(1))
+		return -EINVAL;
+
+	/* Mask out flags */
+	token &= ~3UL;
+
+	/*
+	 * Restore address aligned?
+	 */
+	if ((!proc32 && !IS_ALIGNED(token, 8)) || !IS_ALIGNED(token, 4))
+		return -EINVAL;
+
+	/*
+	 * Token placed properly?
+	 */
+	if (((ALIGN_DOWN(token, 8) - 8) != token_addr) || token >= TASK_SIZE_MAX)
+		return -EINVAL;
+
+	*new_ssp = token;
+
+	return 0;
+}
-- 
2.21.0

