From: Borislav Petkov <bp@alien8.de>
To: X86 ML <x86@kernel.org>
Cc: LKML <linux-kernel@vger.kernel.org>,
	"H. Peter Anvin" <hpa@zytor.com>, Ingo Molnar <mingo@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Quentin Casasnovas <quentin.casasnovas@oracle.com>,
	Oleg Nesterov <oleg@redhat.com>,
	Andy Lutomirski <luto@amacapital.net>
Subject: [PATCH] x86/xsave: Robustify and merge macros
Date: Thu,  2 Apr 2015 15:11:22 +0200	[thread overview]
Message-ID: <1427980282-25929-1-git-send-email-bp@alien8.de> (raw)

From: Borislav Petkov <bp@suse.de>

Previously, we called an XSAVE/XRSTOR variant through alternatives
and handled a potential exception resulting from the instruction's
execution in a second inline asm. That was misleading and error prone;
see
see

  06c8173eb92b ("x86/fpu/xsaves: Fix improper uses of __ex_table")

for an example.
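
To make the problem concrete, here is a condensed, illustrative rendition
of the removed pattern (the real code goes through alternative_input_2()
and xstate_fault, see the hunks below). The fixup and the exception table
entry for the faulting instruction at label 1 live in a *second* asm
statement, so the "1b" reference crosses an asm statement boundary and
nothing visibly ties the two statements together:

  int err = 0;

  /* First asm statement: the potentially faulting instruction. */
  asm volatile("1:" XSAVE "\n\t"
               : : "D" (fx), "a" (lmask), "d" (hmask)
               : "memory");

  /* Second asm statement: fixup and extable entry for label 1 above. */
  asm volatile("2:\n\t"
               ".pushsection .fixup,\"ax\"\n\t"
               "3: movl $-1, %[err]\n\t"
               "   jmp 2b\n\t"
               ".popsection\n\t"
               _ASM_EXTABLE(1b, 3b)	/* "1b" is defined in the other asm statement */
               : [err] "=r" (err)
               : "0" (0)
               : "memory");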

Add single macros which combine the alternatives and the exception
handling.
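
With the combined macros, a user boils down to a single statement, error
handling included. For instance, xsave_state() (as in the hunk below) now
reads:

  static inline int xsave_state(struct xsave_struct *fx, u64 mask)
  {
          u32 lmask = mask;
          u32 hmask = mask >> 32;
          int err;

          /* Alternative selection and the fault fixup sit in one asm block. */
          XSTATE_XSAVE(fx, lmask, hmask, err);

          return err;
  }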

While at it, remove the SYSTEM_BOOTING checks in favor of
static_cpu_has_safe(), which works regardless of system state.
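
I.e., the boot-time helpers now simply do (mirroring the
xsave_state_booting() hunk below):

          /*
           * static_cpu_has_safe() works regardless of system state, so the
           * WARN_ON(system_state != SYSTEM_BOOTING) is not needed anymore.
           */
          if (static_cpu_has_safe(X86_FEATURE_XSAVES))
                  XSTATE_OP(XSAVES, fx, lmask, hmask, err);
          else
                  XSTATE_OP(XSAVE, fx, lmask, hmask, err);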

Clean up comments.

Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
---
 arch/x86/include/asm/xsave.h | 141 ++++++++++++++++++++++---------------------
 1 file changed, 73 insertions(+), 68 deletions(-)

diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index c9a6d68b8d62..e6c7986c95df 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -67,6 +67,66 @@ extern int init_fpu(struct task_struct *child);
 			_ASM_EXTABLE(1b, 3b)		\
 			: [err] "=r" (err)
 
+#define XSTATE_OP(op, st, lmask, hmask, err)				\
+	asm volatile("1:" op "\n\t"					\
+		     "2:\n\t"						\
+		     "xor %[err], %[err]\n"				\
+		     ".pushsection .fixup,\"ax\"\n\t"			\
+		     "3: movl $-1,%[err]\n\t"				\
+		     "jmp 2b\n\t"					\
+		     ".popsection\n\t"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+/*
+ * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
+ * format and supervisor states in addition to modified optimization in
+ * XSAVEOPT.
+ *
+ * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
+ * supports modified optimization which is not supported by XSAVE.
+ *
+ * We use XSAVE as a fallback.
+ *
+ * 661 and alt_end_marker labels below are defined in ALTERNATIVE* and we're
+ * reusing them here so as not to clutter this macro unnecessarily.
+ */
+#define XSTATE_XSAVE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE_2(XSAVE,				\
+				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
+				   XSAVES,   X86_FEATURE_XSAVES)	\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "3: movl $-1, %[err]\n"				\
+		     "jmp " alt_end_marker "b\n"			\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "a" (lmask), "d" (hmask)		\
+		     : "memory")
+
+/*
+ * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
+ * XSAVE area format.
+ */
+#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE(XRSTOR,				\
+				 XRSTORS, X86_FEATURE_XSAVES)		\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "3: movl $-1, %[err]\n"				\
+		     "jmp 663b\n"					\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+
 /*
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
@@ -75,22 +135,13 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
-
-	WARN_ON(system_state != SYSTEM_BOOTING);
+	int err;
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XSAVES"\n\t"
-			"2:\n\t"
-			     xstate_fault
-			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-			:   "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XSAVES, fx, lmask, hmask, err);
 	else
-		asm volatile("1:"XSAVE"\n\t"
-			"2:\n\t"
-			     xstate_fault
-			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-			:   "memory");
+		XSTATE_OP(XSAVE, fx, lmask, hmask, err);
+
 	return err;
 }
 
@@ -102,22 +153,12 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
-
-	WARN_ON(system_state != SYSTEM_BOOTING);
+	int err;
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XRSTORS"\n\t"
-			"2:\n\t"
-			     xstate_fault
-			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-			:   "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XRSTORS, fx, lmask, hmask, err);
 	else
-		asm volatile("1:"XRSTOR"\n\t"
-			"2:\n\t"
-			     xstate_fault
-			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-			:   "memory");
+		XSTATE_OP(XRSTOR, fx, lmask, hmask, err);
 	return err;
 }
 
@@ -128,31 +169,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
-	/*
-	 * If xsaves is enabled, xsaves replaces xsaveopt because
-	 * it supports compact format and supervisor states in addition to
-	 * modified optimization in xsaveopt.
-	 *
-	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
-	 * because xsaveopt supports modified optimization which is not
-	 * supported by xsave.
-	 *
-	 * If none of xsaves and xsaveopt is enabled, use xsave.
-	 */
-	alternative_input_2(
-		"1:"XSAVE,
-		XSAVEOPT,
-		X86_FEATURE_XSAVEOPT,
-		XSAVES,
-		X86_FEATURE_XSAVES,
-		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
-		"memory");
-	asm volatile("2:\n\t"
-		     xstate_fault
-		     : "0" (0)
-		     : "memory");
+	XSTATE_XSAVE(fx, lmask, hmask, err);
 
 	return err;
 }
@@ -162,25 +181,11 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
  */
 static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
 {
-	int err = 0;
+	int err;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
 
-	/*
-	 * Use xrstors to restore context if it is enabled. xrstors supports
-	 * compacted format of xsave area which is not supported by xrstor.
-	 */
-	alternative_input(
-		"1: " XRSTOR,
-		XRSTORS,
-		X86_FEATURE_XSAVES,
-		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-		: "memory");
-
-	asm volatile("2:\n"
-		     xstate_fault
-		     : "0" (0)
-		     : "memory");
+	XSTATE_XRESTORE(fx, lmask, hmask, err);
 
 	return err;
 }
-- 
2.3.3



Thread overview: 22+ messages
2015-04-02 13:11 Borislav Petkov [this message]
2015-04-02 15:52 ` [PATCH] x86/xsave: Robustify and merge macros Quentin Casasnovas
2015-04-02 16:12   ` Borislav Petkov
2015-04-02 16:33     ` Quentin Casasnovas
2015-04-02 16:45       ` Borislav Petkov
2015-04-03 14:06     ` Quentin Casasnovas
2015-04-03 14:14       ` Quentin Casasnovas
2015-04-03 15:23         ` Borislav Petkov
2015-04-03 15:40           ` Quentin Casasnovas
2015-04-03 17:06             ` Borislav Petkov
2015-04-03 17:33               ` Quentin Casasnovas
2015-04-03 17:48                 ` Borislav Petkov
2015-04-03 20:42                   ` Quentin Casasnovas
2015-04-04  7:34                     ` Borislav Petkov
2015-04-04  8:36                       ` Quentin Casasnovas
2015-04-04  9:25                         ` Borislav Petkov
2015-04-04 10:11                           ` Quentin Casasnovas
2015-04-04 10:29                             ` Borislav Petkov
2015-04-04 13:32                               ` Borislav Petkov
2015-04-04 13:34                                 ` [PATCH] x86/alternatives: Fix ALTERNATIVE_2 padding generation properly Borislav Petkov
2015-04-07  9:27                                   ` Quentin Casasnovas
2015-04-07  9:40                                   ` [tip:x86/asm] " tip-bot for Borislav Petkov
