From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Keir Fraser <keir@xen.org>, Feng Wu <feng.wu@intel.com>
Subject: [PATCH v3 3/4] x86: use optimal NOPs to fill the SMEP/SMAP placeholders
Date: Thu, 17 Mar 2016 02:03:50 -0600	[thread overview]
Message-ID: <56EA72F602000078000DD933@prv-mh.provo.novell.com> (raw)
In-Reply-To: <56EA6FDF02000078000DD8FB@prv-mh.provo.novell.com>

Alternatives patching code picks the most suitable NOPs for the
running system, so simply use it to replace the pre-populated ones.

Use an arbitrary, always-available feature to key off of, but
hide this behind the new X86_FEATURE_ALWAYS.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Re-base.
v2: Introduce and use X86_FEATURE_ALWAYS.
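
For reference, the padding step the description relies on can be sketched
as follows: a minimal, stand-alone model of the add_nops()/ideal_nops
pattern used by alternatives patching. The 0F 1F "long NOP" table below is
one of the per-CPU tables such code may select at boot, so treat the exact
bytes as illustrative rather than as Xen's actual choice:

#include <string.h>

#define NOP_MAX 8

/* 0F 1F /0 "long NOP" encodings, indexed by length (illustrative). */
static const unsigned char * const ideal_nops[NOP_MAX + 1] = {
    [1] = (const unsigned char[]){ 0x90 },
    [2] = (const unsigned char[]){ 0x66, 0x90 },
    [3] = (const unsigned char[]){ 0x0f, 0x1f, 0x00 },
    [4] = (const unsigned char[]){ 0x0f, 0x1f, 0x40, 0x00 },
    [5] = (const unsigned char[]){ 0x0f, 0x1f, 0x44, 0x00, 0x00 },
    [6] = (const unsigned char[]){ 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 },
    [7] = (const unsigned char[]){ 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 },
    [8] = (const unsigned char[]){ 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00,
                                   0x00 },
};

/* Rewrite len bytes at insns using as few (i.e. as long) NOPs as possible. */
static void add_nops(void *insns, unsigned int len)
{
    while ( len > 0 )
    {
        unsigned int noplen = len > NOP_MAX ? NOP_MAX : len;

        memcpy(insns, ideal_nops[noplen], noplen);
        insns = (unsigned char *)insns + noplen;
        len -= noplen;
    }
}

Executing one 8-byte NOP is cheaper than eight 1-byte 0x90s, which is why
rewriting the pre-populated filler is worthwhile on the exit paths below.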

--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -175,12 +175,7 @@ compat_bad_hypercall:
 ENTRY(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
 .Lcr4_orig:
-        ASM_NOP8 /* testb $3,UREGS_cs(%rsp) */
-        ASM_NOP2 /* jpe   .Lcr4_alt_end */
-        ASM_NOP8 /* mov   CPUINFO_cr4...(%rsp), %rax */
-        ASM_NOP6 /* and   $..., %rax */
-        ASM_NOP8 /* mov   %rax, CPUINFO_cr4...(%rsp) */
-        ASM_NOP3 /* mov   %rax, %cr4 */
+        .skip (.Lcr4_alt_end - .Lcr4_alt) - (. - .Lcr4_orig), 0x90
 .Lcr4_orig_end:
         .pushsection .altinstr_replacement, "ax"
 .Lcr4_alt:
@@ -192,6 +187,7 @@ ENTRY(compat_restore_all_guest)
         mov   %rax, %cr4
 .Lcr4_alt_end:
         .section .altinstructions, "a"
+        altinstruction_entry .Lcr4_orig, .Lcr4_orig, X86_FEATURE_ALWAYS, 12, 0
         altinstruction_entry .Lcr4_orig, .Lcr4_alt, X86_FEATURE_SMEP, \
                              (.Lcr4_orig_end - .Lcr4_orig), \
                              (.Lcr4_alt_end - .Lcr4_alt)
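
The added entry is the one doing the work: it names .Lcr4_orig as both
original and replacement, with original length 12 and replacement length 0.
Under apply-time semantics along the lines sketched below (the struct
layout and names are assumptions mirroring what altinstruction_entry
records, not quotes from Xen's alternative.c), such an entry copies nothing
and degenerates into pure NOP padding, and since X86_FEATURE_ALWAYS is set
on every CPU, that padding is applied unconditionally:

#include <stdint.h>
#include <string.h>

struct alt_instr {
    int32_t  orig_offset;    /* original site, relative to this field */
    int32_t  repl_offset;    /* replacement code, relative to this field */
    uint16_t cpuid;          /* feature bit gating this entry */
    uint8_t  orig_len;       /* bytes available at the original site */
    uint8_t  repl_len;       /* replacement bytes (0 => NOP fill only) */
};

int boot_cpu_has(unsigned int feature);        /* provided elsewhere */
void add_nops(void *insns, unsigned int len);  /* see the sketch above */

static void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
    for ( struct alt_instr *a = start; a < end; a++ )
    {
        uint8_t *orig = (uint8_t *)&a->orig_offset + a->orig_offset;
        const uint8_t *repl = (const uint8_t *)&a->repl_offset +
                              a->repl_offset;

        if ( !boot_cpu_has(a->cpuid) )
            continue;

        /* Copy the replacement (zero bytes for an ALWAYS entry) ... */
        memcpy(orig, repl, a->repl_len);
        /* ... then turn the rest of the covered site into optimal NOPs. */
        add_nops(orig + a->repl_len, a->orig_len - a->repl_len);
    }
}

Entries are processed in the order they appear in .altinstructions, so the
SMEP entry that follows can still overwrite the freshly NOPed bytes with
the real CR4 manipulation when the feature is actually present.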
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -204,6 +204,7 @@ void ret_from_intr(void);
         662: __ASM_##op;                                               \
         .popsection;                                                   \
         .pushsection .altinstructions, "a";                            \
+        altinstruction_entry 661b, 661b, X86_FEATURE_ALWAYS, 3, 0;     \
         altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3;       \
         .popsection
 
@@ -215,6 +216,7 @@ void ret_from_intr(void);
         .pushsection .altinstr_replacement, "ax";                  \
         668: call cr4_pv32_restore;                                \
         .section .altinstructions, "a";                            \
+        altinstruction_entry 667b, 667b, X86_FEATURE_ALWAYS, 5, 0; \
         altinstruction_entry 667b, 668b, X86_FEATURE_SMEP, 5, 5;   \
         altinstruction_entry 667b, 668b, X86_FEATURE_SMAP, 5, 5;   \
         .popsection
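
The same two-stage pattern covers the 3-byte STAC/CLAC sites and the 5-byte
call site in the macros above. Below is a toy, runnable demonstration of
the cascade for a 3-byte site, using direct pointers instead of the
relative offsets real entries store; the stac encoding 0f 01 cb and the
3-byte long NOP 0f 1f 00 are the only x86 specifics, and the hard-wired
3-byte NOP pad is a simplification valid only for this demo:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_alt {
    uint8_t *orig;
    const uint8_t *repl;
    bool feature;                    /* stands in for the CPUID test */
    uint8_t orig_len, repl_len;
};

static const uint8_t nop3[] = { 0x0f, 0x1f, 0x00 };  /* 3-byte long NOP */
static const uint8_t stac[] = { 0x0f, 0x01, 0xcb };  /* stac */

static void apply(struct fake_alt *a, size_t n)
{
    for ( size_t i = 0; i < n; i++, a++ )
    {
        if ( !a->feature )
            continue;
        memcpy(a->orig, a->repl, a->repl_len);
        if ( a->orig_len > a->repl_len )          /* NOP out the tail */
            memcpy(a->orig + a->repl_len, nop3, a->orig_len - a->repl_len);
    }
}

int main(void)
{
    uint8_t site[3] = { 0x90, 0x90, 0x90 };      /* built-in placeholder */
    bool cpu_has_smap = false;                   /* flip to see the override */
    struct fake_alt alts[] = {
        { site, site, true,         3, 0 },      /* X86_FEATURE_ALWAYS */
        { site, stac, cpu_has_smap, 3, 3 },      /* X86_FEATURE_SMAP */
    };

    apply(alts, sizeof(alts) / sizeof(*alts));
    printf("%02x %02x %02x\n", site[0], site[1], site[2]);
    return 0;
}

This prints "0f 1f 00" (a single 3-byte NOP) as built, and "0f 01 cb"
(stac) once cpu_has_smap is set to true, matching the two possible
outcomes for an ASM_STAC site.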
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -162,6 +162,9 @@
 #define cpufeat_bit(idx)	((idx) % 32)
 #define cpufeat_mask(idx)	(_AC(1, U) << cpufeat_bit(idx))
 
+/* An alias of a feature we know is always going to be present. */
+#define X86_FEATURE_ALWAYS      X86_FEATURE_LM
+
 #if !defined(__ASSEMBLY__) && !defined(X86_FEATURES_ONLY)
 #include <xen/bitops.h>
 
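X86_FEATURE_LM (long mode) is a natural choice for the alias: a 64-bit
hypervisor cannot be running at all on a CPU that lacks it, so the bit is
guaranteed to be set wherever these alternatives get applied. A
hypothetical sanity check, not part of the patch and relying on the
cpufeature.h definitions above, spelling out that invariant:

#include <stdbool.h>

extern bool boot_cpu_has(unsigned int feature);

/* Hypothetical: were the "always" alias ever wrong, every entry keyed
 * off X86_FEATURE_ALWAYS would stay unpatched, silently leaving the
 * slower 0x90 filler in place. */
static inline void check_always_feature(void)
{
    if ( !boot_cpu_has(X86_FEATURE_ALWAYS) )
        __builtin_trap();    /* cannot happen: 64-bit Xen implies LM */
}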

Thread overview: 67+ messages
2016-03-04 11:08 [PATCH 0/4] x86: accommodate 32-bit PV guests with SMAP/SMEP handling Jan Beulich
2016-03-04 11:27 ` [PATCH 1/4] x86/alternatives: correct near branch check Jan Beulich
2016-03-07 15:43   ` Andrew Cooper
2016-03-07 15:56     ` Jan Beulich
2016-03-07 16:11       ` Andrew Cooper
2016-03-07 16:21         ` Jan Beulich
2016-03-08 17:33           ` Andrew Cooper
2016-03-04 11:27 ` [PATCH 2/4] x86: suppress SMAP and SMEP while running 32-bit PV guest code Jan Beulich
2016-03-07 16:59   ` Andrew Cooper
2016-03-08  7:57     ` Jan Beulich
2016-03-09  8:09       ` Wu, Feng
2016-03-09 14:09         ` Jan Beulich
2016-03-09 11:19       ` Andrew Cooper
2016-03-09 14:28         ` Jan Beulich
2016-03-09  8:09   ` Wu, Feng
2016-03-09 10:45     ` Andrew Cooper
2016-03-09 12:27       ` Wu, Feng
2016-03-09 12:33         ` Andrew Cooper
2016-03-09 12:36           ` Jan Beulich
2016-03-09 12:54             ` Wu, Feng
2016-03-09 13:35             ` Wu, Feng
2016-03-09 13:42               ` Andrew Cooper
2016-03-09 14:03       ` Jan Beulich
2016-03-09 14:07     ` Jan Beulich
2016-03-04 11:28 ` [PATCH 3/4] x86: use optimal NOPs to fill the SMAP/SMEP placeholders Jan Beulich
2016-03-07 17:43   ` Andrew Cooper
2016-03-08  8:02     ` Jan Beulich
2016-03-04 11:29 ` [PATCH 4/4] x86: use 32-bit loads for 32-bit PV guest state reload Jan Beulich
2016-03-07 17:45   ` Andrew Cooper
2016-03-10  9:44 ` [PATCH v2 0/3] x86: accommodate 32-bit PV guests with SMEP/SMAP handling Jan Beulich
2016-03-10  9:53   ` [PATCH v2 1/3] x86: suppress SMEP and SMAP while running 32-bit PV guest code Jan Beulich
2016-05-13 15:48     ` Andrew Cooper
2016-03-10  9:54   ` [PATCH v2 2/3] x86: use optimal NOPs to fill the SMEP/SMAP placeholders Jan Beulich
2016-05-13 15:49     ` Andrew Cooper
2016-03-10  9:55   ` [PATCH v2 3/3] x86: use 32-bit loads for 32-bit PV guest state reload Jan Beulich
     [not found]   ` <56E9A0DB02000078000DD54C@prv-mh.provo.novell.com>
2016-03-17  7:50     ` [PATCH v3 0/4] x86: accommodate 32-bit PV guests with SMEP/SMAP handling Jan Beulich
2016-03-17  8:02       ` [PATCH v3 1/4] x86: move cached CR4 value to struct cpu_info Jan Beulich
2016-03-17 16:20         ` Andrew Cooper
2016-03-17  8:03       ` [PATCH v3 2/4] x86: suppress SMEP and SMAP while running 32-bit PV guest code Jan Beulich
2016-03-25 18:01         ` Konrad Rzeszutek Wilk
2016-03-29  6:55           ` Jan Beulich
2016-05-13 15:58         ` Andrew Cooper
2016-03-17  8:03       ` Jan Beulich [this message]
2016-05-13 15:57         ` [PATCH v3 3/4] x86: use optimal NOPs to fill the SMEP/SMAP placeholders Andrew Cooper
2016-05-13 16:06           ` Jan Beulich
2016-05-13 16:09             ` Andrew Cooper
2016-03-17  8:04       ` [PATCH v3 4/4] x86: use 32-bit loads for 32-bit PV guest state reload Jan Beulich
2016-03-25 18:02         ` Konrad Rzeszutek Wilk
2016-03-17 16:14       ` [PATCH v3 5/4] x86: reduce code size of struct cpu_info member accesses Jan Beulich
2016-03-25 18:47         ` Konrad Rzeszutek Wilk
2016-03-29  6:59           ` Jan Beulich
2016-03-30 14:28             ` Konrad Rzeszutek Wilk
2016-03-30 14:42               ` Jan Beulich
2016-05-13 16:11         ` Andrew Cooper
2016-05-03 13:58       ` Ping: [PATCH v3 2/4] x86: suppress SMEP and SMAP while running 32-bit PV guest code Jan Beulich
2016-05-03 14:10         ` Andrew Cooper
2016-05-03 14:25           ` Jan Beulich
2016-05-04 10:03             ` Andrew Cooper
2016-05-04 13:35               ` Jan Beulich
2016-05-04  3:07         ` Wu, Feng
2016-05-13 15:21         ` Wei Liu
2016-05-13 15:30           ` Jan Beulich
2016-05-13 15:33             ` Wei Liu
2016-05-13 17:02       ` [PATCH v3 0/4] x86: accommodate 32-bit PV guests with SMEP/SMAP handling Wei Liu
2016-05-13 17:21         ` Andrew Cooper
2016-06-21  6:19       ` Wu, Feng
2016-06-21  7:17         ` Jan Beulich
