From: Feng Wu <feng.wu@intel.com>
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, Feng Wu <feng.wu@intel.com>,
	JBeulich@suse.com, andrew.cooper3@citrix.com,
	eddie.dong@intel.com, jun.nakajima@intel.com,
	ian.campbell@citrix.com
Subject: [PATCH v6 09/10] x86/hvm: Add SMAP support to HVM guest
Date: Wed,  7 May 2014 16:19:41 +0800
Message-ID: <1399450782-14735-10-git-send-email-feng.wu@intel.com>
In-Reply-To: <1399450782-14735-1-git-send-email-feng.wu@intel.com>

New Intel CPUs support SMAP (Supervisor Mode Access Prevention).
SMAP prevents supervisor-mode accesses to any linear address with
a valid translation for which the U/S flag (bit 2) is 1 in every
paging-structure entry controlling the translation for the linear
address.
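
As a rough illustration of that rule (not part of the patch itself), the
condition checked during a guest page walk can be sketched as below in Xen
coding style; the helper name and its flattened arguments are hypothetical,
and the "user page" requirement is enforced separately by the walk:

    #include <stdint.h>
    typedef uint8_t bool_t;   /* as in Xen's xen/types.h */

    /* Hypothetical helper, for illustration only; it mirrors the check
     * this patch adds to guest_walk_tables(). */
    static bool_t is_smap_violation(bool_t smap_enabled, bool_t user_mode_fault,
                                    unsigned int cpl, bool_t eflags_ac)
    {
        /* SMAP only applies to supervisor-mode accesses with CR4.SMAP set. */
        if ( !smap_enabled || user_mode_fault )
            return 0;

        /*
         * At CPL 3 a supervisor-mode access can only be an implicit one,
         * which faults regardless of EFLAGS.AC; explicit supervisor
         * accesses fault only while EFLAGS.AC is clear.
         */
        return (cpl == 3) || !eflags_ac;
    }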

Signed-off-by: Feng Wu <feng.wu@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/hvm/hvm.c        |  3 +++
 xen/arch/x86/mm/guest_walk.c  | 40 ++++++++++++++++++++++++++++++----------
 xen/include/asm-x86/hvm/hvm.h | 18 +++++++++++++++++-
 3 files changed, 50 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ac05160..76ccd07 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3039,6 +3039,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         if ( (count == 0) && !cpu_has_smep )
             *ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
 
+        if ( (count == 0) && !cpu_has_smap )
+            *ebx &= ~cpufeat_mask(X86_FEATURE_SMAP);
+
         /* Don't expose MPX to hvm when VMX support is not available */
         if ( (count == 0) &&
              (!(vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) ||
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 70460b6..bb38fda 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -144,7 +144,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     guest_l4e_t *l4p;
 #endif
     uint32_t gflags, mflags, iflags, rc = 0;
-    int smep;
+    bool_t smep = 0, smap = 0;
     bool_t pse1G = 0, pse2M = 0;
     p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
 
@@ -159,13 +159,33 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     mflags = mandatory_flags(v, pfec);
     iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
 
-    /* SMEP: kernel-mode instruction fetches from user-mode mappings
-     * should fault.  Unlike NX or invalid bits, we're looking for _all_
-     * entries in the walk to have _PAGE_USER set, so we need to do the
-     * whole walk as if it were a user-mode one and then invert the answer. */
-    smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v) 
-            && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
-    if ( smep )
+    if ( is_hvm_vcpu(v) && !(pfec & PFEC_user_mode) )
+    {
+        struct segment_register seg;
+        const struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+        hvm_get_segment_register(v, x86_seg_ss, &seg);
+
+        /* SMEP: kernel-mode instruction fetches from user-mode mappings
+         * should fault.  Unlike NX or invalid bits, we're looking for _all_
+         * entries in the walk to have _PAGE_USER set, so we need to do the
+         * whole walk as if it were a user-mode one and then invert the answer. */
+        smep =  hvm_smep_enabled(v) && (pfec & PFEC_insn_fetch);
+
+        /*
+         * SMAP: kernel-mode data accesses from user-mode mappings should fault.
+         * An access is treated as a SMAP violation only if all of the
+         * following conditions hold:
+         *   - X86_CR4_SMAP is set in CR4
+         *   - A user page is accessed
+         *   - CPL = 3 or X86_EFLAGS_AC is clear
+         *   - The fault occurred on a kernel-mode access
+         */
+        smap = hvm_smap_enabled(v) &&
+               ((seg.attr.fields.dpl == 3) || !(regs->eflags & X86_EFLAGS_AC));
+    }
+
+    if ( smep || smap )
         mflags |= _PAGE_USER;
 
 #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
@@ -338,8 +358,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
 set_ad:
 #endif
-    /* Now re-invert the user-mode requirement for SMEP. */
-    if ( smep ) 
+    /* Now re-invert the user-mode requirement for SMEP and SMAP */
+    if ( smep || smap )
         rc ^= _PAGE_USER;
 
     /* Go back and set accessed and dirty bits only if the walk was a
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index c373930..0ab6b70 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -260,6 +260,8 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
 #define hvm_smep_enabled(v) \
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
+#define hvm_smap_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
 #define hvm_nx_enabled(v) \
     (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
 
@@ -360,13 +362,26 @@ static inline bool_t hvm_vcpu_has_smep(void)
 
     hvm_cpuid(0, &eax, NULL, NULL, NULL);
 
-    if (eax < 7)
+    if ( eax < 7 )
         return 0;
 
     hvm_cpuid(7, NULL, &ebx, NULL, NULL);
     return !!(ebx & cpufeat_mask(X86_FEATURE_SMEP));
 }
 
+static inline bool_t hvm_vcpu_has_smap(void)
+{
+    unsigned int eax, ebx;
+
+    hvm_cpuid(0, &eax, NULL, NULL, NULL);
+
+    if ( eax < 7 )
+        return 0;
+
+    hvm_cpuid(0x7, NULL, &ebx, NULL, NULL);
+    return !!(ebx & cpufeat_mask(X86_FEATURE_SMAP));
+}
+
 /* These reserved bits in lower 32 remain 0 after any load of CR0 */
 #define HVM_CR0_GUEST_RESERVED_BITS             \
     (~((unsigned long)                          \
@@ -387,6 +402,7 @@ static inline bool_t hvm_vcpu_has_smep(void)
         X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
         X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
         (hvm_vcpu_has_smep() ? X86_CR4_SMEP : 0) |      \
+        (hvm_vcpu_has_smap() ? X86_CR4_SMAP : 0) |      \
         (cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) |     \
         ((nestedhvm_enabled((_v)->domain) && cpu_has_vmx)\
                       ? X86_CR4_VMXE : 0)  |             \
-- 
1.8.3.1
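
For reference, the "do the whole walk as if it were a user-mode one and then
invert the answer" approach described in the guest_walk.c comments amounts to
treating _PAGE_USER as a mandatory flag, recording whether any level lacked
it, and flipping that bit at the end so a fault is reported only when every
level mapped the address as user. A minimal sketch under that reading (the
array-based helper and its name are hypothetical; the real code checks each
level inline as it walks):

    #include <stdint.h>

    #define _PAGE_USER 0x004U   /* U/S is bit 2, as in Xen's page.h */

    static uint32_t walk_rc_sketch(const uint32_t gflags[], unsigned int levels,
                                   uint32_t mflags /* includes _PAGE_USER */)
    {
        uint32_t rc = 0;
        unsigned int i;

        for ( i = 0; i < levels; i++ )
            rc |= (gflags[i] & mflags) ^ mflags;  /* flags missing at this level */

        /* _PAGE_USER is now set (i.e. a fault) iff every level had it set. */
        rc ^= _PAGE_USER;

        return rc;
    }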

Thread overview: 43+ messages
2014-05-07  8:19 [PATCH v6 00/10] x86: Enable Supervisor Mode Access Prevention (SMAP) Feng Wu
2014-05-07  8:19 ` [PATCH v6 01/10] x86: define macros CPUINFO_features and CPUINFO_FEATURE_OFFSET Feng Wu
2014-05-07  8:19 ` [PATCH v6 02/10] x86: move common_interrupt to entry.S Feng Wu
2014-05-07  8:19 ` [PATCH v6 03/10] x86: merge stuff from asm-x86/x86_64/asm_defns.h to asm-x86/asm_defns.h Feng Wu
2014-05-07  8:19 ` [PATCH v6 04/10] x86: Add support for STAC/CLAC instructions Feng Wu
2014-05-07  9:36   ` Andrew Cooper
2014-05-07  8:19 ` [PATCH v6 05/10] Clear AC bit in RFLAGS to protect Xen itself by SMAP Feng Wu
2014-05-07  9:44   ` Andrew Cooper
2014-05-07 11:40     ` Jan Beulich
2014-05-07 11:53       ` Andrew Cooper
2014-05-08  1:41         ` Wu, Feng
2014-05-08  1:57           ` Andrew Cooper
2014-05-08  2:02             ` Wu, Feng
2014-05-08  6:40               ` Jan Beulich
2014-05-08  6:49                 ` Wu, Feng
2014-05-08  6:54                   ` Jan Beulich
2014-05-08  6:58                     ` Wu, Feng
2014-05-08  7:08                       ` Jan Beulich
2014-05-08  7:13                         ` Wu, Feng
2014-05-08  9:48               ` Andrew Cooper
2014-05-07  8:19 ` [PATCH v6 06/10] x86: Temporary disable SMAP to legally access user pages in kernel mode Feng Wu
2014-05-07  9:49   ` Andrew Cooper
2014-05-08  1:14   ` Tian, Kevin
2014-05-07  8:19 ` [PATCH v6 07/10] VMX: Disable SMAP feature when guest is in non-paging mode Feng Wu
2014-05-08  1:16   ` Tian, Kevin
2014-05-07  8:19 ` [PATCH v6 08/10] x86: Enable Supervisor Mode Access Prevention (SMAP) for Xen Feng Wu
2014-05-07 10:26   ` Andrew Cooper
2014-05-07 11:44     ` Jan Beulich
2014-05-07 11:47       ` Andrew Cooper
2014-05-08  2:32     ` Wu, Feng
2014-05-08  1:20   ` Tian, Kevin
2014-05-08  6:25     ` Wu, Feng
2014-05-08  7:06       ` Jan Beulich
2014-05-07  8:19 ` Feng Wu [this message]
2014-05-07 10:46   ` [PATCH v6 09/10] x86/hvm: Add SMAP support to HVM guest Andrew Cooper
2014-05-07 11:47     ` Jan Beulich
2014-05-08  1:22   ` Tian, Kevin
2014-05-07  8:19 ` [PATCH v6 10/10] x86/tools: Expose SMAP to HVM guests Feng Wu
2014-05-07  8:35 ` [PATCH v6 00/10] x86: Enable Supervisor Mode Access Prevention (SMAP) Jan Beulich
2014-05-07  9:00   ` Wu, Feng
2014-05-07  9:33     ` Jan Beulich
2014-05-07  8:57 ` Ian Campbell
2014-05-07  8:59   ` Wu, Feng
