xen-devel.lists.xenproject.org archive mirror
From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Subject: [PATCH v2 4/4] SVM: clean up svm_vmcb_isvalid()
Date: Wed, 07 Jun 2017 08:07:58 -0600
Message-ID: <593824DE02000078001606F0@prv-mh.provo.novell.com>
In-Reply-To: <593822A3020000780016069F@prv-mh.provo.novell.com>

[-- Attachment #1: Type: text/plain, Size: 7713 bytes --]

- correct CR3, CR4, and EFER checks
- delete bogus nested paging check
- add vcpu parameter (to include in log messages) and constify vmcb one
- use bool/true/false
- use accessors
- adjust formatting

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Constrain CR3 checks to the case when CR0.PG is set. Change wording
    of CR4 related message and also log valid bit mask there. Tighten
    EFER checks. Delete bogus nested paging check. Correct indentation
    in a few places.
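
For reference, the consolidated CR3 check in the svmdebug.c hunk below covers
three classes of MBZ bits once CR0.PG is set: bits 2:0 always, bits 11:5
except in legacy PAE mode (where they are part of the PDPT pointer), and any
bits at or above the guest's MAXPHYADDR when EFER.LMA is set. A rough
standalone sketch of that logic (the helper name and flat parameter list are
illustrative only and not part of the patch):

    /* Illustrative only - mirrors the consolidated CR3 check in the patch. */
    static bool cr3_has_mbz_bits(uint64_t cr0, uint64_t cr4, uint64_t efer,
                                 uint64_t cr3, unsigned int maxphysaddr)
    {
        bool pae = cr4 & X86_CR4_PAE;
        bool lma = efer & EFER_LMA;

        if ( !(cr0 & X86_CR0_PG) )
            return false;                     /* no paging - nothing to check */
        if ( cr3 & 0x7 )                      /* bits 2:0 */
            return true;
        if ( (!pae || lma) && (cr3 & 0xfe0) ) /* bits 11:5, except legacy PAE */
            return true;
        if ( lma && (cr3 >> maxphysaddr) )    /* bits above MAXPHYADDR */
            return true;
        return false;
    }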

--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -658,13 +658,13 @@ static int nsvm_vmcb_prepare4vmrun(struc
     /* Cleanbits */
     n2vmcb->cleanbits.bytes = 0;
 
-    rc = svm_vmcb_isvalid(__func__, ns_vmcb, 1);
+    rc = svm_vmcb_isvalid(__func__, ns_vmcb, v, true);
     if (rc) {
         gdprintk(XENLOG_ERR, "virtual vmcb invalid\n");
         return NSVM_ERROR_VVMCB;
     }
 
-    rc = svm_vmcb_isvalid(__func__, n2vmcb, 1);
+    rc = svm_vmcb_isvalid(__func__, n2vmcb, v, true);
     if (rc) {
         gdprintk(XENLOG_ERR, "n2vmcb invalid\n");
         return NSVM_ERROR_VMENTRY;
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -16,6 +16,7 @@
  *
  */
 
+#include <xen/sched.h>
 #include <asm/processor.h>
 #include <asm/msr-index.h>
 #include <asm/hvm/svm/svmdebug.h>
@@ -87,93 +88,79 @@ void svm_vmcb_dump(const char *from, con
     svm_dump_sel("  TR", &vmcb->tr);
 }
 
-bool_t
-svm_vmcb_isvalid(const char *from, struct vmcb_struct *vmcb,
-                 bool_t verbose)
+bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
+                      const struct vcpu *v, bool verbose)
 {
-    bool_t ret = 0; /* ok */
+    bool ret = false; /* ok */
 
-#define PRINTF(...) \
-    if (verbose) { ret = 1; printk("%s: ", from); printk(__VA_ARGS__); \
-    } else return 1;
+#define PRINTF(fmt, args...) do { \
+    if ( !verbose ) return true; \
+    ret = true; \
+    printk(XENLOG_GUEST "%pv[%s]: " fmt, v, from, ## args); \
+} while (0)
 
-    if ((vmcb->_efer & EFER_SVME) == 0) {
-        PRINTF("EFER: SVME bit not set (%#"PRIx64")\n", vmcb->_efer);
-    }
+    if ( !(vmcb_get_efer(vmcb) & EFER_SVME) )
+        PRINTF("EFER: SVME bit not set (%#"PRIx64")\n", vmcb_get_efer(vmcb));
 
-    if ((vmcb->_cr0 & X86_CR0_CD) == 0 && (vmcb->_cr0 & X86_CR0_NW) != 0) {
+    if ( !(vmcb_get_cr0(vmcb) & X86_CR0_CD) && (vmcb_get_cr0(vmcb) & X86_CR0_NW) )
         PRINTF("CR0: CD bit is zero and NW bit set (%#"PRIx64")\n",
-                vmcb->_cr0);
-    }
+                vmcb_get_cr0(vmcb));
 
-    if ((vmcb->_cr0 >> 32U) != 0) {
+    if ( vmcb_get_cr0(vmcb) >> 32 )
         PRINTF("CR0: bits [63:32] are not zero (%#"PRIx64")\n",
-                vmcb->_cr0);
-    }
+                vmcb_get_cr0(vmcb));
 
-    if ((vmcb->_cr3 & 0x7) != 0) {
-        PRINTF("CR3: MBZ bits are set (%#"PRIx64")\n", vmcb->_cr3);
-    }
-    if ((vmcb->_efer & EFER_LMA) && (vmcb->_cr3 & 0xfe) != 0) {
-        PRINTF("CR3: MBZ bits are set (%#"PRIx64")\n", vmcb->_cr3);
-    }
-
-    if ((vmcb->_cr4 >> 19U) != 0) {
-        PRINTF("CR4: bits [63:19] are not zero (%#"PRIx64")\n",
-                vmcb->_cr4);
-    }
-
-    if (((vmcb->_cr4 >> 11U) & 0x7fU) != 0) {
-        PRINTF("CR4: bits [17:11] are not zero (%#"PRIx64")\n",
-                vmcb->_cr4);
-    }
+    if ( (vmcb_get_cr0(vmcb) & X86_CR0_PG) &&
+         ((vmcb_get_cr3(vmcb) & 0x7) ||
+          ((!(vmcb_get_cr4(vmcb) & X86_CR4_PAE) ||
+            (vmcb_get_efer(vmcb) & EFER_LMA)) &&
+           (vmcb_get_cr3(vmcb) & 0xfe0)) ||
+          ((vmcb_get_efer(vmcb) & EFER_LMA) &&
+           (vmcb_get_cr3(vmcb) >> v->domain->arch.cpuid->extd.maxphysaddr))) )
+        PRINTF("CR3: MBZ bits are set (%#"PRIx64")\n", vmcb_get_cr3(vmcb));
+
+    if ( vmcb_get_cr4(vmcb) & ~hvm_cr4_guest_valid_bits(v, false) )
+        PRINTF("CR4: invalid bits are set (%#"PRIx64", valid: %#"PRIx64")\n",
+               vmcb_get_cr4(vmcb), hvm_cr4_guest_valid_bits(v, false));
 
-    if ((vmcb->_dr6 >> 32U) != 0) {
+    if ( vmcb_get_dr6(vmcb) >> 32 )
         PRINTF("DR6: bits [63:32] are not zero (%#"PRIx64")\n",
-                vmcb->_dr6);
-    }
+               vmcb_get_dr6(vmcb));
 
-    if ((vmcb->_dr7 >> 32U) != 0) {
+    if ( vmcb_get_dr7(vmcb) >> 32 )
         PRINTF("DR7: bits [63:32] are not zero (%#"PRIx64")\n",
-                vmcb->_dr7);
-    }
+               vmcb_get_dr7(vmcb));
 
-    if ((vmcb->_efer >> 15U) != 0) {
-        PRINTF("EFER: bits [63:15] are not zero (%#"PRIx64")\n",
-                vmcb->_efer);
-    }
+    if ( vmcb_get_efer(vmcb) & ~(EFER_SCE | EFER_LME | EFER_LMA | EFER_NX |
+                                 EFER_SVME | EFER_LMSLE | EFER_FFXSE) )
+        PRINTF("EFER: undefined bits are not zero (%#"PRIx64")\n",
+               vmcb_get_efer(vmcb));
 
-    if ((vmcb->_efer & EFER_LME) != 0 && ((vmcb->_cr0 & X86_CR0_PG) != 0)) {
-        if ((vmcb->_cr4 & X86_CR4_PAE) == 0) {
-            PRINTF("EFER_LME and CR0.PG are both set and CR4.PAE is zero.\n");
-        }
-        if ((vmcb->_cr0 & X86_CR0_PE) == 0) {
-            PRINTF("EFER_LME and CR0.PG are both set and CR0.PE is zero.\n");
-        }
-    }
+    if ( hvm_efer_valid(v, vmcb_get_efer(vmcb), -1) )
+        PRINTF("EFER: %s\n", hvm_efer_valid(v, vmcb_get_efer(vmcb), -1));
 
-    if ((vmcb->_efer & EFER_LME) != 0
-        && (vmcb->_cr0 & X86_CR0_PG) != 0
-        && (vmcb->_cr4 & X86_CR4_PAE) != 0
-        && (vmcb->cs.attr.fields.l != 0)
-        && (vmcb->cs.attr.fields.db != 0))
+    if ( (vmcb_get_efer(vmcb) & EFER_LME) && (vmcb_get_cr0(vmcb) & X86_CR0_PG) )
     {
-        PRINTF("EFER_LME, CR0.PG, CR4.PAE, CS.L and CS.D are all non-zero.\n");
+        if ( !(vmcb_get_cr4(vmcb) & X86_CR4_PAE) )
+            PRINTF("EFER_LME and CR0.PG are both set and CR4.PAE is zero\n");
+        if ( !(vmcb_get_cr0(vmcb) & X86_CR0_PE) )
+            PRINTF("EFER_LME and CR0.PG are both set and CR0.PE is zero\n");
     }
 
-    if ((vmcb->_general2_intercepts & GENERAL2_INTERCEPT_VMRUN) == 0) {
+    if ( (vmcb_get_efer(vmcb) & EFER_LME) &&
+         (vmcb_get_cr0(vmcb) & X86_CR0_PG) &&
+         (vmcb_get_cr4(vmcb) & X86_CR4_PAE) &&
+         vmcb->cs.attr.fields.l &&
+         vmcb->cs.attr.fields.db )
+        PRINTF("EFER_LME, CR0.PG, CR4.PAE, CS.L and CS.D are all non-zero\n");
+
+    if ( !(vmcb_get_general2_intercepts(vmcb) & GENERAL2_INTERCEPT_VMRUN) )
         PRINTF("GENERAL2_INTERCEPT: VMRUN intercept bit is clear (%#"PRIx32")\n",
-            vmcb->_general2_intercepts);
-    }
+               vmcb_get_general2_intercepts(vmcb));
 
-    if (vmcb->eventinj.fields.resvd1 != 0) {
+    if ( vmcb->eventinj.fields.resvd1 )
         PRINTF("eventinj: MBZ bits are set (%#"PRIx64")\n",
-                vmcb->eventinj.bytes);
-    }
-
-    if (vmcb->_np_enable && vmcb->_h_cr3 == 0) {
-        PRINTF("nested paging enabled but host cr3 is 0\n");
-    }
+               vmcb->eventinj.bytes);
 
 #undef PRINTF
     return ret;
--- a/xen/include/asm-x86/hvm/svm/svmdebug.h
+++ b/xen/include/asm-x86/hvm/svm/svmdebug.h
@@ -23,7 +23,7 @@
 #include <asm/hvm/svm/vmcb.h>
 
 void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb);
-bool_t svm_vmcb_isvalid(const char *from, struct vmcb_struct *vmcb,
-                        bool_t verbose);
+bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
+                      const struct vcpu *v, bool verbose);
 
 #endif /* __ASM_X86_HVM_SVM_SVMDEBUG_H__ */
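
A note on the (pre-existing) calling convention, which the patch keeps:
despite its name, svm_vmcb_isvalid() returns true when a problem was found
and false when the VMCB passed all checks. With verbose set, every failing
check is logged and the accumulated result is returned; without it, the
function returns on the first failure without logging. Callers therefore
treat a non-zero result as an error, e.g. (hypothetical caller, mirroring
nsvm_vmcb_prepare4vmrun() above):

    if ( svm_vmcb_isvalid(__func__, ns_vmcb, v, true) )
    {
        gdprintk(XENLOG_ERR, "virtual vmcb invalid\n");
        return NSVM_ERROR_VVMCB;
    }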





Thread overview: 9+ messages
2017-06-07 13:58 [PATCH v2 0/4] SVM: misc cleanup Jan Beulich
2017-06-07 14:04 ` [PATCH v2 1/4] SVM: use VMCB accessors Jan Beulich
2017-06-07 17:04   ` Boris Ostrovsky
2017-06-07 14:04 ` [PATCH v2 2/4] SVM: infer type in VMCB_ACCESSORS() Jan Beulich
2017-06-07 17:04   ` Boris Ostrovsky
2017-06-07 14:07 ` [PATCH v2 3/4] SVM: clean up svm_vmcb_dump() Jan Beulich
2017-06-07 17:08   ` Boris Ostrovsky
2017-06-07 14:07 ` Jan Beulich [this message]
2017-06-07 17:11   ` [PATCH v2 4/4] SVM: clean up svm_vmcb_isvalid() Boris Ostrovsky
