* [PATCH v5 4/7] X86: generic MSRs save/restore
@ 2013-12-03 14:50 Liu, Jinsong
From: Liu, Jinsong @ 2013-12-03 14:50 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Andrew Cooper, keir, Ian.Campbell, haoxudong.hao


From 54fe7e722dd4ebba91bde16a1860f49a1cce4e5e Mon Sep 17 00:00:00 2001
From: Liu Jinsong <jinsong.liu@intel.com>
Date: Wed, 4 Dec 2013 00:57:23 +0800
Subject: [PATCH v5 4/7] X86: generic MSRs save/restore

This patch introduces a generic MSR save/restore mechanism, so that
new MSRs can be added to the save/restore path in the future with a
smaller change than the full-blown addition of a new save/restore type.
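
As a rough illustration of how the mechanism is meant to be extended
(not part of this patch), adding a new MSR would mean growing the enum
that sizes the public msr[] array and filling in the currently empty
common save/load stubs.  The slot HVM_MSR_EXAMPLE, the index
MSR_EXAMPLE and the vcpu field v->arch.hvm_vcpu.example below are
purely made-up placeholders:

    enum {
        HVM_MSR_EXAMPLE,   /* hypothetical new slot */
        HVM_MSR_COUNT,     /* stays last, sizes hvm_msr.msr[] */
    };

    static void hvm_save_msr_common(struct vcpu *v, struct hvm_msr *ctxt)
    {
        /* Record the (made-up) MSR index and its current value. */
        ctxt->msr[ctxt->count].index = MSR_EXAMPLE;
        ctxt->msr[ctxt->count].val = v->arch.hvm_vcpu.example;
        ctxt->count++;
    }

    static int hvm_load_msr_common(struct vcpu *v, struct hvm_msr *ctxt)
    {
        unsigned int i;
        int ret = -ENOENT;

        for ( i = 0; i < ctxt->count; i++ )
        {
            if ( ctxt->msr[i].index == MSR_EXAMPLE )
            {
                v->arch.hvm_vcpu.example = ctxt->msr[i].val;
                ret = 0;
            }
        }

        return ret;
    }

The load side deliberately keeps -ENOENT for "nothing recognized", so
that hvm_load_msr() can fall back to the vendor-specific
hvm_funcs.load_msr hook.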

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Liu Jinsong <jinsong.liu@intel.com>
---
 xen/arch/x86/hvm/hvm.c                 |   74 ++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/vmx/vmx.c             |   17 +++++++
 xen/include/asm-x86/hvm/hvm.h          |    3 +
 xen/include/public/arch-x86/hvm/save.h |   18 +++++++-
 4 files changed, 111 insertions(+), 1 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0f7178b..fb46e4b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -702,6 +702,80 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
+/* Empty stub for now; common MSR handling could be added in the future */
+static void hvm_save_msr_common(struct vcpu *v, struct hvm_msr *ctxt)
+{
+}
+
+/*
+ * Empty stub for now; common MSR handling could be added in the future.
+ * Contract: return 0 when a recognized MSR is loaded successfully,
+ * -ENOENT when no MSR in the context is recognized here, or another
+ * error code when loading a recognized MSR fails.
+ */
+static int hvm_load_msr_common(struct vcpu *v, struct hvm_msr *ctxt)
+{
+    return -ENOENT;
+}
+
+static int hvm_save_msr(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+    struct hvm_msr ctxt;
+    int err = 0;
+
+    for_each_vcpu ( d, v )
+    {
+        memset(&ctxt, 0, sizeof(ctxt));
+
+        /* For common msrs */
+        hvm_save_msr_common(v, &ctxt);
+
+        /* For vmx/svm specific msrs */
+        if ( hvm_funcs.save_msr )
+            hvm_funcs.save_msr(v, &ctxt);
+
+        err = hvm_save_entry(HVM_MSR, v->vcpu_id, h, &ctxt);
+        if ( err )
+            break;
+    }
+
+    return err;
+}
+
+static int hvm_load_msr(struct domain *d, hvm_domain_context_t *h)
+{
+    unsigned int vcpuid = hvm_load_instance(h);
+    struct vcpu *v;
+    struct hvm_msr ctxt;
+    int ret;
+
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
+                d->domain_id, vcpuid);
+        return -EINVAL;
+    }
+
+    if ( hvm_load_entry(HVM_MSR, h, &ctxt) != 0 )
+        return -EINVAL;
+
+    /* For common msrs */
+    ret = hvm_load_msr_common(v, &ctxt);
+    if ( ret == -ENOENT )
+    {
+        /* For vmx/svm specific msrs */
+        if ( hvm_funcs.load_msr )
+            return hvm_funcs.load_msr(v, &ctxt);
+        else
+            return -EINVAL;
+    }
+    return ret;
+}
+
+HVM_REGISTER_SAVE_RESTORE(HVM_MSR, hvm_save_msr,
+                          hvm_load_msr, 1, HVMSR_PER_VCPU);
+
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index f0132a4..bac19f3 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -580,6 +580,21 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
     return 0;
 }
 
+/* Empty stub for now; VMX-specific MSR handling could be added in the future */
+static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
+{
+}
+
+/*
+ * Empty stub for now; VMX-specific MSR handling could be added in the
+ * future.  Contract: return 0 on successful load, or an error code when
+ * loading fails or the MSR is not recognized.
+ */
+static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
+{
+    return 0;
+}
+
 static void vmx_fpu_enter(struct vcpu *v)
 {
     vcpu_restore_fpu_lazy(v);
@@ -1606,6 +1621,8 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .vcpu_destroy         = vmx_vcpu_destroy,
     .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
     .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
+    .save_msr             = vmx_save_msr,
+    .load_msr             = vmx_load_msr,
     .get_interrupt_shadow = vmx_get_interrupt_shadow,
     .set_interrupt_shadow = vmx_set_interrupt_shadow,
     .guest_x86_mode       = vmx_guest_x86_mode,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index a8ba06d..1c09d41 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -109,6 +109,9 @@ struct hvm_function_table {
     void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
     int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
 
+    void (*save_msr)(struct vcpu *v, struct hvm_msr *ctxt);
+    int (*load_msr)(struct vcpu *v, struct hvm_msr *ctxt);
+
     /* Examine specifics of the guest state. */
     unsigned int (*get_interrupt_shadow)(struct vcpu *v);
     void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 3664aaf..e440eb5 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -592,9 +592,25 @@ struct hvm_tsc_adjust {
 
 DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);
 
+enum {
+    HVM_MSR_COUNT,
+};
+
+struct msr_save_load {
+    uint32_t index;
+    uint64_t val;
+};
+
+struct hvm_msr {
+    uint32_t count;
+    struct msr_save_load msr[HVM_MSR_COUNT];
+};
+
+DECLARE_HVM_SAVE_TYPE(HVM_MSR, 20, struct hvm_msr);
+
 /* 
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 19
+#define HVM_SAVE_CODE_MAX 20
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
-- 
1.7.1

