* [PATCH v4 1/2] x86/hvm: Introduce *save_one() functions
@ 2018-05-07  8:24 Alexandru Isaila
  2018-05-07  8:24 ` [PATCH v4 2/2] x86/domctl: Don't pause the whole domain if only getting vcpu state Alexandru Isaila
  2018-05-17 15:49 ` [PATCH v4 1/2] x86/hvm: Introduce *save_one() functions Jan Beulich
  0 siblings, 2 replies; 5+ messages in thread
From: Alexandru Isaila @ 2018-05-07  8:24 UTC (permalink / raw)
  To: xen-devel
  Cc: wei.liu2, andrew.cooper3, ian.jackson, paul.durrant, jbeulich,
	Alexandru Isaila

This patch introduces *save_one() functions. They will be called from the
*save() functions so we can extract data for a single instance.

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V3:
	- Rebased to the latest staging version
	- Split the patch into 2 patches.
---
 xen/arch/x86/cpu/mcheck/vmce.c |  16 ++-
 xen/arch/x86/hvm/hvm.c         | 279 ++++++++++++++++++++++-------------------
 xen/arch/x86/hvm/mtrr.c        |  51 ++++----
 xen/arch/x86/hvm/viridian.c    |  13 +-
 4 files changed, 200 insertions(+), 159 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index e07cd2f..15b0f2a 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -349,6 +349,14 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
     return ret;
 }
 
+void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
+{
+    ctxt->caps = v->arch.vmce.mcg_cap;
+    ctxt->mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
+    ctxt->mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
+    ctxt->mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
+}
+
 static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -356,13 +364,9 @@ static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 
     for_each_vcpu ( d, v )
     {
-        struct hvm_vmce_vcpu ctxt = {
-            .caps = v->arch.vmce.mcg_cap,
-            .mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
-            .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2,
-            .mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl,
-        };
+        struct hvm_vmce_vcpu ctxt;
 
+        vmce_save_vcpu_ctxt_one(v, &ctxt);
         err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
         if ( err )
             break;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c23983c..6733f26 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -740,6 +740,11 @@ void hvm_domain_destroy(struct domain *d)
     destroy_vpci_mmcfg(d);
 }
 
+void hvm_save_tsc_adjust_one(struct vcpu *v, struct hvm_tsc_adjust *ctxt)
+{
+    ctxt->tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
+}
+
 static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -748,7 +753,7 @@ static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 
     for_each_vcpu ( d, v )
     {
-        ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
+        hvm_save_tsc_adjust_one(v, &ctxt);
         err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
         if ( err )
             break;
@@ -780,11 +785,109 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
+void hvm_save_cpu_ctxt_one(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+{
+    struct segment_register seg;
+
+    /* Architecture-specific vmcs/vmcb bits */
+    hvm_funcs.save_cpu_ctxt(v, ctxt);
+
+    ctxt->tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm_domain.sync_tsc);
+
+    ctxt->msr_tsc_aux = hvm_msr_tsc_aux(v);
+
+    hvm_get_segment_register(v, x86_seg_idtr, &seg);
+    ctxt->idtr_limit = seg.limit;
+    ctxt->idtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_gdtr, &seg);
+    ctxt->gdtr_limit = seg.limit;
+    ctxt->gdtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_cs, &seg);
+    ctxt->cs_sel = seg.sel;
+    ctxt->cs_limit = seg.limit;
+    ctxt->cs_base = seg.base;
+    ctxt->cs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ds, &seg);
+    ctxt->ds_sel = seg.sel;
+    ctxt->ds_limit = seg.limit;
+    ctxt->ds_base = seg.base;
+    ctxt->ds_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_es, &seg);
+    ctxt->es_sel = seg.sel;
+    ctxt->es_limit = seg.limit;
+    ctxt->es_base = seg.base;
+    ctxt->es_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ss, &seg);
+    ctxt->ss_sel = seg.sel;
+    ctxt->ss_limit = seg.limit;
+    ctxt->ss_base = seg.base;
+    ctxt->ss_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_fs, &seg);
+    ctxt->fs_sel = seg.sel;
+    ctxt->fs_limit = seg.limit;
+    ctxt->fs_base = seg.base;
+    ctxt->fs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_gs, &seg);
+    ctxt->gs_sel = seg.sel;
+    ctxt->gs_limit = seg.limit;
+    ctxt->gs_base = seg.base;
+    ctxt->gs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_tr, &seg);
+    ctxt->tr_sel = seg.sel;
+    ctxt->tr_limit = seg.limit;
+    ctxt->tr_base = seg.base;
+    ctxt->tr_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ldtr, &seg);
+    ctxt->ldtr_sel = seg.sel;
+    ctxt->ldtr_limit = seg.limit;
+    ctxt->ldtr_base = seg.base;
+    ctxt->ldtr_arbytes = seg.attr;
+
+    if ( v->fpu_initialised )
+    {
+        memcpy(ctxt->fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt->fpu_regs));
+        ctxt->flags = XEN_X86_FPU_INITIALISED;
+    }
+
+    ctxt->rax = v->arch.user_regs.rax;
+    ctxt->rbx = v->arch.user_regs.rbx;
+    ctxt->rcx = v->arch.user_regs.rcx;
+    ctxt->rdx = v->arch.user_regs.rdx;
+    ctxt->rbp = v->arch.user_regs.rbp;
+    ctxt->rsi = v->arch.user_regs.rsi;
+    ctxt->rdi = v->arch.user_regs.rdi;
+    ctxt->rsp = v->arch.user_regs.rsp;
+    ctxt->rip = v->arch.user_regs.rip;
+    ctxt->rflags = v->arch.user_regs.rflags;
+    ctxt->r8  = v->arch.user_regs.r8;
+    ctxt->r9  = v->arch.user_regs.r9;
+    ctxt->r10 = v->arch.user_regs.r10;
+    ctxt->r11 = v->arch.user_regs.r11;
+    ctxt->r12 = v->arch.user_regs.r12;
+    ctxt->r13 = v->arch.user_regs.r13;
+    ctxt->r14 = v->arch.user_regs.r14;
+    ctxt->r15 = v->arch.user_regs.r15;
+    ctxt->dr0 = v->arch.debugreg[0];
+    ctxt->dr1 = v->arch.debugreg[1];
+    ctxt->dr2 = v->arch.debugreg[2];
+    ctxt->dr3 = v->arch.debugreg[3];
+    ctxt->dr6 = v->arch.debugreg[6];
+    ctxt->dr7 = v->arch.debugreg[7];
+}
+
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
-    struct segment_register seg;
 
     for_each_vcpu ( d, v )
     {
@@ -795,99 +898,7 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 
         memset(&ctxt, 0, sizeof(ctxt));
 
-        /* Architecture-specific vmcs/vmcb bits */
-        hvm_funcs.save_cpu_ctxt(v, &ctxt);
-
-        ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm_domain.sync_tsc);
-
-        ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
-
-        hvm_get_segment_register(v, x86_seg_idtr, &seg);
-        ctxt.idtr_limit = seg.limit;
-        ctxt.idtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_gdtr, &seg);
-        ctxt.gdtr_limit = seg.limit;
-        ctxt.gdtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_cs, &seg);
-        ctxt.cs_sel = seg.sel;
-        ctxt.cs_limit = seg.limit;
-        ctxt.cs_base = seg.base;
-        ctxt.cs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ds, &seg);
-        ctxt.ds_sel = seg.sel;
-        ctxt.ds_limit = seg.limit;
-        ctxt.ds_base = seg.base;
-        ctxt.ds_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_es, &seg);
-        ctxt.es_sel = seg.sel;
-        ctxt.es_limit = seg.limit;
-        ctxt.es_base = seg.base;
-        ctxt.es_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ss, &seg);
-        ctxt.ss_sel = seg.sel;
-        ctxt.ss_limit = seg.limit;
-        ctxt.ss_base = seg.base;
-        ctxt.ss_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_fs, &seg);
-        ctxt.fs_sel = seg.sel;
-        ctxt.fs_limit = seg.limit;
-        ctxt.fs_base = seg.base;
-        ctxt.fs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_gs, &seg);
-        ctxt.gs_sel = seg.sel;
-        ctxt.gs_limit = seg.limit;
-        ctxt.gs_base = seg.base;
-        ctxt.gs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_tr, &seg);
-        ctxt.tr_sel = seg.sel;
-        ctxt.tr_limit = seg.limit;
-        ctxt.tr_base = seg.base;
-        ctxt.tr_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ldtr, &seg);
-        ctxt.ldtr_sel = seg.sel;
-        ctxt.ldtr_limit = seg.limit;
-        ctxt.ldtr_base = seg.base;
-        ctxt.ldtr_arbytes = seg.attr;
-
-        if ( v->fpu_initialised )
-        {
-            memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
-            ctxt.flags = XEN_X86_FPU_INITIALISED;
-        }
-
-        ctxt.rax = v->arch.user_regs.rax;
-        ctxt.rbx = v->arch.user_regs.rbx;
-        ctxt.rcx = v->arch.user_regs.rcx;
-        ctxt.rdx = v->arch.user_regs.rdx;
-        ctxt.rbp = v->arch.user_regs.rbp;
-        ctxt.rsi = v->arch.user_regs.rsi;
-        ctxt.rdi = v->arch.user_regs.rdi;
-        ctxt.rsp = v->arch.user_regs.rsp;
-        ctxt.rip = v->arch.user_regs.rip;
-        ctxt.rflags = v->arch.user_regs.rflags;
-        ctxt.r8  = v->arch.user_regs.r8;
-        ctxt.r9  = v->arch.user_regs.r9;
-        ctxt.r10 = v->arch.user_regs.r10;
-        ctxt.r11 = v->arch.user_regs.r11;
-        ctxt.r12 = v->arch.user_regs.r12;
-        ctxt.r13 = v->arch.user_regs.r13;
-        ctxt.r14 = v->arch.user_regs.r14;
-        ctxt.r15 = v->arch.user_regs.r15;
-        ctxt.dr0 = v->arch.debugreg[0];
-        ctxt.dr1 = v->arch.debugreg[1];
-        ctxt.dr2 = v->arch.debugreg[2];
-        ctxt.dr3 = v->arch.debugreg[3];
-        ctxt.dr6 = v->arch.debugreg[6];
-        ctxt.dr7 = v->arch.debugreg[7];
+        hvm_save_cpu_ctxt_one(v, &ctxt);
 
         if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
             return 1; 
@@ -1173,6 +1184,18 @@ HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                                            save_area) + \
                                   xstate_ctxt_size(xcr0))
 
+void hvm_save_cpu_xsave_states_one(struct vcpu *v, struct hvm_hw_cpu_xsave **ctx, hvm_domain_context_t *h)
+{
+    unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
+    struct hvm_hw_cpu_xsave *ctxt = *ctx;
+
+    h->cur += size;
+
+    ctxt->xfeature_mask = xfeature_mask;
+    ctxt->xcr0 = v->arch.xcr0;
+    ctxt->xcr0_accum = v->arch.xcr0_accum;
+}
+
 static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -1190,11 +1213,7 @@ static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
         if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size) )
             return 1;
         ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
-        h->cur += size;
-
-        ctxt->xfeature_mask = xfeature_mask;
-        ctxt->xcr0 = v->arch.xcr0;
-        ctxt->xcr0_accum = v->arch.xcr0_accum;
+        hvm_save_cpu_xsave_states_one(v, &ctxt, h);
         expand_xsave_states(v, &ctxt->save_area,
                             size - offsetof(typeof(*ctxt), save_area));
     }
@@ -1339,6 +1358,39 @@ static const uint32_t msrs_to_send[] = {
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
+int hvm_save_cpu_msrs_one(struct vcpu *v, struct hvm_msr **ctx, hvm_domain_context_t *h)
+{
+    unsigned int i;
+    struct hvm_msr *ctxt = *ctx;
+
+    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
+    {
+        uint64_t val;
+        int rc = guest_rdmsr(v, msrs_to_send[i], &val);
+
+        /*
+         * It is the programmers responsibility to ensure that
+         * msrs_to_send[] contain generally-read/write MSRs.
+         * X86EMUL_EXCEPTION here implies a missing feature, and that the
+         * guest doesn't have access to the MSR.
+         */
+        if ( rc == X86EMUL_EXCEPTION )
+            continue;
+
+        if ( rc != X86EMUL_OKAY )
+        {
+            ASSERT_UNREACHABLE();
+            return -ENXIO;
+        }
+
+        if ( !val )
+            continue; /* Skip empty MSRs. */
+        ctxt->msr[ctxt->count].index = msrs_to_send[i];
+        ctxt->msr[ctxt->count++].val = val;
+    }
+    return 0;
+}
+
 static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -1355,32 +1407,7 @@ static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
         ctxt = (struct hvm_msr *)&h->data[h->cur];
         ctxt->count = 0;
 
-        for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
-        {
-            uint64_t val;
-            int rc = guest_rdmsr(v, msrs_to_send[i], &val);
-
-            /*
-             * It is the programmers responsibility to ensure that
-             * msrs_to_send[] contain generally-read/write MSRs.
-             * X86EMUL_EXCEPTION here implies a missing feature, and that the
-             * guest doesn't have access to the MSR.
-             */
-            if ( rc == X86EMUL_EXCEPTION )
-                continue;
-
-            if ( rc != X86EMUL_OKAY )
-            {
-                ASSERT_UNREACHABLE();
-                return -ENXIO;
-            }
-
-            if ( !val )
-                continue; /* Skip empty MSRs. */
-
-            ctxt->msr[ctxt->count].index = msrs_to_send[i];
-            ctxt->msr[ctxt->count++].val = val;
-        }
+        hvm_save_cpu_msrs_one(v, &ctxt, h);
 
         if ( hvm_funcs.save_msr )
             hvm_funcs.save_msr(v, ctxt);
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index b721c63..2678b25 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -666,36 +666,41 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     return 0;
 }
 
+void hvm_save_mtrr_msr_one(struct vcpu *v, struct hvm_hw_mtrr *hw_mtrr)
+{
+    struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
+    int i;
+
+    hvm_get_guest_pat(v, &hw_mtrr->msr_pat_cr);
+
+    hw_mtrr->msr_mtrr_def_type = mtrr_state->def_type
+                            | (mtrr_state->enabled << 10);
+    hw_mtrr->msr_mtrr_cap = mtrr_state->mtrr_cap;
+
+    for ( i = 0; i < MTRR_VCNT; i++ )
+    {
+        /* save physbase */
+        hw_mtrr->msr_mtrr_var[i*2] =
+            ((uint64_t*)mtrr_state->var_ranges)[i*2];
+        /* save physmask */
+        hw_mtrr->msr_mtrr_var[i*2+1] =
+            ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
+    }
+
+    for ( i = 0; i < NUM_FIXED_MSR; i++ )
+        hw_mtrr->msr_mtrr_fixed[i] =
+            ((uint64_t*)mtrr_state->fixed_ranges)[i];
+
+}
+
 static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
 {
-    int i;
     struct vcpu *v;
     struct hvm_hw_mtrr hw_mtrr;
-    struct mtrr_state *mtrr_state;
     /* save mtrr&pat */
     for_each_vcpu(d, v)
     {
-        mtrr_state = &v->arch.hvm_vcpu.mtrr;
-
-        hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
-
-        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
-                                | (mtrr_state->enabled << 10);
-        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;
-
-        for ( i = 0; i < MTRR_VCNT; i++ )
-        {
-            /* save physbase */
-            hw_mtrr.msr_mtrr_var[i*2] =
-                ((uint64_t*)mtrr_state->var_ranges)[i*2];
-            /* save physmask */
-            hw_mtrr.msr_mtrr_var[i*2+1] =
-                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
-        }
-
-        for ( i = 0; i < NUM_FIXED_MSR; i++ )
-            hw_mtrr.msr_mtrr_fixed[i] =
-                ((uint64_t*)mtrr_state->fixed_ranges)[i];
+        hvm_save_mtrr_msr_one(v, &hw_mtrr);
 
         if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
             return 1;
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index d6aa89d..9a49e76 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -1028,6 +1028,12 @@ static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
                           viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
 
+void viridian_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_viridian_vcpu_context *ctxt)
+{
+    ctxt->vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw;
+    ctxt->vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending;
+}
+
 static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -1036,10 +1042,9 @@ static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return 0;
 
     for_each_vcpu( d, v ) {
-        struct hvm_viridian_vcpu_context ctxt = {
-            .vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw,
-            .vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending,
-        };
+        struct hvm_viridian_vcpu_context ctxt;
+
+        viridian_save_vcpu_ctxt_one(v, &ctxt);
 
         if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
             return 1;
-- 
2.7.4



* [PATCH v4 2/2] x86/domctl: Don't pause the whole domain if only getting vcpu state
  2018-05-07  8:24 [PATCH v4 1/2] x86/hvm: Introduce *save_one() functions Alexandru Isaila
@ 2018-05-07  8:24 ` Alexandru Isaila
  2018-05-17 16:04   ` Jan Beulich
  2018-05-17 15:49 ` [PATCH v4 1/2] x86/hvm: Introduce *save_one() functions Jan Beulich
  1 sibling, 1 reply; 5+ messages in thread
From: Alexandru Isaila @ 2018-05-07  8:24 UTC (permalink / raw)
  To: xen-devel
  Cc: wei.liu2, andrew.cooper3, ian.jackson, paul.durrant, jbeulich,
	Alexandru Isaila

This patch adds a vcpu parameter to all the *save() functions. The *save()
functions now handle only one instance, and the for_each loop is moved
to the caller.

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V3:
	- Rebased to the latest staging version
	- Moved the for_each() loop to the caller.

NOTE: tested with xen/tools/misc/xen-hvmctx
---
 tools/tests/vhpet/emul.h       |   3 +-
 tools/tests/vhpet/main.c       |   8 ++-
 xen/arch/x86/cpu/mcheck/vmce.c |  16 ++----
 xen/arch/x86/hvm/hpet.c        |   3 +-
 xen/arch/x86/hvm/hvm.c         | 122 +++++++++++++++++++----------------------
 xen/arch/x86/hvm/i8254.c       |   3 +-
 xen/arch/x86/hvm/irq.c         |   9 ++-
 xen/arch/x86/hvm/mtrr.c        |  14 ++---
 xen/arch/x86/hvm/pmtimer.c     |   3 +-
 xen/arch/x86/hvm/rtc.c         |   3 +-
 xen/arch/x86/hvm/save.c        | 115 ++++++++++++++++++++++++++++----------
 xen/arch/x86/hvm/vioapic.c     |   3 +-
 xen/arch/x86/hvm/viridian.c    |  18 +++---
 xen/arch/x86/hvm/vlapic.c      |  28 ++++------
 xen/arch/x86/hvm/vpic.c        |   3 +-
 xen/include/asm-x86/hvm/save.h |   3 +-
 16 files changed, 199 insertions(+), 155 deletions(-)

diff --git a/tools/tests/vhpet/emul.h b/tools/tests/vhpet/emul.h
index 383acff..053a1f5 100644
--- a/tools/tests/vhpet/emul.h
+++ b/tools/tests/vhpet/emul.h
@@ -296,7 +296,8 @@ struct hvm_hw_hpet
 };
 
 typedef int (*hvm_save_handler)(struct domain *d,
-                                hvm_domain_context_t *h);
+                                hvm_domain_context_t *h,
+                                struct vcpu *vcpu);
 typedef int (*hvm_load_handler)(struct domain *d,
                                 hvm_domain_context_t *h);
 
diff --git a/tools/tests/vhpet/main.c b/tools/tests/vhpet/main.c
index 6fe65ea..77143dc 100644
--- a/tools/tests/vhpet/main.c
+++ b/tools/tests/vhpet/main.c
@@ -177,7 +177,13 @@ void __init hvm_register_savevm(uint16_t typecode,
 
 int do_save(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
 {
-    return hvm_sr_handlers[typecode].save(d, h);
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+    {
+        hvm_sr_handlers[typecode].save(d, h, v);
+    }
+    return 0;
 }
 
 int do_load(uint16_t typecode, struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 15b0f2a..2286875 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -357,20 +357,14 @@ void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
     ctxt->mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
 }
 
-static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h,
+                               struct vcpu *v)
 {
-    struct vcpu *v;
     int err = 0;
+    struct hvm_vmce_vcpu ctxt;
 
-    for_each_vcpu ( d, v )
-    {
-        struct hvm_vmce_vcpu ctxt;
-
-        vmce_save_vcpu_ctxt_one(v, &ctxt);
-        err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
-        if ( err )
-            break;
-    }
+    vmce_save_vcpu_ctxt_one(v, &ctxt);
+    err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
 
     return err;
 }
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index f7aed7f..d95c72b 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -509,7 +509,8 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
 };
 
 
-static int hpet_save(struct domain *d, hvm_domain_context_t *h)
+static int hpet_save(struct domain *d, hvm_domain_context_t *h,
+                     struct vcpu *vcpu)
 {
     HPETState *hp = domain_vhpet(d);
     struct vcpu *v = pt_global_vcpu_target(d);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6733f26..16d6736 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -745,19 +745,14 @@ void hvm_save_tsc_adjust_one(struct vcpu *v, struct hvm_tsc_adjust *ctxt)
     ctxt->tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
 }
 
-static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h,
+                               struct vcpu *v)
 {
-    struct vcpu *v;
     struct hvm_tsc_adjust ctxt;
     int err = 0;
 
-    for_each_vcpu ( d, v )
-    {
-        hvm_save_tsc_adjust_one(v, &ctxt);
-        err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
-        if ( err )
-            break;
-    }
+    hvm_save_tsc_adjust_one(v, &ctxt);
+    err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
 
     return err;
 }
@@ -884,25 +879,23 @@ void hvm_save_cpu_ctxt_one(struct vcpu *v, struct hvm_hw_cpu *ctxt)
     ctxt->dr7 = v->arch.debugreg[7];
 }
 
-static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h,
+                             struct vcpu *v)
 {
-    struct vcpu *v;
     struct hvm_hw_cpu ctxt;
 
-    for_each_vcpu ( d, v )
-    {
-        /* We don't need to save state for a vcpu that is down; the restore 
-         * code will leave it down if there is nothing saved. */
-        if ( v->pause_flags & VPF_down )
-            continue;
 
-        memset(&ctxt, 0, sizeof(ctxt));
+    /* We don't need to save state for a vcpu that is down; the restore
+     * code will leave it down if there is nothing saved. */
+    if ( v->pause_flags & VPF_down )
+        return 2;
 
-        hvm_save_cpu_ctxt_one(v, &ctxt);
+    memset(&ctxt, 0, sizeof(ctxt));
+    hvm_save_cpu_ctxt_one(v, &ctxt);
+
+    if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
+        return 1;
 
-        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
-            return 1; 
-    }
     return 0;
 }
 
@@ -1184,7 +1177,9 @@ HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                                            save_area) + \
                                   xstate_ctxt_size(xcr0))
 
-void hvm_save_cpu_xsave_states_one(struct vcpu *v, struct hvm_hw_cpu_xsave **ctx, hvm_domain_context_t *h)
+void hvm_save_cpu_xsave_states_one(struct vcpu *v,
+                                   struct hvm_hw_cpu_xsave **ctx,
+                                   hvm_domain_context_t *h)
 {
     unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
     struct hvm_hw_cpu_xsave *ctxt = *ctx;
@@ -1196,27 +1191,25 @@ void hvm_save_cpu_xsave_states_one(struct vcpu *v, struct hvm_hw_cpu_xsave **ctx
     ctxt->xcr0_accum = v->arch.xcr0_accum;
 }
 
-static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h,
+                                     struct vcpu *v)
 {
-    struct vcpu *v;
     struct hvm_hw_cpu_xsave *ctxt;
+    unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
+
 
     if ( !cpu_has_xsave )
         return 0;   /* do nothing */
 
-    for_each_vcpu ( d, v )
-    {
-        unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
 
-        if ( !xsave_enabled(v) )
-            continue;
-        if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size) )
-            return 1;
-        ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
-        hvm_save_cpu_xsave_states_one(v, &ctxt, h);
-        expand_xsave_states(v, &ctxt->save_area,
-                            size - offsetof(typeof(*ctxt), save_area));
-    }
+    if ( !xsave_enabled(v) )
+        return 2;
+    if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size) )
+        return 1;
+    ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+    hvm_save_cpu_xsave_states_one(v, &ctxt, h);
+    expand_xsave_states(v, &ctxt->save_area,
+                        size - offsetof(typeof(*ctxt), save_area));
 
     return 0;
 }
@@ -1391,44 +1384,41 @@ int hvm_save_cpu_msrs_one(struct vcpu *v, struct hvm_msr **ctx, hvm_domain_conte
     return 0;
 }
 
-static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h,
+                             struct vcpu *v)
 {
-    struct vcpu *v;
-
-    for_each_vcpu ( d, v )
-    {
-        struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
-        struct hvm_msr *ctxt;
-        unsigned int i;
+    struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
+    struct hvm_msr *ctxt;
+    unsigned int i;
+    int rc;
 
-        if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
-                             HVM_CPU_MSR_SIZE(msr_count_max)) )
-            return 1;
-        ctxt = (struct hvm_msr *)&h->data[h->cur];
-        ctxt->count = 0;
+    if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
+                         HVM_CPU_MSR_SIZE(msr_count_max)) )
+        return 1;
+    ctxt = (struct hvm_msr *)&h->data[h->cur];
+    ctxt->count = 0;
 
-        hvm_save_cpu_msrs_one(v, &ctxt, h);
+    rc = hvm_save_cpu_msrs_one(v, &ctxt, h);
 
-        if ( hvm_funcs.save_msr )
-            hvm_funcs.save_msr(v, ctxt);
+    if ( hvm_funcs.save_msr )
+        hvm_funcs.save_msr(v, ctxt);
 
-        ASSERT(ctxt->count <= msr_count_max);
+    ASSERT(ctxt->count <= msr_count_max);
 
-        for ( i = 0; i < ctxt->count; ++i )
-            ctxt->msr[i]._rsvd = 0;
+    for ( i = 0; i < ctxt->count; ++i )
+        ctxt->msr[i]._rsvd = 0;
 
-        if ( ctxt->count )
-        {
-            /* Rewrite length to indicate how much space we actually used. */
-            desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
-            h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
-        }
-        else
-            /* or rewind and remove the descriptor from the stream. */
-            h->cur -= sizeof(struct hvm_save_descriptor);
+    if ( ctxt->count )
+    {
+        /* Rewrite length to indicate how much space we actually used. */
+        desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
+        h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
     }
+    else
+        /* or rewind and remove the descriptor from the stream. */
+        h->cur -= sizeof(struct hvm_save_descriptor);
 
-    return 0;
+    return rc;
 }
 
 static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index 992f08d..6601288 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -390,7 +390,8 @@ void pit_stop_channel0_irq(PITState *pit)
     spin_unlock(&pit->lock);
 }
 
-static int pit_save(struct domain *d, hvm_domain_context_t *h)
+static int pit_save(struct domain *d, hvm_domain_context_t *h,
+                    struct vcpu *v)
 {
     PITState *pit = domain_vpit(d);
     int rc;
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index f528e2d..a94bb94 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -610,7 +610,8 @@ static int __init dump_irq_info_key_init(void)
 }
 __initcall(dump_irq_info_key_init);
 
-static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_pci(struct domain *d, hvm_domain_context_t *h,
+                        struct vcpu *v)
 {
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     unsigned int asserted, pdev, pintx;
@@ -642,7 +643,8 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
     return rc;
 }
 
-static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_isa(struct domain *d, hvm_domain_context_t *h,
+                        struct vcpu *v)
 {
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
@@ -650,7 +652,8 @@ static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
     return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
 }
 
-static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_link(struct domain *d, hvm_domain_context_t *h,
+                         struct vcpu *v)
 {
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 2678b25..e21416c 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -693,18 +693,16 @@ void hvm_save_mtrr_msr_one(struct vcpu *v, struct hvm_hw_mtrr *hw_mtrr)
 
 }
 
-static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h,
+                             struct vcpu *v)
 {
-    struct vcpu *v;
     struct hvm_hw_mtrr hw_mtrr;
     /* save mtrr&pat */
-    for_each_vcpu(d, v)
-    {
-        hvm_save_mtrr_msr_one(v, &hw_mtrr);
+    hvm_save_mtrr_msr_one(v, &hw_mtrr);
+
+    if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
+        return 1;
 
-        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
-            return 1;
-    }
     return 0;
 }
 
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 435647f..d701cb1 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -249,7 +249,8 @@ static int handle_pmt_io(
     return X86EMUL_OKAY;
 }
 
-static int acpi_save(struct domain *d, hvm_domain_context_t *h)
+static int acpi_save(struct domain *d, hvm_domain_context_t *h,
+                     struct vcpu *v)
 {
     struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
     PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index cb75b99..0896e55 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -737,7 +737,8 @@ void rtc_migrate_timers(struct vcpu *v)
 }
 
 /* Save RTC hardware state */
-static int rtc_save(struct domain *d, hvm_domain_context_t *h)
+static int rtc_save(struct domain *d, hvm_domain_context_t *h,
+                    struct vcpu *v)
 {
     RTCState *s = domain_vrtc(d);
     int rc;
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index 8984a23..0d5d3d9 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -135,54 +135,101 @@ size_t hvm_save_size(struct domain *d)
 int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
                  XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz)
 {
-    int rv;
+    int rv = 0;
     hvm_domain_context_t ctxt = { };
     const struct hvm_save_descriptor *desc;
-
+    bool is_single_instance = false;
+    uint32_t off = 0;
+    struct vcpu *v;
     if ( d->is_dying ||
          typecode > HVM_SAVE_CODE_MAX ||
          hvm_sr_handlers[typecode].size < sizeof(*desc) ||
          !hvm_sr_handlers[typecode].save )
         return -EINVAL;
 
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+         instance < d->max_vcpus )
+        is_single_instance = true;
+
     ctxt.size = hvm_sr_handlers[typecode].size;
-    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU )
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+         instance == d->max_vcpus )
         ctxt.size *= d->max_vcpus;
     ctxt.data = xmalloc_bytes(ctxt.size);
     if ( !ctxt.data )
         return -ENOMEM;
 
-    if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
-        printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
-               d->domain_id, typecode, rv);
-    else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
+    if ( is_single_instance )
+        vcpu_pause(d->vcpu[instance]);
+    else
+        domain_pause(d);
+
+    if ( is_single_instance )
     {
-        uint32_t off;
 
-        for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length )
+        if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt,
+                      d->vcpu[instance])) != 0 )
+        {
+            printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                   d->domain_id, typecode, rv);
+            vcpu_unpause(d->vcpu[instance]);
+        }
+        else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
         {
-            desc = (void *)(ctxt.data + off);
+            desc = (void *)(ctxt.data);
             /* Move past header */
-            off += sizeof(*desc);
+            off = sizeof(*desc);
             if ( ctxt.cur < desc->length ||
                  off > ctxt.cur - desc->length )
-                break;
-            if ( instance == desc->instance )
+                rv = -EFAULT;
+            rv = 0;
+            if ( guest_handle_is_null(handle) )
+                *bufsz = desc->length;
+            else if ( *bufsz < desc->length )
+                rv = -ENOBUFS;
+            else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                rv = -EFAULT;
+            else
+                *bufsz = desc->length;
+            vcpu_unpause(d->vcpu[instance]);
+        }
+    }
+    else
+    {
+        for_each_vcpu ( d, v )
+        {
+            if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt,
+                          d->vcpu[instance])) != 0 )
             {
-                rv = 0;
-                if ( guest_handle_is_null(handle) )
-                    *bufsz = desc->length;
-                else if ( *bufsz < desc->length )
-                    rv = -ENOBUFS;
-                else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
-                    rv = -EFAULT;
-                else
-                    *bufsz = desc->length;
-                break;
+                printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                       d->domain_id, typecode, rv);
+            }
+            else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
+            {
+                desc = (void *)(ctxt.data + off);
+                /* Move past header */
+                off += sizeof(*desc);
+                if ( ctxt.cur < desc->length ||
+                     off > ctxt.cur - desc->length )
+                    break;
+                if ( instance == desc->instance )
+                {
+                    rv = 0;
+                    if ( guest_handle_is_null(handle) )
+                        *bufsz = desc->length;
+                    else if ( *bufsz < desc->length )
+                        rv = -ENOBUFS;
+                    else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                        rv = -EFAULT;
+                    else
+                        *bufsz = desc->length;
+                    break;
+                }
+                off += desc->length;
             }
         }
+        domain_unpause(d);
     }
-
     xfree(ctxt.data);
     return rv;
 }
@@ -193,7 +240,8 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     struct hvm_save_header hdr;
     struct hvm_save_end end;
     hvm_save_handler handler;
-    unsigned int i;
+    unsigned int i, rc;
+    struct vcpu *v = NULL;
 
     if ( d->is_dying )
         return -EINVAL;
@@ -225,12 +273,19 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
         {
             printk(XENLOG_G_INFO "HVM%d save: %s\n",
                    d->domain_id, hvm_sr_handlers[i].name);
-            if ( handler(d, h) != 0 )
+            for_each_vcpu ( d, v )
             {
-                printk(XENLOG_G_ERR
-                       "HVM%d save: failed to save type %"PRIu16"\n",
-                       d->domain_id, i);
-                return -EFAULT;
+                rc = handler(d, h, v);
+                if ( rc == 2 )
+                    continue;
+
+                if ( rc != 0 )
+                {
+                    printk(XENLOG_G_ERR
+                           "HVM%d save: failed to save type %"PRIu16"\n",
+                           d->domain_id, i);
+                    return -EFAULT;
+                }
             }
         }
     }
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 97b419f..1d0ccfd 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -569,7 +569,8 @@ int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi)
     return vioapic->redirtbl[pin].fields.trig_mode;
 }
 
-static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
+static int ioapic_save(struct domain *d, hvm_domain_context_t *h,
+                       struct vcpu *v)
 {
     struct hvm_vioapic *s;
 
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 9a49e76..352baec 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -992,7 +992,8 @@ out:
     return HVM_HCALL_completed;
 }
 
-static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h,
+                                     struct vcpu *v)
 {
     struct hvm_viridian_domain_context ctxt = {
         .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val,
@@ -1034,21 +1035,18 @@ void viridian_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_viridian_vcpu_contex
     ctxt->vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending;
 }
 
-static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h,
+                                   struct vcpu *v)
 {
-    struct vcpu *v;
+    struct hvm_viridian_vcpu_context ctxt;
 
     if ( !is_viridian_domain(d) )
         return 0;
 
-    for_each_vcpu( d, v ) {
-        struct hvm_viridian_vcpu_context ctxt;
+    viridian_save_vcpu_ctxt_one(v, &ctxt);
 
-        viridian_save_vcpu_ctxt_one(v, &ctxt);
-
-        if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
-            return 1;
-    }
+    if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
+        return 1;
 
     return 0;
 }
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 1b9f00a..f130fec 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1435,43 +1435,35 @@ static void lapic_rearm(struct vlapic *s)
     s->timer_last_update = s->pt.last_plt_gtime;
 }
 
-static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h,
+                             struct vcpu *v)
 {
-    struct vcpu *v;
     struct vlapic *s;
     int rc = 0;
 
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
-    {
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 )
-            break;
-    }
+    s = vcpu_vlapic(v);
+    rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw);
 
     return rc;
 }
 
-static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h,
+                           struct vcpu *v)
 {
-    struct vcpu *v;
     struct vlapic *s;
     int rc = 0;
 
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
-    {
-        if ( hvm_funcs.sync_pir_to_irr )
-            hvm_funcs.sync_pir_to_irr(v);
+    if ( hvm_funcs.sync_pir_to_irr )
+        hvm_funcs.sync_pir_to_irr(v);
 
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
-            break;
-    }
+    s = vcpu_vlapic(v);
+    rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs);
 
     return rc;
 }
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index e160bbd..b72b3c7 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -371,7 +371,8 @@ static int vpic_intercept_elcr_io(
     return X86EMUL_OKAY;
 }
 
-static int vpic_save(struct domain *d, hvm_domain_context_t *h)
+static int vpic_save(struct domain *d, hvm_domain_context_t *h,
+                     struct vcpu *v)
 {
     struct hvm_hw_vpic *s;
     int i;
diff --git a/xen/include/asm-x86/hvm/save.h b/xen/include/asm-x86/hvm/save.h
index f889e8f..e0147a6 100644
--- a/xen/include/asm-x86/hvm/save.h
+++ b/xen/include/asm-x86/hvm/save.h
@@ -96,7 +96,8 @@ static inline uint16_t hvm_load_instance(struct hvm_domain_context *h)
  * the load handler will be called once for each instance found when
  * restoring.  Both return non-zero on error. */
 typedef int (*hvm_save_handler) (struct domain *d, 
-                                 hvm_domain_context_t *h);
+                                 hvm_domain_context_t *h,
+                                 struct vcpu *v);
 typedef int (*hvm_load_handler) (struct domain *d,
                                  hvm_domain_context_t *h);
 
-- 
2.7.4



* Re: [PATCH v4 1/2] x86/hvm: Introduce *save_one() functions
  2018-05-07  8:24 [PATCH v4 1/2] x86/hvm: Introduce *save_one() functions Alexandru Isaila
  2018-05-07  8:24 ` [PATCH v4 2/2] x86/domctl: Don't pause the whole domain if only getting vcpu state Alexandru Isaila
@ 2018-05-17 15:49 ` Jan Beulich
  1 sibling, 0 replies; 5+ messages in thread
From: Jan Beulich @ 2018-05-17 15:49 UTC (permalink / raw)
  To: aisaila; +Cc: Andrew Cooper, Paul Durrant, Wei Liu, Ian Jackson, xen-devel

>>> On 07.05.18 at 10:24, <aisaila@bitdefender.com> wrote:
> This patch introduces save_one() functions. They will be called in the
> *save() so we can extract data for a single instance.

Mostly fine, but please split up into one patch per save type.

> --- a/xen/arch/x86/cpu/mcheck/vmce.c
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> @@ -349,6 +349,14 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
>      return ret;
>  }
>  
> +void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)

static (also elsewhere)

> @@ -1173,6 +1184,18 @@ HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
>                                             save_area) + \
>                                    xstate_ctxt_size(xcr0))
>  
> +void hvm_save_cpu_xsave_states_one(struct vcpu *v, struct hvm_hw_cpu_xsave **ctx, hvm_domain_context_t *h)

This is inconsistent with the others: Why the extra indirection for ctx?
And why the passing of h?

> +{
> +    unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
> +    struct hvm_hw_cpu_xsave *ctxt = *ctx;
> +
> +    h->cur += size;

This belongs in the caller afaict.
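
Something along these lines would avoid both the extra indirection and
the passing of h (an untested sketch only, to illustrate the point):

    static void hvm_save_cpu_xsave_states_one(struct vcpu *v,
                                              struct hvm_hw_cpu_xsave *ctxt)
    {
        ctxt->xfeature_mask = xfeature_mask;
        ctxt->xcr0 = v->arch.xcr0;
        ctxt->xcr0_accum = v->arch.xcr0_accum;
    }

with the caller keeping the cursor adjustment:

    ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
    h->cur += size;
    hvm_save_cpu_xsave_states_one(v, ctxt);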

> @@ -1339,6 +1358,39 @@ static const uint32_t msrs_to_send[] = {
>  };
>  static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
>  
> +int hvm_save_cpu_msrs_one(struct vcpu *v, struct hvm_msr **ctx, hvm_domain_context_t *h)

Same as above; I can't even spot where you use h in this function.

>  static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
>  {
> -    int i;
>      struct vcpu *v;
>      struct hvm_hw_mtrr hw_mtrr;
> -    struct mtrr_state *mtrr_state;
>      /* save mtrr&pat */

Please take the opportunity and add a blank line after the declarations.

> --- a/xen/arch/x86/hvm/viridian.c
> +++ b/xen/arch/x86/hvm/viridian.c
> @@ -1028,6 +1028,12 @@ static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
>  HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
>                            viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
>  
> +void viridian_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_viridian_vcpu_context *ctxt)
> +{
> +    ctxt->vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw;
> +    ctxt->vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending;
> +}
> +
>  static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
>  {
>      struct vcpu *v;
> @@ -1036,10 +1042,9 @@ static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
>          return 0;
>  
>      for_each_vcpu( d, v ) {
> -        struct hvm_viridian_vcpu_context ctxt = {
> -            .vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw,
> -            .vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending,
> -        };
> +        struct hvm_viridian_vcpu_context ctxt;
> +
> +        viridian_save_vcpu_ctxt_one(v, &ctxt);

There is a reason ctxt has an initializer: You're now leaking 7 bytes of hypervisor
stack data (through the _pad[] array).
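
The simplest way of keeping that guarantee would seem to be an empty
initializer here (again merely a sketch):

    struct hvm_viridian_vcpu_context ctxt = {};

    viridian_save_vcpu_ctxt_one(v, &ctxt);

Alternatively the _one() function itself could zero the structure before
filling it in.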

Jan




* Re: [PATCH v4 2/2] x86/domctl: Don't pause the whole domain if only getting vcpu state
  2018-05-07  8:24 ` [PATCH v4 2/2] x86/domctl: Don't pause the whole domain if only getting vcpu state Alexandru Isaila
@ 2018-05-17 16:04   ` Jan Beulich
  2018-05-18 15:01     ` Jan Beulich
  0 siblings, 1 reply; 5+ messages in thread
From: Jan Beulich @ 2018-05-17 16:04 UTC (permalink / raw)
  To: aisaila; +Cc: Andrew Cooper, Paul Durrant, Wei Liu, Ian Jackson, xen-devel

>>> On 07.05.18 at 10:24, <aisaila@bitdefender.com> wrote:
> --- a/xen/arch/x86/cpu/mcheck/vmce.c
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> @@ -357,20 +357,14 @@ void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
>      ctxt->mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
>  }
>  
> -static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
> +static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h,
> +                               struct vcpu *v)
>  {
> -    struct vcpu *v;
>      int err = 0;
> +    struct hvm_vmce_vcpu ctxt;
>  
> -    for_each_vcpu ( d, v )
> -    {
> -        struct hvm_vmce_vcpu ctxt;
> -
> -        vmce_save_vcpu_ctxt_one(v, &ctxt);
> -        err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
> -        if ( err )
> -            break;
> -    }
> +    vmce_save_vcpu_ctxt_one(v, &ctxt);
> +    err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
>  
>      return err;
>  }

At the example of this one: The idea of breaking out the patch introducing
the _one() functions was to avoid restructuring in this patch like what you
do here. Any such change not strictly fitting under the title of this patch
should be broken out. There may be multiple steps involved here.

As it stands, the function is now no longer meaningfully different from
vmce_save_vcpu_ctxt_one(), and this pattern recurs. Such redundancy
is undesirable. Together with you now passing v and d (when just v
would suffice) I think you want to further re-structure how handling of
save/restore happens, such that no stub functions like the one here
remain. IOW after having introduced the _one() functions, a second
transformation would be expected to eliminate the original ones, with
(as you do here) the loop moving into the caller.
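
For this example the end result might then simply be (a sketch only,
assuming the handler type is eventually changed to take just v and h):

    static int vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
    {
        struct hvm_vmce_vcpu ctxt = {
            .caps = v->arch.vmce.mcg_cap,
            .mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
            .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2,
            .mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl,
        };

        return hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
    }

with the for_each_vcpu() loop living in the caller.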

Jan




* Re: [PATCH v4 2/2] x86/domctl: Don't pause the whole domain if only getting vcpu state
  2018-05-17 16:04   ` Jan Beulich
@ 2018-05-18 15:01     ` Jan Beulich
  0 siblings, 0 replies; 5+ messages in thread
From: Jan Beulich @ 2018-05-18 15:01 UTC (permalink / raw)
  To: aisaila; +Cc: Andrew Cooper, Paul Durrant, Wei Liu, Ian Jackson, xen-devel

>>> On 17.05.18 at 18:04, <JBeulich@suse.com> wrote:
>>>> On 07.05.18 at 10:24, <aisaila@bitdefender.com> wrote:
>> --- a/xen/arch/x86/cpu/mcheck/vmce.c
>> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
>> @@ -357,20 +357,14 @@ void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
>>      ctxt->mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
>>  }
>>  
>> -static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
>> +static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h,
>> +                               struct vcpu *v)
>>  {
>> -    struct vcpu *v;
>>      int err = 0;
>> +    struct hvm_vmce_vcpu ctxt;
>>  
>> -    for_each_vcpu ( d, v )
>> -    {
>> -        struct hvm_vmce_vcpu ctxt;
>> -
>> -        vmce_save_vcpu_ctxt_one(v, &ctxt);
>> -        err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
>> -        if ( err )
>> -            break;
>> -    }
>> +    vmce_save_vcpu_ctxt_one(v, &ctxt);
>> +    err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
>>  
>>      return err;
>>  }
> 
> At the example of this one: The idea of breaking out the patch introducing
> the _one() functions was to avoid restructuring in this patch like what you
> do here. Any such change not strictly fitting under the title of this patch
> should be broken out. There may be multiple steps involved here.
> 
> As it stands, the function is now no longer meaningfully different from
> vmce_save_vcpu_ctxt_one(), and this pattern recurs. Such redundancy
> is undesirable. Together with you now passing v and d (when just v
> would suffice) I think you want to further re-structure how handling of
> save/restore happens, such that no stub functions like the one here
> remain. IOW after having introduced the _one() functions, a second
> transformation would be expected to eliminate the original ones, with
> (as you do here) the loop moving into the caller.

I think I should further clarify this reply of mine: The goal of the
transformation should be that in the end we continue to have a single
load and a single save function for every save type. These should be
referenced by HVM_REGISTER_SAVE_RESTORE() or whatever clone of
it may turn out necessary (note how its uses currently specify
HVMSR_PER_VCPU, which may in the end no longer be necessary).
That will also make the asymmetry go away that the save functions
currently iterate over all vCPU-s, while the load functions don't (it'll
presumably remain to be that way for multi-instance types where it's
not the vCPU that is determining the instance, like the PIC).

Ideally save functions would further match load ones by the caller
specifying the instance. Suitable unique return values may need to be
used to signal the caller when to end the iteration. For example, the
functions could return a "next instance" indicator. (I think you agree
that this is an unavoidable difference to the load functions, where the
instance comes with the load record.)
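
To sketch the idea (no more than that - details would need working out):
a save handler could return the number of the next instance to process,
zero once there are none left, or a negative error value, allowing the
caller to drive the iteration:

    for ( instance = 0; ; instance = rc )
    {
        rc = hvm_sr_handlers[typecode].save(d, h, instance);
        if ( rc < 0 )
            return rc;      /* error */
        if ( rc == 0 )
            break;          /* no further instances */
    }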

In any event, just to re-iterate - the final patch under this title should
preferably not have a need to touch any of the save functions; all of
this should be done in prereq changes. That'll then allow reviewers to
focus on just the specific bit of new behavior you want when reviewing this
(presumably last) patch of the series.

Jan



