Subject: [PATCH] x86/HVM: move vendor independent CPU save/restore logic to shared code
From: Jan Beulich @ 2018-10-05 11:31 UTC
  To: xen-devel
  Cc: Kevin Tian, Tamas K Lengyel, Wei Liu, Suravee Suthikulpanit,
	Razvan Cojocaru, Andrew Cooper, Jun Nakajima, Boris Ostrovsky,
	Brian Woods

A few pieces of the handling here are (no longer?) vendor specific, and
hence there's no point in replicating the code. Make sure that fields of
struct hvm_hw_cpu instances which are not otherwise pre-filled are
zero-filled before calling the vendor "save" hook, eliminating the need
for the hook functions to zero individual fields.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
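
Note: the zeroing relied upon above falls straight out of C's initializer
semantics: fields not named in a designated initializer (as in
hvm_save_cpu_ctxt) or covered by an empty initializer (as in
vm_event_fill_regs below) are implicitly zero-initialized, as for static
storage. A minimal standalone sketch of that guarantee, using a
hypothetical cut-down structure rather than the real struct hvm_hw_cpu:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for struct hvm_hw_cpu; illustration only. */
struct hw_cpu_demo {
    uint64_t cr0;
    uint64_t msr_efer;
    uint64_t pending_event;  /* intentionally not initialized below */
    uint32_t error_code;     /* likewise */
};

int main(void)
{
    /*
     * Designated initializer: every field not mentioned is implicitly
     * zero-initialized (C99 6.7.8), so the vendor hooks no longer need
     * to store zeroes explicitly.
     */
    struct hw_cpu_demo ctxt = {
        .cr0      = 0x80050033UL,  /* arbitrary example values */
        .msr_efer = 0xd01UL,
    };

    assert(ctxt.pending_event == 0);
    assert(ctxt.error_code == 0);
    return 0;
}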

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -787,12 +787,17 @@ static int hvm_save_cpu_ctxt(struct vcpu
         .r13 = v->arch.user_regs.r13,
         .r14 = v->arch.user_regs.r14,
         .r15 = v->arch.user_regs.r15,
+        .cr0 = v->arch.hvm.guest_cr[0],
+        .cr2 = v->arch.hvm.guest_cr[2],
+        .cr3 = v->arch.hvm.guest_cr[3],
+        .cr4 = v->arch.hvm.guest_cr[4],
         .dr0 = v->arch.debugreg[0],
         .dr1 = v->arch.debugreg[1],
         .dr2 = v->arch.debugreg[2],
         .dr3 = v->arch.debugreg[3],
         .dr6 = v->arch.debugreg[6],
         .dr7 = v->arch.debugreg[7],
+        .msr_efer = v->arch.hvm.guest_efer,
     };
 
     /*
@@ -1023,6 +1028,9 @@ static int hvm_load_cpu_ctxt(struct doma
     if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
         return -EINVAL;
 
+    v->arch.hvm.guest_cr[2] = ctxt.cr2;
+    hvm_update_guest_cr(v, 2);
+
     if ( hvm_funcs.tsc_scaling.setup )
         hvm_funcs.tsc_scaling.setup(v);
 
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -269,17 +269,10 @@ static int svm_vmcb_save(struct vcpu *v,
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    c->cr0 = v->arch.hvm.guest_cr[0];
-    c->cr2 = v->arch.hvm.guest_cr[2];
-    c->cr3 = v->arch.hvm.guest_cr[3];
-    c->cr4 = v->arch.hvm.guest_cr[4];
-
     c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
     c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
     c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
 
-    c->pending_event = 0;
-    c->error_code = 0;
     if ( vmcb->eventinj.fields.v &&
          hvm_event_needs_reinjection(vmcb->eventinj.fields.type,
                                      vmcb->eventinj.fields.vector) )
@@ -338,11 +331,9 @@ static int svm_vmcb_restore(struct vcpu
     }
 
     v->arch.hvm.guest_cr[0] = c->cr0 | X86_CR0_ET;
-    v->arch.hvm.guest_cr[2] = c->cr2;
     v->arch.hvm.guest_cr[3] = c->cr3;
     v->arch.hvm.guest_cr[4] = c->cr4;
     svm_update_guest_cr(v, 0, 0);
-    svm_update_guest_cr(v, 2, 0);
     svm_update_guest_cr(v, 4, 0);
 
     /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
@@ -384,8 +375,6 @@ static void svm_save_cpu_state(struct vc
     data->msr_star         = vmcb->star;
     data->msr_cstar        = vmcb->cstar;
     data->msr_syscall_mask = vmcb->sfmask;
-    data->msr_efer         = v->arch.hvm.guest_efer;
-    data->msr_flags        = 0;
 }
 
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -646,19 +646,10 @@ static void vmx_vmcs_save(struct vcpu *v
 
     vmx_vmcs_enter(v);
 
-    c->cr0 = v->arch.hvm.guest_cr[0];
-    c->cr2 = v->arch.hvm.guest_cr[2];
-    c->cr3 = v->arch.hvm.guest_cr[3];
-    c->cr4 = v->arch.hvm.guest_cr[4];
-
-    c->msr_efer = v->arch.hvm.guest_efer;
-
     __vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
     __vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
     __vmread(GUEST_SYSENTER_EIP, &c->sysenter_eip);
 
-    c->pending_event = 0;
-    c->error_code = 0;
     __vmread(VM_ENTRY_INTR_INFO, &ev);
     if ( (ev & INTR_INFO_VALID_MASK) &&
          hvm_event_needs_reinjection(MASK_EXTR(ev, INTR_INFO_INTR_TYPE_MASK),
@@ -732,10 +723,8 @@ static int vmx_vmcs_restore(struct vcpu
 
     vmx_vmcs_enter(v);
 
-    v->arch.hvm.guest_cr[2] = c->cr2;
     v->arch.hvm.guest_cr[4] = c->cr4;
     vmx_update_guest_cr(v, 0, 0);
-    vmx_update_guest_cr(v, 2, 0);
     vmx_update_guest_cr(v, 4, 0);
 
     v->arch.hvm.guest_efer = c->msr_efer;
@@ -770,7 +759,6 @@ static int vmx_vmcs_restore(struct vcpu
 static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
     data->shadow_gs        = v->arch.hvm.vmx.shadow_gs;
-    data->msr_flags        = 0;
     data->msr_lstar        = v->arch.hvm.vmx.lstar;
     data->msr_star         = v->arch.hvm.vmx.star;
     data->msr_cstar        = v->arch.hvm.vmx.cstar;
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -127,7 +127,7 @@ void vm_event_fill_regs(vm_event_request
 #ifdef CONFIG_HVM
     const struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct segment_register seg;
-    struct hvm_hw_cpu ctxt;
+    struct hvm_hw_cpu ctxt = {};
     struct vcpu *curr = current;
 
     ASSERT(is_hvm_vcpu(curr));
@@ -157,16 +157,16 @@ void vm_event_fill_regs(vm_event_request
     req->data.regs.x86.rip    = regs->rip;
 
     req->data.regs.x86.dr7 = curr->arch.debugreg[7];
-    req->data.regs.x86.cr0 = ctxt.cr0;
-    req->data.regs.x86.cr2 = ctxt.cr2;
-    req->data.regs.x86.cr3 = ctxt.cr3;
-    req->data.regs.x86.cr4 = ctxt.cr4;
+    req->data.regs.x86.cr0 = curr->arch.hvm.guest_cr[0];
+    req->data.regs.x86.cr2 = curr->arch.hvm.guest_cr[2];
+    req->data.regs.x86.cr3 = curr->arch.hvm.guest_cr[3];
+    req->data.regs.x86.cr4 = curr->arch.hvm.guest_cr[4];
 
     req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
     req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
     req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;
 
-    req->data.regs.x86.msr_efer = ctxt.msr_efer;
+    req->data.regs.x86.msr_efer = curr->arch.hvm.guest_efer;
     req->data.regs.x86.msr_star = ctxt.msr_star;
     req->data.regs.x86.msr_lstar = ctxt.msr_lstar;
 
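For completeness, a minimal sketch (with hypothetical names, not the real
Xen hook table) of the contract this patch establishes between the shared
caller and the vendor "save" hooks: the caller hands over a zero-filled
context, and a hook writes only the fields it owns:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for struct hvm_hw_cpu and the hook table. */
struct hw_cpu_demo {
    uint64_t msr_star, msr_lstar, msr_flags, pending_event;
};

struct hooks_demo {
    void (*save_cpu_ctxt)(struct hw_cpu_demo *c);
};

/* Vendor hook after the change: msr_flags, pending_event etc. are left alone. */
static void vendor_save(struct hw_cpu_demo *c)
{
    c->msr_star  = 0x1234;  /* arbitrary example values */
    c->msr_lstar = 0x5678;
}

/* Shared caller: zero-fill first, so fields no hook writes stay well defined. */
static void shared_save(const struct hooks_demo *h, struct hw_cpu_demo *c)
{
    memset(c, 0, sizeof(*c));
    h->save_cpu_ctxt(c);
}

int main(void)
{
    const struct hooks_demo hooks = { .save_cpu_ctxt = vendor_save };
    struct hw_cpu_demo ctxt;

    shared_save(&hooks, &ctxt);
    assert(ctxt.msr_flags == 0 && ctxt.pending_event == 0);
    return 0;
}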




Thread overview: 6+ messages
2018-10-05 11:31 [PATCH] x86/HVM: move vendor independent CPU save/restore logic to shared code Jan Beulich
2018-10-05 12:01 ` Razvan Cojocaru
2018-10-05 12:18 ` Andrew Cooper
2018-10-05 13:33   ` Jan Beulich
2018-10-05 13:16 ` Boris Ostrovsky
2018-10-09  5:22 ` Tian, Kevin
