From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Juergen Gross <jgross@suse.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Subject: [PATCH v2 2/2] SVM: introduce a VM entry helper
Date: Mon, 30 Apr 2018 05:37:43 -0600
Message-ID: <5AE7000702000078001BF8A5@prv1-mh.provo.novell.com>
In-Reply-To: <5AE6ED2302000078001BF867@prv1-mh.provo.novell.com>

Copying the register values doesn't need to be done in assembly. The
VMLOAD invocation can also be further deferred (and centralized).
Therefore replace the svm_asid_handle_vmrun() invocation with a call to
the new helper.

Similarly move the VM exit side register value copying into
svm_vmexit_handler().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
---
TBD: Now that we always make it out to guest context after VMLOAD, perhaps
     svm_sync_vmcb() should no longer override vmcb_needs_vmsave, and
     svm_vmexit_handler() would then no longer need to set the field at all.
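
     For illustration only (not part of this patch), a rough sketch of what
     that follow-up might look like, assuming svm_sync_vmcb()'s VMLOAD step
     gets open-coded in the helper (using svm_vmload() directly) and the
     sync state is recorded once, at the point where VMRUN is guaranteed to
     follow:

         void svm_vmenter_helper(const struct cpu_user_regs *regs)
         {
             struct vcpu *curr = current;
             struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;

             svm_asid_handle_vmrun();

             if ( curr->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
                 svm_vmload(vmcb);

             /*
              * VMRUN is now guaranteed to follow, so the in-CPU register
              * state is about to become stale again.  Record that once
              * here, instead of in svm_sync_vmcb() (the override) and in
              * svm_vmexit_handler().
              */
             curr->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;

             vmcb->rax = regs->rax;
             vmcb->rip = regs->rip;
             vmcb->rsp = regs->rsp;
             vmcb->rflags = regs->rflags | X86_EFLAGS_MBS;
         }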

--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -61,24 +61,14 @@ UNLIKELY_START(ne, nsvm_hap)
         jmp  .Lsvm_do_resume
 __UNLIKELY_END(nsvm_hap)
 
-        call svm_asid_handle_vmrun
+        mov  %rsp, %rdi
+        call svm_vmenter_helper
 
         cmpb $0,tb_init_done(%rip)
 UNLIKELY_START(nz, svm_trace)
         call svm_trace_vmentry
 UNLIKELY_END(svm_trace)
 
-        mov  VCPU_svm_vmcb(%rbx),%rcx
-        mov  UREGS_rax(%rsp),%rax
-        mov  %rax,VMCB_rax(%rcx)
-        mov  UREGS_rip(%rsp),%rax
-        mov  %rax,VMCB_rip(%rcx)
-        mov  UREGS_rsp(%rsp),%rax
-        mov  %rax,VMCB_rsp(%rcx)
-        mov  UREGS_eflags(%rsp),%rax
-        or   $X86_EFLAGS_MBS,%rax
-        mov  %rax,VMCB_rflags(%rcx)
-
         mov VCPU_arch_msr(%rbx), %rax
         mov VCPUMSR_spec_ctrl_raw(%rax), %eax
 
@@ -111,16 +101,6 @@ UNLIKELY_END(svm_trace)
         SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
-        mov  VCPU_svm_vmcb(%rbx),%rcx
-        mov  VMCB_rax(%rcx),%rax
-        mov  %rax,UREGS_rax(%rsp)
-        mov  VMCB_rip(%rcx),%rax
-        mov  %rax,UREGS_rip(%rsp)
-        mov  VMCB_rsp(%rcx),%rax
-        mov  %rax,UREGS_rsp(%rsp)
-        mov  VMCB_rflags(%rcx),%rax
-        mov  %rax,UREGS_eflags(%rsp)
-
         STGI
 GLOBAL(svm_stgi_label)
         mov  %rsp,%rdi
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1169,12 +1169,25 @@ static void noreturn svm_do_resume(struc
 
     hvm_do_resume(v);
 
-    if ( v->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
-        svm_sync_vmcb(v, vmcb_needs_vmsave);
-
     reset_stack_and_jump(svm_asm_do_resume);
 }
 
+void svm_vmenter_helper(const struct cpu_user_regs *regs)
+{
+    struct vcpu *curr = current;
+    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+
+    svm_asid_handle_vmrun();
+
+    if ( curr->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
+        svm_sync_vmcb(curr, vmcb_needs_vmsave);
+
+    vmcb->rax = regs->rax;
+    vmcb->rip = regs->rip;
+    vmcb->rsp = regs->rsp;
+    vmcb->rflags = regs->rflags | X86_EFLAGS_MBS;
+}
+
 static void svm_guest_osvw_init(struct vcpu *vcpu)
 {
     if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
@@ -2621,6 +2634,12 @@ void svm_vmexit_handler(struct cpu_user_
     struct vlapic *vlapic = vcpu_vlapic(v);
 
     v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
+
+    regs->rax = vmcb->rax;
+    regs->rip = vmcb->rip;
+    regs->rsp = vmcb->rsp;
+    regs->rflags = vmcb->rflags;
+
     hvm_invalidate_regs_fields(regs);
 
     if ( paging_mode_hap(v->domain) )
@@ -3107,9 +3126,6 @@ void svm_vmexit_handler(struct cpu_user_
     }
 
   out:
-    if ( v->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
-        svm_sync_vmcb(v, vmcb_needs_vmsave);
-
     if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) )
         return;
 
@@ -3118,7 +3134,6 @@ void svm_vmexit_handler(struct cpu_user_
     intr.fields.tpr =
         (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
     vmcb_set_vintr(vmcb, intr);
-    ASSERT(v->arch.hvm_svm.vmcb_sync_state != vmcb_needs_vmload);
 }
 
 void svm_trace_vmentry(void)
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -119,12 +119,6 @@ void __dummy__(void)
     OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
     BLANK();
 
-    OFFSET(VMCB_rax, struct vmcb_struct, rax);
-    OFFSET(VMCB_rip, struct vmcb_struct, rip);
-    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
-    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
-    BLANK();
-
     OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
     OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
     BLANK();
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -23,6 +23,7 @@
 #include <asm/processor.h>
 
 void svm_asid_init(const struct cpuinfo_x86 *c);
+void svm_asid_handle_vmrun(void);
 
 static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
 {



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Thread overview: 14+ messages
2018-04-30 10:17 [PATCH v2 0/2] SVM: guest state handling adjustments Jan Beulich
2018-04-30 11:37 ` [PATCH v2 1/2] SVM: re-work VMCB sync-ing Jan Beulich
2018-04-30 15:30   ` Boris Ostrovsky
2018-04-30 15:50     ` Jan Beulich
2018-04-30 15:56       ` Boris Ostrovsky
2018-04-30 16:01         ` Jan Beulich
2018-04-30 17:07   ` Andrew Cooper
2018-04-30 17:50     ` Boris Ostrovsky
2018-05-02  7:11       ` Jan Beulich
2018-05-02 14:45         ` Boris Ostrovsky
2018-05-03 14:43           ` Jan Beulich
2018-05-03 18:34             ` Boris Ostrovsky
2018-04-30 11:37 ` Jan Beulich [this message]
2018-04-30 11:51   ` [PATCH v2 2/2] SVM: introduce a VM entry helper Andrew Cooper
