From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Juergen Gross <jgross@suse.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>
Subject: [PATCH v3 2/2] SVM: introduce a VM entry helper
Date: Fri, 04 May 2018 09:11:37 -0600 [thread overview]
Message-ID: <5AEC782902000078001C0BEE@prv1-mh.provo.novell.com> (raw)
In-Reply-To: <5AEC773002000078001C0BCE@prv1-mh.provo.novell.com>
Neither the register values copying nor the trace entry generation need
doing in assembly. The VMLOAD invocation can also be further deferred
(and centralized). Therefore replace the svm_asid_handle_vmrun()
invocation with an invocation of the new helper.
Similarly move the VM exit side register value copying into
svm_vmexit_handler().
Now that we always make it out to guest context after VMLOAD,
svm_sync_vmcb() no longer overrides vmcb_needs_vmsave, making
svm_vmexit_handler() setting the field early unnecessary.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: svm_vmexit_handler() no longer explicitly sets vmcb_sync_state, and
svm_sync_vmcb() no longer converts a needs-vmsave request into
in-sync state. Also move the svm_trace_vmentry() invocation to C.
v2: New.
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -61,23 +61,8 @@ UNLIKELY_START(ne, nsvm_hap)
jmp .Lsvm_do_resume
__UNLIKELY_END(nsvm_hap)
- call svm_asid_handle_vmrun
-
- cmpb $0,tb_init_done(%rip)
-UNLIKELY_START(nz, svm_trace)
- call svm_trace_vmentry
-UNLIKELY_END(svm_trace)
-
- mov VCPU_svm_vmcb(%rbx),%rcx
- mov UREGS_rax(%rsp),%rax
- mov %rax,VMCB_rax(%rcx)
- mov UREGS_rip(%rsp),%rax
- mov %rax,VMCB_rip(%rcx)
- mov UREGS_rsp(%rsp),%rax
- mov %rax,VMCB_rsp(%rcx)
- mov UREGS_eflags(%rsp),%rax
- or $X86_EFLAGS_MBS,%rax
- mov %rax,VMCB_rflags(%rcx)
+ mov %rsp, %rdi
+ call svm_vmenter_helper
mov VCPU_arch_msr(%rbx), %rax
mov VCPUMSR_spec_ctrl_raw(%rax), %eax
@@ -111,16 +96,6 @@ UNLIKELY_END(svm_trace)
SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
- mov VCPU_svm_vmcb(%rbx),%rcx
- mov VMCB_rax(%rcx),%rax
- mov %rax,UREGS_rax(%rsp)
- mov VMCB_rip(%rcx),%rax
- mov %rax,UREGS_rip(%rsp)
- mov VMCB_rsp(%rcx),%rax
- mov %rax,UREGS_rsp(%rsp)
- mov VMCB_rflags(%rcx),%rax
- mov %rax,UREGS_eflags(%rsp)
-
STGI
GLOBAL(svm_stgi_label)
mov %rsp,%rdi
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -687,10 +687,9 @@ static void svm_sync_vmcb(struct vcpu *v
if ( new_state == vmcb_needs_vmsave )
{
if ( arch_svm->vmcb_sync_state == vmcb_needs_vmload )
- {
svm_vmload(arch_svm->vmcb);
- arch_svm->vmcb_sync_state = vmcb_in_sync;
- }
+
+ arch_svm->vmcb_sync_state = new_state;
}
else
{
@@ -1171,11 +1170,29 @@ static void noreturn svm_do_resume(struc
hvm_do_resume(v);
- svm_sync_vmcb(v, vmcb_needs_vmsave);
-
reset_stack_and_jump(svm_asm_do_resume);
}
+void svm_vmenter_helper(const struct cpu_user_regs *regs)
+{
+ struct vcpu *curr = current;
+ struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+
+ svm_asid_handle_vmrun();
+
+ if ( unlikely(tb_init_done) )
+ HVMTRACE_ND(VMENTRY,
+ nestedhvm_vcpu_in_guestmode(curr) ? TRC_HVM_NESTEDFLAG : 0,
+ 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
+
+ svm_sync_vmcb(curr, vmcb_needs_vmsave);
+
+ vmcb->rax = regs->rax;
+ vmcb->rip = regs->rip;
+ vmcb->rsp = regs->rsp;
+ vmcb->rflags = regs->rflags | X86_EFLAGS_MBS;
+}
+
static void svm_guest_osvw_init(struct vcpu *vcpu)
{
if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
@@ -2621,7 +2638,11 @@ void svm_vmexit_handler(struct cpu_user_
bool_t vcpu_guestmode = 0;
struct vlapic *vlapic = vcpu_vlapic(v);
- v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
+ regs->rax = vmcb->rax;
+ regs->rip = vmcb->rip;
+ regs->rsp = vmcb->rsp;
+ regs->rflags = vmcb->rflags;
+
hvm_invalidate_regs_fields(regs);
if ( paging_mode_hap(v->domain) )
@@ -3108,8 +3129,6 @@ void svm_vmexit_handler(struct cpu_user_
}
out:
- svm_sync_vmcb(v, vmcb_needs_vmsave);
-
if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) )
return;
@@ -3118,17 +3137,8 @@ void svm_vmexit_handler(struct cpu_user_
intr.fields.tpr =
(vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
vmcb_set_vintr(vmcb, intr);
- ASSERT(v->arch.hvm_svm.vmcb_sync_state != vmcb_needs_vmload);
}
-void svm_trace_vmentry(void)
-{
- struct vcpu *curr = current;
- HVMTRACE_ND(VMENTRY,
- nestedhvm_vcpu_in_guestmode(curr) ? TRC_HVM_NESTEDFLAG : 0,
- 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
-}
-
/*
* Local variables:
* mode: C
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -119,12 +119,6 @@ void __dummy__(void)
OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
BLANK();
- OFFSET(VMCB_rax, struct vmcb_struct, rax);
- OFFSET(VMCB_rip, struct vmcb_struct, rip);
- OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
- OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
- BLANK();
-
OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
BLANK();
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -23,6 +23,7 @@
#include <asm/processor.h>
void svm_asid_init(const struct cpuinfo_x86 *c);
+void svm_asid_handle_vmrun(void);
static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
{
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
next prev parent reply other threads:[~2018-05-04 15:11 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-05-04 15:07 [PATCH v3 0/2] SVM: guest state handling adjustments Jan Beulich
2018-05-04 15:10 ` [PATCH v3 1/2] SVM: re-work VMCB sync-ing Jan Beulich
2018-05-04 15:11 ` Jan Beulich [this message]
2018-05-07 14:11 ` [PATCH v3 2/2] SVM: introduce a VM entry helper Jan Beulich
2018-05-07 14:19 ` Andrew Cooper
2018-05-07 15:25 ` Jan Beulich
2018-05-07 15:29 ` Andrew Cooper
2018-05-07 15:46 ` Boris Ostrovsky
2018-05-07 15:47 ` Jan Beulich
2018-05-07 15:49 ` Andrew Cooper
2018-05-07 16:16 ` Boris Ostrovsky
2018-05-04 17:52 ` [PATCH v3 0/2] SVM: guest state handling adjustments Andrew Cooper
2018-05-04 18:38 ` Boris Ostrovsky
[not found] ` <5AEC77E802000078001C0BEB@suse.com>
2018-05-07 6:30 ` [PATCH v3 1/2] SVM: re-work VMCB sync-ing Juergen Gross
[not found] ` <5AEC782902000078001C0BEE@suse.com>
2018-05-07 6:31 ` [PATCH v3 2/2] SVM: introduce a VM entry helper Juergen Gross
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=5AEC782902000078001C0BEE@prv1-mh.provo.novell.com \
--to=jbeulich@suse.com \
--cc=andrew.cooper3@citrix.com \
--cc=boris.ostrovsky@oracle.com \
--cc=jgross@suse.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.