From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: thomas.lendacky@amd.com, jmattson@google.com, seanjc@google.com,
	stable@vger.kernel.org
Subject: [PATCH 04/11] KVM: SVM: retrieve VMCB from assembly
Date: Wed,  9 Nov 2022 09:51:49 -0500
Message-ID: <20221109145156.84714-5-pbonzini@redhat.com>
In-Reply-To: <20221109145156.84714-1-pbonzini@redhat.com>

Continue moving all accesses to struct vcpu_svm directly into vmenter.S.
This limits the confusion caused by the different registers used for
argument passing in the 32- and 64-bit ABIs.

It is not strictly necessary for __svm_sev_es_vcpu_run, but staying
consistent is a good idea since it keeps __svm_sev_es_vcpu_run a
stripped-down version of __svm_vcpu_run.

No functional change intended.

Cc: stable@vger.kernel.org
Fixes: a149180fbcf3 ("x86: Add magic AMD return-thunk")
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
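A note for reviewers who do not have the asm-offsets machinery paged in:
the two OFFSET() entries added below are what make SVM_current_vmcb and
KVM_VMCB_pa visible to vmenter.S.  The sketch that follows shows roughly
how that works; the macro bodies mimic include/linux/kbuild.h and the
structs are toy stand-ins, so treat the details as illustration rather
than the exact code in this series.

/*
 * Illustrative sketch only: how an OFFSET() entry becomes a constant the
 * assembler can use.  The build scrapes the "->SYM value" strings out of
 * the generated assembly and turns them into "#define SYM value" lines in
 * kvm-asm-offsets.h, which vmenter.S includes.
 */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

/* Toy stand-ins for the real vcpu_svm / kvm_vmcb_info layouts. */
struct kvm_vmcb_info { unsigned long pa; };
struct vcpu_svm { struct kvm_vmcb_info *current_vmcb; };

static void __attribute__((used)) common(void)
{
	OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
	OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
}
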
 arch/x86/kvm/kvm-asm-offsets.c |  2 ++
 arch/x86/kvm/svm/svm.c         |  5 ++---
 arch/x86/kvm/svm/svm.h         |  4 ++--
 arch/x86/kvm/svm/vmenter.S     | 20 ++++++++++----------
 4 files changed, 16 insertions(+), 15 deletions(-)
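
The vmenter.S changes drop the vmcb_pa argument and instead perform the
pointer chase themselves; the .ifnc _ASM_ARG1, _ASM_DI guard skips the
copy on 64-bit, where the first argument already arrives in %rdi.  For
reference, here is a small stand-alone C sketch (struct layouts invented
purely for illustration) of what the two new mov instructions compute:

/*
 * Toy user-space illustration, not kernel code: the load chain
 *
 *     mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
 *     mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
 *
 * i.e. RAX = svm->current_vmcb->pa.
 */
#include <stddef.h>
#include <stdio.h>

struct kvm_vmcb_info { unsigned long pa; };
struct vcpu_svm { struct kvm_vmcb_info *current_vmcb; };

int main(void)
{
	struct kvm_vmcb_info vmcb = { .pa = 0x123000 };
	struct vcpu_svm svm = { .current_vmcb = &vmcb };

	/* First mov: load the current_vmcb pointer from *svm. */
	struct kvm_vmcb_info *info = *(struct kvm_vmcb_info **)
		((char *)&svm + offsetof(struct vcpu_svm, current_vmcb));
	/* Second mov: load ->pa from the kvm_vmcb_info just fetched. */
	unsigned long pa = *(unsigned long *)
		((char *)info + offsetof(struct kvm_vmcb_info, pa));

	printf("vmcb pa = %#lx (same as svm.current_vmcb->pa = %#lx)\n",
	       pa, svm.current_vmcb->pa);
	return 0;
}
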

diff --git a/arch/x86/kvm/kvm-asm-offsets.c b/arch/x86/kvm/kvm-asm-offsets.c
index 30db96852e2d..f1b694e431ae 100644
--- a/arch/x86/kvm/kvm-asm-offsets.c
+++ b/arch/x86/kvm/kvm-asm-offsets.c
@@ -15,6 +15,8 @@ static void __used common(void)
 	if (IS_ENABLED(CONFIG_KVM_AMD)) {
 		BLANK();
 		OFFSET(SVM_vcpu_arch_regs, vcpu_svm, vcpu.arch.regs);
+		OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
+		OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
 	}
 
 	if (IS_ENABLED(CONFIG_KVM_INTEL)) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b412bc5773c5..0c86c435c51f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3914,12 +3914,11 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	unsigned long vmcb_pa = svm->current_vmcb->pa;
 
 	guest_state_enter_irqoff();
 
 	if (sev_es_guest(vcpu->kvm)) {
-		__svm_sev_es_vcpu_run(vmcb_pa);
+		__svm_sev_es_vcpu_run(svm);
 	} else {
 		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 
@@ -3930,7 +3929,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 		 * vmcb02 when switching vmcbs for nested virtualization.
 		 */
 		vmload(svm->vmcb01.pa);
-		__svm_vcpu_run(vmcb_pa, svm);
+		__svm_vcpu_run(svm);
 		vmsave(svm->vmcb01.pa);
 
 		vmload(__sme_page_pa(sd->save_area));
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 447e25c9101a..7ff1879e73c5 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -683,7 +683,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 
 /* vmenter.S */
 
-void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
-void __svm_vcpu_run(unsigned long vmcb_pa, struct vcpu_svm *svm);
+void __svm_sev_es_vcpu_run(struct vcpu_svm *svm);
+void __svm_vcpu_run(struct vcpu_svm *svm);
 
 #endif
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 531510ab6072..d07bac1952c5 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -32,7 +32,6 @@
 
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
- * @vmcb_pa:	unsigned long
  * @svm:	struct vcpu_svm *
  */
 SYM_FUNC_START(__svm_vcpu_run)
@@ -49,16 +48,16 @@ SYM_FUNC_START(__svm_vcpu_run)
 	push %_ASM_BX
 
 	/* Save @svm. */
-	push %_ASM_ARG2
-
-	/* Save @vmcb. */
 	push %_ASM_ARG1
 
+.ifnc _ASM_ARG1, _ASM_DI
 	/* Move @svm to RDI. */
-	mov %_ASM_ARG2, %_ASM_DI
+	mov %_ASM_ARG1, %_ASM_DI
+.endif
 
-	/* "POP" @vmcb to RAX. */
-	pop %_ASM_AX
+	/* Get svm->current_vmcb->pa into RAX. */
+	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
+	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
 
 	/* Load guest registers. */
 	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
@@ -170,7 +169,7 @@ SYM_FUNC_END(__svm_vcpu_run)
 
 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
- * @vmcb_pa:	unsigned long
+ * @svm:	struct vcpu_svm *
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	push %_ASM_BP
@@ -185,8 +184,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 #endif
 	push %_ASM_BX
 
-	/* Move @vmcb to RAX. */
-	mov %_ASM_ARG1, %_ASM_AX
+	/* Get svm->current_vmcb->pa into RAX. */
+	mov SVM_current_vmcb(%_ASM_ARG1), %_ASM_AX
+	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
 
 	/* Enter guest mode */
 	sti
-- 
2.31.1




Thread overview: 24+ messages
2022-11-09 14:51 [PATCH v3 00/11] KVM: SVM: fixes for vmentry code Paolo Bonzini
2022-11-09 14:51 ` [PATCH 01/11] KVM: x86: use a separate asm-offsets.c file Paolo Bonzini
2022-11-09 15:06   ` Sean Christopherson
2022-11-09 14:51 ` [PATCH 02/11] KVM: SVM: replace regs argument of __svm_vcpu_run with vcpu_svm Paolo Bonzini
2022-11-09 15:14   ` Sean Christopherson
2022-11-09 14:51 ` [PATCH 03/11] KVM: SVM: adjust register allocation for __svm_vcpu_run Paolo Bonzini
2022-11-09 15:20   ` Sean Christopherson
2022-11-09 16:04     ` Paolo Bonzini
2022-11-09 14:51 ` Paolo Bonzini [this message]
2022-11-09 15:25   ` [PATCH 04/11] KVM: SVM: retrieve VMCB from assembly Sean Christopherson
2022-11-09 14:51 ` [PATCH 05/11] KVM: SVM: remove unused field from struct vcpu_svm Paolo Bonzini
2022-11-09 15:25   ` Sean Christopherson
2022-11-09 14:51 ` [PATCH 06/11] KVM: SVM: remove dead field from struct svm_cpu_data Paolo Bonzini
2022-11-09 15:26   ` Sean Christopherson
2022-11-09 14:51 ` [PATCH 07/11] KVM: SVM: do not allocate struct svm_cpu_data dynamically Paolo Bonzini
2022-11-09 15:58   ` Sean Christopherson
2022-11-09 16:00     ` Paolo Bonzini
2022-11-09 14:51 ` [PATCH 08/11] KVM: SVM: move guest vmsave/vmload back to assembly Paolo Bonzini
2022-11-09 15:29   ` Sean Christopherson
2022-11-09 14:51 ` [PATCH 09/11] KVM: SVM: restore host save area from assembly Paolo Bonzini
2022-11-09 15:54   ` Sean Christopherson
2022-11-09 14:51 ` [PATCH 10/11] KVM: SVM: move MSR_IA32_SPEC_CTRL save/restore to assembly Paolo Bonzini
2022-11-09 16:04   ` Sean Christopherson
2022-11-09 14:51 ` [PATCH 11/11] x86, KVM: remove unnecessary argument to x86_virt_spec_ctrl and callers Paolo Bonzini
