From: Babu Moger <babu.moger@amd.com>
To: pbonzini@redhat.com, vkuznets@redhat.com,
	sean.j.christopherson@intel.com, jmattson@google.com
Cc: wanpengli@tencent.com, kvm@vger.kernel.org, joro@8bytes.org,
	x86@kernel.org, linux-kernel@vger.kernel.org, babu.moger@amd.com,
	mingo@redhat.com, bp@alien8.de, hpa@zytor.com,
	tglx@linutronix.de
Subject: [PATCH v6 10/12] KVM: X86: Rename and move the function vmx_handle_memory_failure to x86.c
Date: Fri, 11 Sep 2020 14:29:05 -0500
Message-ID: <159985254493.11252.6603092560732507607.stgit@bmoger-ubuntu>
In-Reply-To: <159985237526.11252.1516487214307300610.stgit@bmoger-ubuntu>

The handling of kvm_read/write_guest_virt*() errors is not VMX-specific,
so move vmx_handle_memory_failure() to common x86 code and rename it to
kvm_handle_memory_failure(). The same helper can then be used by both VMX
and SVM.
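
As an illustration only (not part of this patch), an SVM call site can
reuse the helper with the same pattern as the existing VMX call sites
below; the wrapper name and operand handling here are hypothetical:

static int svm_example_read_operand(struct kvm_vcpu *vcpu, gva_t gva,
				    void *operand, unsigned int len)
{
	struct x86_exception e;
	int r;

	/* Same guest-virtual read the VMX handlers below use. */
	r = kvm_read_guest_virt(vcpu, gva, operand, len, &e);
	if (r != X86EMUL_CONTINUE)
		/*
		 * Either injects #PF into the guest (returns 1) or sets up
		 * KVM_EXIT_INTERNAL_ERROR (returns 0); see x86.c below.
		 */
		return kvm_handle_memory_failure(vcpu, r, &e);

	return 1;	/* success, continue emulating in the kernel */
}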

Signed-off-by: Babu Moger <babu.moger@amd.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/vmx/nested.c |   12 ++++++------
 arch/x86/kvm/vmx/vmx.c    |   29 +----------------------------
 arch/x86/kvm/vmx/vmx.h    |    2 --
 arch/x86/kvm/x86.c        |   28 ++++++++++++++++++++++++++++
 arch/x86/kvm/x86.h        |    2 ++
 5 files changed, 37 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 23b58c28a1c9..28becd22d9d9 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4688,7 +4688,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
 
 	r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
 	if (r != X86EMUL_CONTINUE) {
-		*ret = vmx_handle_memory_failure(vcpu, r, &e);
+		*ret = kvm_handle_memory_failure(vcpu, r, &e);
 		return -EINVAL;
 	}
 
@@ -4995,7 +4995,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
 		r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
 		if (r != X86EMUL_CONTINUE)
-			return vmx_handle_memory_failure(vcpu, r, &e);
+			return kvm_handle_memory_failure(vcpu, r, &e);
 	}
 
 	return nested_vmx_succeed(vcpu);
@@ -5068,7 +5068,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 			return 1;
 		r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
 		if (r != X86EMUL_CONTINUE)
-			return vmx_handle_memory_failure(vcpu, r, &e);
+			return kvm_handle_memory_failure(vcpu, r, &e);
 	}
 
 	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
@@ -5230,7 +5230,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
 					sizeof(gpa_t), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);
 
 	return nested_vmx_succeed(vcpu);
 }
@@ -5283,7 +5283,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 		return 1;
 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);
 
 	/*
 	 * Nested EPT roots are always held through guest_mmu,
@@ -5365,7 +5365,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		return 1;
 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);
 
 	if (operand.vpid >> 16)
 		return nested_vmx_fail(vcpu,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 46ba2e03a892..b15b4c6e3b46 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1598,33 +1598,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-/*
- * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
- * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
- * indicates whether exit to userspace is needed.
- */
-int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
-			      struct x86_exception *e)
-{
-	if (r == X86EMUL_PROPAGATE_FAULT) {
-		kvm_inject_emulated_page_fault(vcpu, e);
-		return 1;
-	}
-
-	/*
-	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
-	 * while handling a VMX instruction KVM could've handled the request
-	 * correctly by exiting to userspace and performing I/O but there
-	 * doesn't seem to be a real use-case behind such requests, just return
-	 * KVM_EXIT_INTERNAL_ERROR for now.
-	 */
-	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-	vcpu->run->internal.ndata = 0;
-
-	return 0;
-}
-
 /*
  * Recognizes a pending MTF VM-exit and records the nested state for later
  * delivery.
@@ -5558,7 +5531,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 
 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);
 
 	if (operand.pcid >> 12 != 0) {
 		kvm_inject_gp(vcpu, 0);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 26175a4759fa..7c578564a8fc 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -354,8 +354,6 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
 int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
-int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
-			      struct x86_exception *e);
 
 #define POSTED_INTR_ON  0
 #define POSTED_INTR_SN  1
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 539ea1cd6020..5d7930ecdddc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10763,6 +10763,34 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
 }
 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
 
+/*
+ * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
+ * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
+ * indicates whether exit to userspace is needed.
+ */
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+			      struct x86_exception *e)
+{
+	if (r == X86EMUL_PROPAGATE_FAULT) {
+		kvm_inject_emulated_page_fault(vcpu, e);
+		return 1;
+	}
+
+	/*
+	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
+	 * while handling a VMX instruction KVM could've handled the request
+	 * correctly by exiting to userspace and performing I/O but there
+	 * doesn't seem to be a real use-case behind such requests, just return
+	 * KVM_EXIT_INTERNAL_ERROR for now.
+	 */
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+	vcpu->run->internal.ndata = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 995ab696dcf0..d3a41144eb30 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -372,6 +372,8 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
 int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+			      struct x86_exception *e);
 
 #define  KVM_MSR_RET_INVALID  2
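
For completeness, a minimal userspace-side sketch (not part of this patch;
the VMM setup and function name are hypothetical) of what a VMM sees when
kvm_handle_memory_failure() returns 0 and KVM_RUN exits with
KVM_EXIT_INTERNAL_ERROR:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* 'vcpu_fd' and 'run' (the mmap'ed struct kvm_run) come from normal VM setup. */
static int example_handle_vcpu_exit(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	switch (run->exit_reason) {
	case KVM_EXIT_INTERNAL_ERROR:
		/* This path sets suberror to KVM_INTERNAL_ERROR_EMULATION. */
		fprintf(stderr, "KVM internal error: suberror=%u ndata=%u\n",
			run->internal.suberror, run->internal.ndata);
		return -1;
	default:
		return 0;	/* other exit reasons handled elsewhere */
	}
}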
 

