From: Tom Lendacky <thomas.lendacky@amd.com>
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, x86@kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Borislav Petkov <bp@alien8.de>, Ingo Molnar <mingo@redhat.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Brijesh Singh <brijesh.singh@amd.com>
Subject: [PATCH v3 05/34] KVM: SVM: Add support for the SEV-ES VMSA
Date: Mon,  9 Nov 2020 16:25:31 -0600
Message-ID: <316f09c279628b972730664250903936b8a7b372.1604960760.git.thomas.lendacky@amd.com>
In-Reply-To: <cover.1604960760.git.thomas.lendacky@amd.com>

From: Tom Lendacky <thomas.lendacky@amd.com>

Allocate a page during vCPU creation to be used as the encrypted VM save
area (VMSA) for the SEV-ES guest. Provide a flag in the kvm_vcpu_arch
structure that indicates whether the guest state is protected.

When freeing a VMSA page that has been encrypted, the cache contents must
be flushed using the MSR_AMD64_VM_PAGE_FLUSH MSR before freeing the page.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
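As an illustrative aside (not part of this patch): the
guest_state_protected flag added to kvm_vcpu_arch is meant to gate any
code that touches guest register state once that state lives in the
encrypted VMSA. A minimal sketch of the intended usage pattern, where
example_sync_regs() is a hypothetical caller (later patches in this
series add checks of this form):

  static int example_sync_regs(struct kvm_vcpu *vcpu)
  {
          /* Register state is encrypted in the VMSA; refuse to copy it. */
          if (vcpu->arch.guest_state_protected)
                  return -EINVAL;

          /* ... existing register synchronization would go here ... */
          return 0;
  }
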
 arch/x86/include/asm/kvm_host.h |  3 ++
 arch/x86/kvm/svm/sev.c          | 64 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c          | 24 +++++++++++--
 arch/x86/kvm/svm/svm.h          |  5 +++
 4 files changed, 94 insertions(+), 2 deletions(-)
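
Also as an illustrative aside (not part of this patch): the flush added
to sev.c reduces to one VM_PAGE_FLUSH MSR write per 4K page, with the
page-aligned virtual address in the upper bits of the MSR value and the
guest ASID in the low bits. A minimal sketch, assuming the
MSR_AMD64_VM_PAGE_FLUSH definition added earlier in this series:

  static void example_vm_page_flush(void *va, unsigned long len,
                                    unsigned int asid)
  {
          u64 start = (u64)va & PAGE_MASK;
          u64 stop  = PAGE_ALIGN((u64)va + len);

          /* One MSR write flushes the cache lines of one guest page. */
          for (; start < stop; start += PAGE_SIZE)
                  wrmsrl(MSR_AMD64_VM_PAGE_FLUSH, start | asid);
  }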

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d44858b69353..7776bb18e29d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -804,6 +804,9 @@ struct kvm_vcpu_arch {
 		 */
 		bool enforce;
 	} pv_cpuid;
+
+	/* Protected Guests */
+	bool guest_state_protected;
 };
 
 struct kvm_lpage_info {
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 9bf5e9dadff5..151e9eab85a9 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -14,6 +14,7 @@
 #include <linux/psp-sev.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
+#include <asm/processor.h>
 
 #include "x86.h"
 #include "svm.h"
@@ -1190,6 +1191,69 @@ void sev_hardware_teardown(void)
 	sev_flush_asids();
 }
 
+/*
+ * Pages used by hardware to hold guest encrypted state must be flushed before
+ * returning them to the system.
+ */
+void sev_flush_guest_memory(struct vcpu_svm *svm, void *va, unsigned long len)
+{
+	/*
+	 * If hardware enforced cache coherency for encrypted mappings of the
+	 * same physical page is supported, nothing to do.
+	 */
+	if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
+		return;
+
+	/*
+	 * If the VM Page Flush MSR is supported, use it to flush the page
+	 * (using the page virtual address and the guest ASID).
+	 */
+	if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
+		struct kvm_sev_info *sev;
+		u64 start, stop;
+
+		/* Align start and stop to page boundaries. */
+		start = (u64)va & PAGE_MASK;
+		stop = PAGE_ALIGN((u64)va + len);
+
+		if (start < stop) {
+			sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+
+			while (start < stop) {
+				wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
+				       start | sev->asid);
+
+				start += PAGE_SIZE;
+			}
+
+			return;
+		} else {
+			WARN(1, "Address overflow, using WBINVD\n");
+		}
+	}
+
+	/*
+	 * Hardware should always have one of the above features,
+	 * but if not, use WBINVD and issue a warning.
+	 */
+	WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
+	wbinvd_on_all_cpus();
+}
+
+void sev_free_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm;
+
+	if (!sev_es_guest(vcpu->kvm))
+		return;
+
+	svm = to_svm(vcpu);
+
+	if (vcpu->arch.guest_state_protected)
+		sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
+	__free_page(virt_to_page(svm->vmsa));
+}
+
 void pre_sev_run(struct vcpu_svm *svm, int cpu)
 {
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a3198b65f431..d45b2dc5cabe 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1288,6 +1288,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm;
 	struct page *vmcb_page;
+	struct page *vmsa_page = NULL;
 	int err;
 
 	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
@@ -1298,9 +1299,19 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	if (!vmcb_page)
 		goto out;
 
+	if (sev_es_guest(svm->vcpu.kvm)) {
+		/*
+		 * SEV-ES guests require a separate VMSA page used to contain
+		 * the encrypted register state of the guest.
+		 */
+		vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+		if (!vmsa_page)
+			goto error_free_vmcb_page;
+	}
+
 	err = avic_init_vcpu(svm);
 	if (err)
-		goto error_free_vmcb_page;
+		goto error_free_vmsa_page;
 
 	/* We initialize this flag to true to make sure that the is_running
 	 * bit would be set the first time the vcpu is loaded.
@@ -1310,12 +1321,16 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 
 	svm->msrpm = svm_vcpu_alloc_msrpm();
 	if (!svm->msrpm)
-		goto error_free_vmcb_page;
+		goto error_free_vmsa_page;
 
 	svm_vcpu_init_msrpm(vcpu, svm->msrpm);
 
 	svm->vmcb = page_address(vmcb_page);
 	svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT);
+
+	if (vmsa_page)
+		svm->vmsa = page_address(vmsa_page);
+
 	svm->asid_generation = 0;
 	init_vmcb(svm);
 
@@ -1324,6 +1339,9 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 
 	return 0;
 
+error_free_vmsa_page:
+	if (vmsa_page)
+		__free_page(vmsa_page);
 error_free_vmcb_page:
 	__free_page(vmcb_page);
 out:
@@ -1351,6 +1369,8 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 
 	svm_free_nested(svm);
 
+	sev_free_vcpu(vcpu);
+
 	__free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index af9e5910817c..8f0a3ed0d790 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -167,6 +167,10 @@ struct vcpu_svm {
 		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
 		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
 	} shadow_msr_intercept;
+
+	/* SEV-ES support */
+	struct vmcb_save_area *vmsa;
+	struct ghcb *ghcb;
 };
 
 struct svm_cpu_data {
@@ -512,5 +516,6 @@ int svm_unregister_enc_region(struct kvm *kvm,
 void pre_sev_run(struct vcpu_svm *svm, int cpu);
 void __init sev_hardware_setup(void);
 void sev_hardware_teardown(void);
+void sev_free_vcpu(struct kvm_vcpu *vcpu);
 
 #endif
-- 
2.28.0


Thread overview: 39+ messages
2020-11-09 22:25 [PATCH v3 00/34] SEV-ES hypervisor support Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 01/34] x86/cpu: Add VM page flush MSR availablility as a CPUID feature Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 02/34] KVM: SVM: Remove the call to sev_platform_status() during setup Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 03/34] KVM: SVM: Add support for SEV-ES capability in KVM Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 04/34] KVM: SVM: Add GHCB accessor functions for retrieving fields Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 05/34] KVM: SVM: Add support for the SEV-ES VMSA Tom Lendacky [this message]
2020-11-10  3:25   ` [PATCH v3 05/34] KVM: SVM: Add support for the SEV-ES VMSA kernel test robot
2020-11-09 22:25 ` [PATCH v3 06/34] KVM: x86: Mark GPRs dirty when written Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 07/34] KVM: SVM: Add required changes to support intercepts under SEV-ES Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 08/34] KVM: SVM: Prevent debugging " Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 09/34] KVM: SVM: Do not allow instruction emulation " Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 10/34] KVM: SVM: Cannot re-initialize the VMCB after shutdown with SEV-ES Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 11/34] KVM: SVM: Prepare for SEV-ES exit handling in the sev.c file Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 12/34] KVM: SVM: Add initial support for a VMGEXIT VMEXIT Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 13/34] KVM: SVM: Create trace events for VMGEXIT processing Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 14/34] KVM: SVM: Add support for SEV-ES GHCB MSR protocol function 0x002 Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 15/34] KVM: SVM: Add support for SEV-ES GHCB MSR protocol function 0x004 Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 16/34] KVM: SVM: Add support for SEV-ES GHCB MSR protocol function 0x100 Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 17/34] KVM: SVM: Create trace events for VMGEXIT MSR protocol processing Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 18/34] KVM: SVM: Support MMIO for an SEV-ES guest Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 19/34] KVM: SVM: Support string IO operations " Tom Lendacky
2020-11-10  3:56   ` kernel test robot
2020-11-09 22:25 ` [PATCH v3 20/34] KVM: SVM: Add support for EFER write traps " Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 21/34] KVM: SVM: Add support for CR0 " Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 22/34] KVM: SVM: Add support for CR4 " Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 23/34] KVM: SVM: Add support for CR8 " Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 24/34] KVM: x86: Update __get_sregs() / __set_sregs() to support SEV-ES Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 25/34] KVM: SVM: Do not report support for SMM for an SEV-ES guest Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 26/34] KVM: SVM: Guest FPU state save/restore not needed for " Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 27/34] KVM: SVM: Add support for booting APs for an " Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 28/34] KVM: SVM: Add NMI support " Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 29/34] KVM: SVM: Set the encryption mask for the SVM host save area Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 30/34] KVM: SVM: Update ASID allocation to support SEV-ES guests Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 31/34] KVM: SVM: Provide support for SEV-ES vCPU creation/loading Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 32/34] KVM: SVM: Provide support for SEV-ES vCPU loading Tom Lendacky
2020-11-09 22:25 ` [PATCH v3 33/34] KVM: SVM: Provide an updated VMRUN invocation for SEV-ES guests Tom Lendacky
2020-11-09 22:26 ` [PATCH v3 34/34] KVM: SVM: Provide support to launch and run an SEV-ES guest Tom Lendacky
