From: Brijesh Singh <brijesh.singh@amd.com>
To: x86@kernel.org, linux-kernel@vger.kernel.org,
	kvm@vger.kernel.org, linux-coco@lists.linux.dev,
	linux-mm@kvack.org, linux-crypto@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Joerg Roedel <jroedel@suse.de>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	Ard Biesheuvel <ardb@kernel.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>,
	Andy Lutomirski <luto@kernel.org>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Sergio Lopez <slp@redhat.com>, Peter Gonda <pgonda@google.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>,
	David Rientjes <rientjes@google.com>,
	tony.luck@intel.com, npmccallum@redhat.com,
	Borislav Petkov <bp@suse.de>,
	Brijesh Singh <brijesh.singh@amd.com>
Subject: [PATCH Part2 RFC v3 30/37] KVM: SVM: Add support to handle MSR based Page State Change VMGEXIT
Date: Wed,  2 Jun 2021 09:10:50 -0500	[thread overview]
Message-ID: <20210602141057.27107-31-brijesh.singh@amd.com> (raw)
In-Reply-To: <20210602141057.27107-1-brijesh.singh@amd.com>

SEV-SNP VMs can ask the hypervisor to change a page's state in the RMP
table to private or shared using the Page State Change MSR protocol
defined in the GHCB specification.
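
For reference, a minimal guest-side sketch of the request/response flow this
handler services is shown below. It is illustrative only and not part of this
patch: sev_es_wr_ghcb_msr(), sev_es_rd_ghcb_msr() and VMGEXIT() are assumed to
be the usual guest GHCB helpers, and the GHCB_MSR_PSC_* field macros are the
ones introduced by the GHCB protocol patches earlier in this series.

  /*
   * Guest-side sketch (illustrative only): request a page state change
   * through the GHCB MSR protocol and check the response.
   */
  static int snp_psc_msr_example(unsigned long paddr, int op)
  {
  	u64 val;

  	/* GHCBData[11:0] = request code, [51:12] = GFN, [55:52] = operation */
  	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ |
  			   ((u64)(paddr >> PAGE_SHIFT) << GHCB_MSR_PSC_GFN_POS) |
  			   ((u64)op << GHCB_MSR_PSC_OP_POS));
  	VMGEXIT();
  	val = sev_es_rd_ghcb_msr();

  	/* The response code must be PSC_RESP and the error field must be 0 */
  	if ((val & GHCB_MSR_INFO_MASK) != GHCB_MSR_PSC_RESP)
  		return -EIO;

  	return GHCB_MSR_PSC_RESP_VAL(val) ? -EIO : 0;
  }

A shared request, for example, would be snp_psc_msr_example(paddr,
SNP_PAGE_STATE_SHARED), issued by the guest before handing a page back to
the hypervisor.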

Before changing the page state in the RMP entry, look up the page in
the TDP to make sure that there is a valid mapping for it. If a mapping
exists, try to find a page level that works for both the TDP and the
RMP. If the page is not mapped in the TDP, create a fault so that it
gets mapped before the page state in the RMP entry is changed.
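
In short, the per-page flow implemented by __snp_handle_page_state_change()
below is:

  if the TDP has no mapping for the gpa:
          fault it in via kvm_mmu_map_tdp_page()
  level = min(requested level, TDP mapping level)
  with kvm->mmu_lock held for write:
          SHARED:  PSMASH the 2MB RMP entry if needed, then RMPUPDATE
                   to clear the assigned bit
          PRIVATE: RMPUPDATE with the gpa, ASID, page size and the
                   assigned bit set
  gpa += page_level_size(level)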

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/include/asm/sev-common.h |   3 +
 arch/x86/kvm/svm/sev.c            | 141 ++++++++++++++++++++++++++++++
 2 files changed, 144 insertions(+)

diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index e7c6ce2ce45e..ed417340ed42 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -82,6 +82,9 @@
 
 #define GHCB_MSR_PSC_RESP		0x015
 #define GHCB_MSR_PSC_ERROR_POS		32
+#define GHCB_MSR_PSC_ERROR_MASK		GENMASK_ULL(31, 0)
+#define GHCB_MSR_PSC_RSVD_POS		12
+#define GHCB_MSR_PSC_RSVD_MASK		GENMASK_ULL(19, 0)
 #define GHCB_MSR_PSC_RESP_VAL(val)	((val) >> GHCB_MSR_PSC_ERROR_POS)
 
 /* GHCB GPA Register */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 81c0fc883261..dac7042464be 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -28,6 +28,7 @@
 #include "svm_ops.h"
 #include "cpuid.h"
 #include "trace.h"
+#include "mmu.h"
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
@@ -2821,6 +2822,127 @@ static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
 	svm->vmcb->control.ghcb_gpa = value;
 }
 
+static int snp_rmptable_psmash(struct kvm_vcpu *vcpu, kvm_pfn_t pfn)
+{
+	pfn = pfn & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1);
+
+	return psmash(pfn_to_page(pfn));
+}
+
+static int snp_make_page_shared(struct kvm_vcpu *vcpu, gpa_t gpa, kvm_pfn_t pfn, int level)
+{
+	struct rmpupdate val;
+	int rc, rmp_level;
+	struct rmpentry *e;
+
+	e = snp_lookup_page_in_rmptable(pfn_to_page(pfn), &rmp_level);
+	if (!e)
+		return -EINVAL;
+
+	if (!rmpentry_assigned(e))
+		return 0;
+
+	/* Log if the entry is validated */
+	if (rmpentry_validated(e))
+		pr_warn_ratelimited("Remove RMP entry for a validated gpa 0x%llx\n", gpa);
+
+	/*
+	 * Is the page part of an existing 2MB RMP entry? Split the 2MB entry
+	 * into 4KB pages before making the memory shared.
+	 */
+	if ((level == PG_LEVEL_4K) && (rmp_level == PG_LEVEL_2M)) {
+		rc = snp_rmptable_psmash(vcpu, pfn);
+		if (rc)
+			return rc;
+	}
+
+	memset(&val, 0, sizeof(val));
+	val.pagesize = X86_TO_RMP_PG_LEVEL(level);
+	return rmpupdate(pfn_to_page(pfn), &val);
+}
+
+static int snp_make_page_private(struct kvm_vcpu *vcpu, gpa_t gpa, kvm_pfn_t pfn, int level)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+	struct rmpupdate val;
+	struct rmpentry *e;
+	int rmp_level;
+
+	e = snp_lookup_page_in_rmptable(pfn_to_page(pfn), &rmp_level);
+	if (!e)
+		return -EINVAL;
+
+	/* Log if the entry is validated */
+	if (rmpentry_validated(e))
+		pr_warn_ratelimited("Asked to make a pre-validated gpa %llx private\n", gpa);
+
+	memset(&val, 0, sizeof(val));
+	val.gpa = gpa;
+	val.asid = sev->asid;
+	val.pagesize = X86_TO_RMP_PG_LEVEL(level);
+	val.assigned = true;
+
+	return rmpupdate(pfn_to_page(pfn), &val);
+}
+
+static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, int op, gpa_t gpa, int level)
+{
+	struct kvm *kvm = vcpu->kvm;
+	int rc = -EINVAL, tdp_level;
+	kvm_pfn_t pfn;
+	gpa_t gpa_end;
+
+	gpa_end = gpa + page_level_size(level);
+
+	while (gpa < gpa_end) {
+		/*
+		 * Get the pfn and level for the gpa from the nested page table.
+		 *
+		 * If the TDP walk fails, then it's safe to say that we don't have a
+		 * valid mapping for the gpa in the nested page table. Create a fault
+		 * to map the page in the nested page table.
+		 */
+		if (!kvm_mmu_get_tdp_walk(vcpu, gpa, &pfn, &tdp_level)) {
+			pfn = kvm_mmu_map_tdp_page(vcpu, gpa, PFERR_USER_MASK, level);
+			if (is_error_noslot_pfn(pfn))
+				goto out;
+
+			if (!kvm_mmu_get_tdp_walk(vcpu, gpa, &pfn, &tdp_level))
+				goto out;
+		}
+
+		/* Adjust the level so that we don't go higher than the backing page level */
+		level = min_t(size_t, level, tdp_level);
+
+		write_lock(&kvm->mmu_lock);
+
+		switch (op) {
+		case SNP_PAGE_STATE_SHARED:
+			rc = snp_make_page_shared(vcpu, gpa, pfn, level);
+			break;
+		case SNP_PAGE_STATE_PRIVATE:
+			rc = snp_make_page_private(vcpu, gpa, pfn, level);
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+
+		write_unlock(&kvm->mmu_lock);
+
+		if (rc) {
+			pr_err_ratelimited("Error op %d gpa %llx pfn %llx level %d rc %d\n",
+					   op, gpa, pfn, level, rc);
+			goto out;
+		}
+
+		gpa = gpa + page_level_size(level);
+	}
+
+out:
+	return rc;
+}
+
 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -2919,6 +3041,25 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 				  GHCB_MSR_INFO_POS);
 		break;
 	}
+	case GHCB_MSR_PSC_REQ: {
+		gfn_t gfn;
+		int ret;
+		u8 op;
+
+		gfn = get_ghcb_msr_bits(svm, GHCB_MSR_PSC_GFN_MASK, GHCB_MSR_PSC_GFN_POS);
+		op = get_ghcb_msr_bits(svm, GHCB_MSR_PSC_OP_MASK, GHCB_MSR_PSC_OP_POS);
+
+		ret = __snp_handle_page_state_change(vcpu, op, gfn_to_gpa(gfn), PG_LEVEL_4K);
+
+		/* If the state change failed, the spec requires returning all F's */
+		if (ret)
+			ret = -1;
+
+		set_ghcb_msr_bits(svm, ret, GHCB_MSR_PSC_ERROR_MASK, GHCB_MSR_PSC_ERROR_POS);
+		set_ghcb_msr_bits(svm, 0, GHCB_MSR_PSC_RSVD_MASK, GHCB_MSR_PSC_RSVD_POS);
+		set_ghcb_msr_bits(svm, GHCB_MSR_PSC_RESP, GHCB_MSR_INFO_MASK, GHCB_MSR_INFO_POS);
+		break;
+	}
 	case GHCB_MSR_TERM_REQ: {
 		u64 reason_set, reason_code;
 
-- 
2.17.1


Thread overview: 40+ messages
2021-06-02 14:10 [PATCH Part2 RFC v3 00/37] Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 01/37] KVM: SVM: Add support to handle AP reset MSR protocol Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 02/37] KVM: SVM: Provide the Hypervisor Feature support VMGEXIT Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 03/37] x86/cpufeatures: Add SEV-SNP CPU feature Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 04/37] x86/sev: Add the host SEV-SNP initialization support Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 05/37] x86/sev: Add RMP entry lookup helpers Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 06/37] x86/sev: Add helper functions for RMPUPDATE and PSMASH instruction Brijesh Singh
2021-06-10 13:03   ` Dr. David Alan Gilbert
2021-06-02 14:10 ` [PATCH Part2 RFC v3 07/37] x86/sev: Split the physmap when adding the page in RMP table Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 08/37] x86/traps: Define RMP violation #PF error code Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 09/37] x86/fault: Add support to dump RMP entry on fault Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 10/37] x86/fault: Add support to handle the RMP fault for user address Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 11/37] crypto:ccp: Define the SEV-SNP commands Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 12/37] crypto: ccp: Add support to initialize the AMD-SP for SEV-SNP Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 13/37] crypto: ccp: Shutdown SNP firmware on kexec Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 14/37] crypto:ccp: Provide APIs to issue SEV-SNP commands Brijesh Singh
2021-06-15 11:23   ` Dr. David Alan Gilbert
2021-06-02 14:10 ` [PATCH Part2 RFC v3 15/37] crypto: ccp: Handle the legacy TMR allocation when SNP is enabled Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 16/37] crypto: ccp: Handle the legacy SEV command " Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 17/37] KVM: SVM: make AVIC backing, VMSA and VMCB memory allocation SNP safe Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 18/37] KVM: SVM: Add initial SEV-SNP support Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 19/37] KVM: SVM: Add KVM_SNP_INIT command Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 20/37] KVM: SVM: Add KVM_SEV_SNP_LAUNCH_START command Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 21/37] KVM: SVM: Add KVM_SEV_SNP_LAUNCH_UPDATE command Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 22/37] KVM: SVM: Reclaim the guest pages when SEV-SNP VM terminates Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 23/37] KVM: SVM: Add KVM_SEV_SNP_LAUNCH_FINISH command Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 24/37] KVM: X86: Add kvm_x86_ops to get the max page level for the TDP Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 25/37] KVM: X86: Introduce kvm_mmu_map_tdp_page() for use by SEV Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 26/37] KVM: X86: Introduce kvm_mmu_get_tdp_walk() for SEV-SNP use Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 27/37] KVM: X86: Define new RMP check related #NPF error bits Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 28/37] KVM: X86: update page-fault trace to log the 64-bit error code Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 29/37] KVM: SVM: Add support to handle GHCB GPA register VMGEXIT Brijesh Singh
2021-06-02 14:10 ` Brijesh Singh [this message]
2021-06-02 14:10 ` [PATCH Part2 RFC v3 31/37] KVM: SVM: Add support to handle Page State Change VMGEXIT Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 32/37] KVM: Add arch hooks to track the host write to guest memory Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 33/37] KVM: X86: Export the kvm_zap_gfn_range() for the SNP use Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 34/37] KVM: SVM: Add support to handle the RMP nested page fault Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 35/37] KVM: SVM: Provide support for SNP_GUEST_REQUEST NAE event Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 36/37] KVM: SVM: Use a VMSA physical address variable for populating VMCB Brijesh Singh
2021-06-02 14:10 ` [PATCH Part2 RFC v3 37/37] KVM: SVM: Support SEV-SNP AP Creation NAE event Brijesh Singh
