From: Nikunj A Dadhania <nikunj@amd.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	Brijesh Singh <brijesh.singh@amd.com>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	Peter Gonda <pgonda@google.com>, Bharata B Rao <bharata@amd.com>,
	"Maciej S . Szmigiero" <mail@maciej.szmigiero.name>,
	Mingwei Zhang <mizhang@google.com>,
	"David Hildenbrand" <david@redhat.com>, <kvm@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	Nikunj A Dadhania <nikunj@amd.com>
Subject: [PATCH RFC v1 4/9] KVM: SVM: Add pinning metadata in the arch memslot
Date: Tue, 8 Mar 2022 10:08:52 +0530
Message-ID: <20220308043857.13652-5-nikunj@amd.com>
In-Reply-To: <20220308043857.13652-1-nikunj@amd.com>

An AMD SEV guest requires its pages to be pinned in host physical
memory, because the memory encryption scheme uses the physical
address of the memory being encrypted. If guest pages are moved,
they would be decrypted incorrectly, corrupting the guest's memory.

For SEV/SEV-ES guests, the hypervisor doesn't know which pages are
encrypted or when the guest is done using them, so it must treat all
guest pages as encrypted until they are deallocated or the guest is
destroyed.

The KVM MMU needs to track which pages are pinned and their
corresponding PFNs so that they can be unpinned on the memslot
deallocation and guest destroy paths.
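
As an illustration only (not part of this patch), a follow-up change
could consume the new per-memslot fields roughly as sketched below.
The helper names, the gfn-relative indexing and the absence of
locking are assumptions made for the sketch, not the actual
implementation:

  /*
   * Hypothetical helpers: record the pfn backing @gfn and mark it
   * pinned in the memslot's bitmap, then release every pinned pfn
   * when the memslot metadata is torn down.
   */
  static void sev_track_pinned_pfn(struct kvm_memory_slot *slot,
                                   gfn_t gfn, kvm_pfn_t pfn)
  {
          struct kvm_arch_memory_slot *aslot = &slot->arch;
          unsigned long idx = gfn - slot->base_gfn;

          if (!test_and_set_bit(idx, aslot->pinned_bitmap))
                  aslot->pfns[idx] = pfn;
  }

  static void sev_release_pinned_pfns(struct kvm_memory_slot *slot)
  {
          struct kvm_arch_memory_slot *aslot = &slot->arch;
          unsigned long idx;

          for_each_set_bit(idx, aslot->pinned_bitmap, slot->npages)
                  kvm_release_pfn_clean(aslot->pfns[idx]);
  }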

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
 arch/x86/include/asm/kvm-x86-ops.h |  2 ++
 arch/x86/include/asm/kvm_host.h    |  7 +++++
 arch/x86/kvm/svm/sev.c             | 49 ++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c             |  3 ++
 arch/x86/kvm/svm/svm.h             |  6 ++++
 arch/x86/kvm/x86.c                 | 11 ++++++-
 6 files changed, 77 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 8efb43d92eef..61ff8a636db6 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -89,6 +89,8 @@ KVM_X86_OP(set_identity_map_addr)
 KVM_X86_OP(get_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP(pin_pfn)
+KVM_X86_OP(alloc_memslot_metadata)
+KVM_X86_OP(free_memslot)
 KVM_X86_OP_NULL(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
 KVM_X86_OP(get_l2_tsc_multiplier)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index df11f1fb76de..eeb2c799b59f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -926,6 +926,8 @@ struct kvm_arch_memory_slot {
 	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
 	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
+	unsigned long *pinned_bitmap;
+	kvm_pfn_t *pfns;
 };
 
 /*
@@ -1421,6 +1423,11 @@ struct kvm_x86_ops {
 	bool (*pin_pfn)(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 			kvm_pfn_t pfn, hva_t hva, bool write,
 			enum pg_level level);
+	int (*alloc_memslot_metadata)(struct kvm *kvm,
+				      const struct kvm_memory_slot *old,
+				      struct kvm_memory_slot *new);
+	void (*free_memslot)(struct kvm *kvm,
+			     struct kvm_memory_slot *slot);
 
 	bool (*has_wbinvd_exit)(void);
 
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 17b53457d866..bd7572517c99 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2950,3 +2950,52 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 
 	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
 }
+
+void sev_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+	struct kvm_arch_memory_slot *aslot = &slot->arch;
+
+	if (!sev_guest(kvm))
+		return;
+
+	if (aslot->pinned_bitmap) {
+		kvfree(aslot->pinned_bitmap);
+		aslot->pinned_bitmap = NULL;
+	}
+
+	if (aslot->pfns) {
+		kvfree(aslot->pfns);
+		aslot->pfns = NULL;
+	}
+}
+
+int sev_alloc_memslot_metadata(struct kvm *kvm,
+			       const struct kvm_memory_slot *old,
+			       struct kvm_memory_slot *new)
+{
+	struct kvm_arch_memory_slot *aslot = &new->arch;
+	unsigned long pinned_bytes = new->npages * sizeof(kvm_pfn_t);
+
+	if (!sev_guest(kvm))
+		return 0;
+
+	if (old && old->arch.pinned_bitmap && old->arch.pfns) {
+		WARN_ON(old->npages != new->npages);
+		aslot->pinned_bitmap = old->arch.pinned_bitmap;
+		aslot->pfns = old->arch.pfns;
+		return 0;
+	}
+
+	aslot->pfns = kvcalloc(new->npages, sizeof(*aslot->pfns),
+			      GFP_KERNEL_ACCOUNT);
+	if (!aslot->pfns)
+		return -ENOMEM;
+
+	aslot->pinned_bitmap = kvzalloc(pinned_bytes, GFP_KERNEL_ACCOUNT);
+	if (!aslot->pinned_bitmap) {
+		kvfree(aslot->pfns);
+		aslot->pfns = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fd3a00c892c7..ec06421cb532 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4658,6 +4658,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.complete_emulated_msr = svm_complete_emulated_msr,
 
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
+
+	.alloc_memslot_metadata = sev_alloc_memslot_metadata,
+	.free_memslot = sev_free_memslot,
 };
 
 /*
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index fa98d6844728..f00364020d7e 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -616,4 +616,10 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 
+int sev_alloc_memslot_metadata(struct kvm *kvm,
+			       const struct kvm_memory_slot *old,
+			       struct kvm_memory_slot *new);
+void sev_free_memslot(struct kvm *kvm,
+		      struct kvm_memory_slot *slot);
+
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 82a9dcd8c67f..95070aaa1636 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11796,6 +11796,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
 	}
 
 	kvm_page_track_free_memslot(slot);
+	static_call_cond(kvm_x86_free_memslot)(kvm, slot);
 }
 
 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
@@ -11821,6 +11822,7 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
 }
 
 static int kvm_alloc_memslot_metadata(struct kvm *kvm,
+				      const struct kvm_memory_slot *old,
 				      struct kvm_memory_slot *slot)
 {
 	unsigned long npages = slot->npages;
@@ -11873,8 +11875,15 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
 	if (kvm_page_track_create_memslot(kvm, slot, npages))
 		goto out_free;
 
+	if (kvm_x86_ops.alloc_memslot_metadata &&
+	    static_call(kvm_x86_alloc_memslot_metadata)(kvm, old, slot))
+		goto out_free_page_track;
+
 	return 0;
 
+out_free_page_track:
+	kvm_page_track_free_memslot(slot);
+
 out_free:
 	memslot_rmap_free(slot);
 
@@ -11907,7 +11916,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   enum kvm_mr_change change)
 {
 	if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
-		return kvm_alloc_memslot_metadata(kvm, new);
+		return kvm_alloc_memslot_metadata(kvm, old, new);
 
 	if (change == KVM_MR_FLAGS_ONLY)
 		memcpy(&new->arch, &old->arch, sizeof(old->arch));
-- 
2.32.0

