From: Brijesh Singh <brijesh.singh@amd.com>
To: x86@kernel.org, linux-kernel@vger.kernel.org,
	kvm@vger.kernel.org, linux-coco@lists.linux.dev,
	linux-mm@kvack.org, linux-crypto@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Joerg Roedel <jroedel@suse.de>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	Ard Biesheuvel <ardb@kernel.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>,
	Andy Lutomirski <luto@kernel.org>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Sergio Lopez <slp@redhat.com>, Peter Gonda <pgonda@google.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>,
	David Rientjes <rientjes@google.com>,
	Dov Murik <dovmurik@linux.ibm.com>,
	Tobin Feldman-Fitzthum <tobin@ibm.com>,
	Borislav Petkov <bp@alien8.de>,
	Michael Roth <michael.roth@amd.com>,
	Vlastimil Babka <vbabka@suse.cz>,
	"Kirill A . Shutemov" <kirill@shutemov.name>,
	Andi Kleen <ak@linux.intel.com>,
	tony.luck@intel.com, marcorr@google.com,
	sathyanarayanan.kuppuswamy@linux.intel.com,
	Brijesh Singh <brijesh.singh@amd.com>
Subject: [PATCH Part2 v5 39/45] KVM: SVM: Introduce ops for the post gfn map and unmap
Date: Fri, 20 Aug 2021 10:59:12 -0500
Message-ID: <20210820155918.7518-40-brijesh.singh@amd.com>
In-Reply-To: <20210820155918.7518-1-brijesh.singh@amd.com>

When SEV-SNP is enabled in a guest VM, guest memory pages can be either
private or shared. A write from the hypervisor goes through the RMP
checks. If the hardware sees that the hypervisor is attempting to write
to a guest private page, it triggers an RMP violation #PF.

To avoid the RMP violation, add post_{map,unmap}_gfn() ops that can be
used to verify that it is safe to map a given guest page. Use SRCU to
protect against page state changes for pages that are already mapped.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
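(Not part of the commit message: an illustrative sketch of how callers
are expected to use the new ops; svm_map_ghcb()/svm_unmap_ghcb() below
follow this pattern. It uses only the hooks and static calls introduced
in this patch and is not meant to compile stand-alone.)

	struct kvm_host_map map;
	int token, ret;

	ret = kvm_vcpu_map(vcpu, gfn, &map);
	if (ret)
		return ret;

	/*
	 * Takes sev->psc_srcu for reading and fails with -EBUSY if the
	 * pfn is already assigned as private in the RMP table, so the
	 * host never writes to a private page (no RMP violation #PF).
	 */
	ret = static_call(kvm_x86_post_map_gfn)(vcpu->kvm, map.gfn, map.pfn, &token);
	if (ret) {
		kvm_vcpu_unmap(vcpu, &map, false);
		return ret;
	}

	/*
	 * Access map.hva here. A concurrent page state change blocks in
	 * synchronize_srcu_expedited(&sev->psc_srcu) until the token is
	 * released below.
	 */

	kvm_vcpu_unmap(vcpu, &map, true);
	static_call(kvm_x86_post_unmap_gfn)(vcpu->kvm, map.gfn, map.pfn, token);
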
 arch/x86/include/asm/kvm-x86-ops.h |  2 +
 arch/x86/include/asm/kvm_host.h    |  4 ++
 arch/x86/kvm/svm/sev.c             | 69 +++++++++++++++++++++-----
 arch/x86/kvm/svm/svm.c             |  4 ++
 arch/x86/kvm/svm/svm.h             |  8 +++
 arch/x86/kvm/x86.c                 | 78 +++++++++++++++++++++++++++---
 6 files changed, 146 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 371756c7f8f4..c09bd40e0160 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -124,6 +124,8 @@ KVM_X86_OP(msr_filter_changed)
 KVM_X86_OP_NULL(complete_emulated_msr)
 KVM_X86_OP(alloc_apic_backing_page)
 KVM_X86_OP_NULL(rmp_page_level_adjust)
+KVM_X86_OP(post_map_gfn)
+KVM_X86_OP(post_unmap_gfn)
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_NULL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a6e764458f3e..5ac1ff097e8c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1463,7 +1463,11 @@ struct kvm_x86_ops {
 	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
 
 	void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
+
 	void (*rmp_page_level_adjust)(struct kvm *kvm, kvm_pfn_t pfn, int *level);
+
+	int (*post_map_gfn)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int *token);
+	void (*post_unmap_gfn)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int token);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0de85ed63e9b..65b578463271 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -336,6 +336,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 		if (ret)
 			goto e_free;
 
+		init_srcu_struct(&sev->psc_srcu);
 		ret = sev_snp_init(&argp->error);
 	} else {
 		ret = sev_platform_init(&argp->error);
@@ -2293,6 +2294,7 @@ void sev_vm_destroy(struct kvm *kvm)
 			WARN_ONCE(1, "Failed to free SNP guest context, leaking asid!\n");
 			return;
 		}
+		cleanup_srcu_struct(&sev->psc_srcu);
 	} else {
 		sev_unbind_asid(kvm, sev->handle);
 	}
@@ -2494,23 +2496,32 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 	kfree(svm->ghcb_sa);
 }
 
-static inline int svm_map_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map)
+static inline int svm_map_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map, int *token)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	u64 gfn = gpa_to_gfn(control->ghcb_gpa);
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 
-	if (kvm_vcpu_map(&svm->vcpu, gfn, map)) {
+	if (kvm_vcpu_map(vcpu, gfn, map)) {
 		/* Unable to map GHCB from guest */
 		pr_err("error mapping GHCB GFN [%#llx] from guest\n", gfn);
 		return -EFAULT;
 	}
 
+	if (sev_post_map_gfn(vcpu->kvm, map->gfn, map->pfn, token)) {
+		kvm_vcpu_unmap(vcpu, map, false);
+		return -EBUSY;
+	}
+
 	return 0;
 }
 
-static inline void svm_unmap_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map)
+static inline void svm_unmap_ghcb(struct vcpu_svm *svm, struct kvm_host_map *map, int token)
 {
-	kvm_vcpu_unmap(&svm->vcpu, map, true);
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+
+	kvm_vcpu_unmap(vcpu, map, true);
+	sev_post_unmap_gfn(vcpu->kvm, map->gfn, map->pfn, token);
 }
 
 static void dump_ghcb(struct vcpu_svm *svm)
@@ -2518,8 +2529,9 @@ static void dump_ghcb(struct vcpu_svm *svm)
 	struct kvm_host_map map;
 	unsigned int nbits;
 	struct ghcb *ghcb;
+	int token;
 
-	if (svm_map_ghcb(svm, &map))
+	if (svm_map_ghcb(svm, &map, &token))
 		return;
 
 	ghcb = map.hva;
@@ -2544,7 +2556,7 @@ static void dump_ghcb(struct vcpu_svm *svm)
 	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
 
 e_unmap:
-	svm_unmap_ghcb(svm, &map);
+	svm_unmap_ghcb(svm, &map, token);
 }
 
 static bool sev_es_sync_to_ghcb(struct vcpu_svm *svm)
@@ -2552,8 +2564,9 @@ static bool sev_es_sync_to_ghcb(struct vcpu_svm *svm)
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	struct kvm_host_map map;
 	struct ghcb *ghcb;
+	int token;
 
-	if (svm_map_ghcb(svm, &map))
+	if (svm_map_ghcb(svm, &map, &token))
 		return false;
 
 	ghcb = map.hva;
@@ -2579,7 +2592,7 @@ static bool sev_es_sync_to_ghcb(struct vcpu_svm *svm)
 
 	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, ghcb);
 
-	svm_unmap_ghcb(svm, &map);
+	svm_unmap_ghcb(svm, &map, token);
 
 	return true;
 }
@@ -2636,8 +2649,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm, u64 *exit_code)
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	struct kvm_host_map map;
 	struct ghcb *ghcb;
+	int token;
 
-	if (svm_map_ghcb(svm, &map))
+	if (svm_map_ghcb(svm, &map, &token))
 		return -EFAULT;
 
 	ghcb = map.hva;
@@ -2739,7 +2753,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm, u64 *exit_code)
 
 	sev_es_sync_from_ghcb(svm, ghcb);
 
-	svm_unmap_ghcb(svm, &map);
+	svm_unmap_ghcb(svm, &map, token);
 	return 0;
 
 vmgexit_err:
@@ -2760,7 +2774,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm, u64 *exit_code)
 	vcpu->run->internal.data[0] = *exit_code;
 	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
 
-	svm_unmap_ghcb(svm, &map);
+	svm_unmap_ghcb(svm, &map, token);
 	return -EINVAL;
 }
 
@@ -3036,6 +3050,9 @@ static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
 				return PSC_UNDEF_ERR;
 		}
 
+		/* Wait for all existing mapped gfns to be unmapped */
+		synchronize_srcu_expedited(&sev->psc_srcu);
+
 		write_lock(&kvm->mmu_lock);
 
 		rc = kvm_mmu_get_tdp_walk(vcpu, gpa, &pfn, &npt_level);
@@ -3604,3 +3621,33 @@ void sev_rmp_page_level_adjust(struct kvm *kvm, kvm_pfn_t pfn, int *level)
 	/* Adjust the level to keep the NPT and RMP in sync */
 	*level = min_t(size_t, *level, rmp_level);
 }
+
+int sev_post_map_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int *token)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	int level;
+
+	if (!sev_snp_guest(kvm))
+		return 0;
+
+	*token = srcu_read_lock(&sev->psc_srcu);
+
+	/* Fail if the pfn is already assigned (i.e. private) in the RMP table */
+	if (snp_lookup_rmpentry(pfn, &level) == 1) {
+		srcu_read_unlock(&sev->psc_srcu, *token);
+		pr_err_ratelimited("failed to map private gfn 0x%llx pfn 0x%llx\n", gfn, pfn);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+void sev_post_unmap_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int token)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+	if (!sev_snp_guest(kvm))
+		return;
+
+	srcu_read_unlock(&sev->psc_srcu, token);
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5f73f21a37a1..3784d389247b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4679,7 +4679,11 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 
 	.alloc_apic_backing_page = svm_alloc_apic_backing_page,
+
 	.rmp_page_level_adjust = sev_rmp_page_level_adjust,
+
+	.post_map_gfn = sev_post_map_gfn,
+	.post_unmap_gfn = sev_post_unmap_gfn,
 };
 
 static struct kvm_x86_init_ops svm_init_ops __initdata = {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index d10f7166b39d..ff91184f9b4a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -76,16 +76,22 @@ struct kvm_sev_info {
 	bool active;		/* SEV enabled guest */
 	bool es_active;		/* SEV-ES enabled guest */
 	bool snp_active;	/* SEV-SNP enabled guest */
+
 	unsigned int asid;	/* ASID used for this guest */
 	unsigned int handle;	/* SEV firmware handle */
 	int fd;			/* SEV device fd */
+
 	unsigned long pages_locked; /* Number of pages locked */
 	struct list_head regions_list;  /* List of registered regions */
+
 	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
+
 	struct kvm *enc_context_owner; /* Owner of copied encryption context */
 	struct misc_cg *misc_cg; /* For misc cgroup accounting */
+
 	u64 snp_init_flags;
 	void *snp_context;      /* SNP guest context page */
+	struct srcu_struct psc_srcu;
 };
 
 struct kvm_svm {
@@ -618,6 +624,8 @@ void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
 void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
 void sev_rmp_page_level_adjust(struct kvm *kvm, kvm_pfn_t pfn, int *level);
+int sev_post_map_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int *token);
+void sev_post_unmap_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int token);
 
 /* vmenter.S */
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index afcdc75a99f2..bf4389ffc88f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3095,6 +3095,65 @@ static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.apf.msr_en_val & mask) == mask;
 }
 
+static int kvm_map_gfn_protected(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+				 struct gfn_to_pfn_cache *cache, bool atomic, int *token)
+{
+	int ret;
+
+	ret = kvm_map_gfn(vcpu, gfn, map, cache, atomic);
+	if (ret)
+		return ret;
+
+	if (kvm_x86_ops.post_map_gfn) {
+		ret = static_call(kvm_x86_post_map_gfn)(vcpu->kvm, map->gfn, map->pfn, token);
+		if (ret)
+			kvm_unmap_gfn(vcpu, map, cache, false, atomic);
+	}
+
+	return ret;
+}
+
+static int kvm_unmap_gfn_protected(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+				   struct gfn_to_pfn_cache *cache, bool dirty,
+				   bool atomic, int token)
+{
+	int ret;
+
+	ret = kvm_unmap_gfn(vcpu, map, cache, dirty, atomic);
+
+	if (kvm_x86_ops.post_unmap_gfn)
+		static_call(kvm_x86_post_unmap_gfn)(vcpu->kvm, map->gfn, map->pfn, token);
+
+	return ret;
+}
+
+static int kvm_vcpu_map_protected(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
+				  int *token)
+{
+	int ret;
+
+	ret = kvm_vcpu_map(vcpu, gpa, map);
+	if (ret)
+		return ret;
+
+	if (kvm_x86_ops.post_map_gfn) {
+		ret = static_call(kvm_x86_post_map_gfn)(vcpu->kvm, map->gfn, map->pfn, token);
+		if (ret)
+			kvm_vcpu_unmap(vcpu, map, false);
+	}
+
+	return ret;
+}
+
+static void kvm_vcpu_unmap_protected(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+				     bool dirty, int token)
+{
+	kvm_vcpu_unmap(vcpu, map, dirty);
+
+	if (kvm_x86_ops.post_unmap_gfn)
+		static_call(kvm_x86_post_unmap_gfn)(vcpu->kvm, map->gfn, map->pfn, token);
+}
+
 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 {
 	gpa_t gpa = data & ~0x3f;
@@ -3185,6 +3244,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 {
 	struct kvm_host_map map;
 	struct kvm_steal_time *st;
+	int token;
 
 	if (kvm_xen_msr_enabled(vcpu->kvm)) {
 		kvm_xen_runstate_set_running(vcpu);
@@ -3195,8 +3255,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		return;
 
 	/* -EAGAIN is returned in atomic context so we can just return. */
-	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
-			&map, &vcpu->arch.st.cache, false))
+	if (kvm_map_gfn_protected(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
+				  &map, &vcpu->arch.st.cache, false, &token))
 		return;
 
 	st = map.hva +
@@ -3234,7 +3294,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 
 	st->version += 1;
 
-	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
+	kvm_unmap_gfn_protected(vcpu, &map, &vcpu->arch.st.cache, true, false, token);
 }
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -4271,6 +4331,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
 	struct kvm_host_map map;
 	struct kvm_steal_time *st;
+	int token;
 
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
@@ -4278,8 +4339,8 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.st.preempted)
 		return;
 
-	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
-			&vcpu->arch.st.cache, true))
+	if (kvm_map_gfn_protected(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
+				  &map, &vcpu->arch.st.cache, true, &token))
 		return;
 
 	st = map.hva +
@@ -4287,7 +4348,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 
 	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
-	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
+	kvm_unmap_gfn_protected(vcpu, &map, &vcpu->arch.st.cache, true, true, token);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -6816,6 +6877,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	gpa_t gpa;
 	char *kaddr;
 	bool exchanged;
+	int token;
 
 	/* guests cmpxchg8b have to be emulated atomically */
 	if (bytes > 8 || (bytes & (bytes - 1)))
@@ -6839,7 +6901,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
 		goto emul_write;
 
-	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
+	if (kvm_vcpu_map_protected(vcpu, gpa_to_gfn(gpa), &map, &token))
 		goto emul_write;
 
 	kaddr = map.hva + offset_in_page(gpa);
@@ -6861,7 +6923,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 		BUG();
 	}
 
-	kvm_vcpu_unmap(vcpu, &map, true);
+	kvm_vcpu_unmap_protected(vcpu, &map, true, token);
 
 	if (!exchanged)
 		return X86EMUL_CMPXCHG_FAILED;
-- 
2.17.1

