From: isaku.yamahata@intel.com
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: isaku.yamahata@intel.com, isaku.yamahata@gmail.com,
	Paolo Bonzini <pbonzini@redhat.com>
Subject: [PATCH v7 057/102] KVM: x86/tdp_mmu: implement MapGPA hypercall for TDX
Date: Mon, 27 Jun 2022 14:53:49 -0700
Message-ID: <7cda2771dffb7a70a33c30eca46ad703638c34d3.1656366338.git.isaku.yamahata@intel.com>
In-Reply-To: <cover.1656366337.git.isaku.yamahata@intel.com>

From: Isaku Yamahata <isaku.yamahata@intel.com>

The TDX Guest-Hypervisor Communication Interface (GHCI) specification
defines the MapGPA hypercall, which the guest TD uses to request that the
host VMM map a given GPA range as private or shared.

The hypercall declares that the guest TD will use the GPA range as shared
(or private) and that it will no longer be used as private (or shared).
The VMM should enforce this usage; it doesn't have to map the GPA at
hypercall time.
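
For illustration (a sketch only, not part of this patch): per the GHCI
spec, MapGPA is TDG.VP.VMCALL subfunction 0x10001, with the start GPA in
R12 and the size in bytes in R13.  A guest-side call would look roughly
like the following, where tdvmcall() and SHARED_GPA_BIT are hypothetical
stand-ins for the guest's TDCALL wrapper and the GPAW-dependent shared
bit:

	static int example_guest_map_gpa(u64 gpa, u64 size, bool shared)
	{
		/* Select which alias of the GPA range the guest will use. */
		if (shared)
			gpa |= SHARED_GPA_BIT;
		else
			gpa &= ~SHARED_GPA_BIT;

		/* Returns a GHCI status code; 0 on success. */
		return tdvmcall(0x10001 /* MapGPA */, gpa, size);
	}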

- Allocate 4K PTEs to record the SPTE_SHARED_MASK bit.

- Zap the aliased region.
  If a shared (or private) GPA is requested, zap the private (or shared)
  alias (modulo the shared bit).

- Record whether the requested GPA is shared (or private) via
  SPTE_SHARED_MASK in the SPTEs of both the shared and private EPT tables.
  - With SPTE_SHARED_MASK set, shared use of the GPA is allowed.
  - With SPTE_SHARED_MASK cleared, private use of the GPA is allowed.

  SPTE_SHARED_MASK is recorded in both the shared and private EPT to keep
  the EPT violation path fast on the normal guest TD execution path, at
  the cost of a more expensive map_gpa hypercall.

  If the guest TD faults on a GPA that isn't allowed (modulo the shared
  bit), KVM doesn't resolve the EPT violation and lets the vcpu retry.
  The vcpu will keep faulting until another vcpu maps the region with the
  MapGPA hypercall.  The non-present SPTE value (shadow_nonpresent_value)
  has SPTE_SHARED_MASK cleared, so the default behavior doesn't change.

- Don't map the GPA at hypercall time.
  The GPA is mapped on the next EPT violation.
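
Putting this together (an illustrative sketch only; the actual caller is
the TDX map_gpa hypercall handler added later in this series), a
host-side handler would drive kvm_mmu_map_gpa() roughly like this:

	static int example_handle_map_gpa(struct kvm_vcpu *vcpu,
					  gpa_t gpa, u64 size)
	{
		struct kvm *kvm = vcpu->kvm;
		/* The shared bit of the requested GPA selects the usage. */
		bool allow_private =
			!(gpa_to_gfn(gpa) & kvm_gfn_shared_mask(kvm));
		gfn_t start = gpa_to_gfn(gpa);
		gfn_t end = gpa_to_gfn(gpa + size);
		int ret;

		ret = kvm_mmu_map_gpa(vcpu, &start, end, allow_private);
		/*
		 * On -EAGAIN, 'start' was updated to the first unconverted
		 * GFN, so the guest can be told to retry the remainder.
		 */
		return ret;
	}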

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
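Note for reviewers (sketch only, not part of the diff): with the encoding
documented in the tdp_mmu.c comment below, the EPT violation path can
decide from a leaf SPTE alone whether a fault on a given alias is
currently allowed.  spte_shared_mask() is the accessor used elsewhere in
this series:

	static bool example_fault_is_allowed(u64 leaf_spte, bool fault_is_shared)
	{
		/*
		 * SPTE_SHARED_MASK set:   only the shared alias is allowed.
		 * SPTE_SHARED_MASK clear: only the private alias is allowed.
		 */
		return fault_is_shared == !!spte_shared_mask(leaf_spte);
	}
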
 arch/x86/kvm/mmu.h         |   3 +
 arch/x86/kvm/mmu/mmu.c     | 106 +++++++++++++++
 arch/x86/kvm/mmu/tdp_mmu.c | 271 ++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/mmu/tdp_mmu.h |   5 +
 4 files changed, 382 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 9ba60fd79d33..f5edf2e58dba 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -225,6 +225,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
 
 int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
 
+int kvm_mmu_map_gpa(struct kvm_vcpu *vcpu, gfn_t *startp, gfn_t end,
+		    bool allow_private);
+
 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ef925722ee28..a777a1d4278c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6323,6 +6323,112 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	}
 }
 
+static int kvm_mmu_populate_nonleaf(struct kvm_vcpu *vcpu, gfn_t start, gfn_t end)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memslots *slots;
+	struct kvm_memslot_iter iter;
+	int ret = 0;
+
+	/* No need to populate as mmu_map_gpa() handles single GPA. */
+	if (!is_tdp_mmu_enabled(kvm))
+		return 0;
+
+	slots = __kvm_memslots(kvm, 0 /* only normal ram. not SMM. */);
+	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
+		struct kvm_memory_slot *memslot = iter.slot;
+		gfn_t s = max(start, memslot->base_gfn);
+		gfn_t e = min(end, memslot->base_gfn + memslot->npages);
+
+		if (WARN_ON_ONCE(s >= e))
+			continue;
+
+		ret = kvm_tdp_mmu_populate_nonleaf(vcpu, kvm_gfn_private(kvm, s),
+						kvm_gfn_private(kvm, e), true, false);
+		if (ret)
+			break;
+		ret = kvm_tdp_mmu_populate_nonleaf(vcpu, kvm_gfn_shared(kvm, s),
+						kvm_gfn_shared(kvm, e), false, false);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+int kvm_mmu_map_gpa(struct kvm_vcpu *vcpu, gfn_t *startp, gfn_t end,
+		bool allow_private)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memslots *slots;
+	struct kvm_memslot_iter iter;
+	gfn_t start = *startp;
+	int ret;
+
+	if (!kvm_gfn_shared_mask(kvm))
+		return -EOPNOTSUPP;
+
+	start = start & ~kvm_gfn_shared_mask(kvm);
+	end = end & ~kvm_gfn_shared_mask(kvm);
+
+	/*
+	 * Allocate S-EPT pages first so that operations on leaf SPTE
+	 * entries can be done without memory allocation.
+	 */
+	while (true) {
+		ret = mmu_topup_memory_caches(vcpu, false);
+		if (ret)
+			return ret;
+
+		mutex_lock(&kvm->slots_lock);
+		write_lock(&kvm->mmu_lock);
+
+		ret = kvm_mmu_populate_nonleaf(vcpu, start, end);
+		if (!ret)
+			break;
+
+		write_unlock(&kvm->mmu_lock);
+		mutex_unlock(&kvm->slots_lock);
+		if (ret == -EAGAIN) {
+			if (need_resched())
+				cond_resched();
+			continue;
+		}
+		return ret;
+	}
+
+	slots = __kvm_memslots(kvm, 0 /* only normal ram. not SMM. */);
+	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
+		struct kvm_memory_slot *memslot = iter.slot;
+		gfn_t s = max(start, memslot->base_gfn);
+		gfn_t e = min(end, memslot->base_gfn + memslot->npages);
+
+		if (WARN_ON_ONCE(s >= e))
+			continue;
+		if (is_tdp_mmu_enabled(kvm)) {
+			ret = kvm_tdp_mmu_map_gpa(vcpu, &s, e, allow_private);
+			if (ret) {
+				start = s;
+				break;
+			}
+		} else {
+			ret = -EOPNOTSUPP;
+			break;
+		}
+	}
+
+	write_unlock(&kvm->mmu_lock);
+	mutex_unlock(&kvm->slots_lock);
+
+	if (ret == -EAGAIN) {
+		if (allow_private)
+			*startp = kvm_gfn_private(kvm, start);
+		else
+			*startp = kvm_gfn_shared(kvm, start);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_map_gpa);
+
 static unsigned long
 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 4f279700b3cc..c99f2c9a86dc 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -680,6 +680,13 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 		}
 		change.sept_page = sept_page;
 
+		/*
+		 * SPTE_SHARED_MASK is only changed by map_gpa, which holds
+		 * the write lock of mmu_lock.
+		 */
+		WARN_ON(shared &&
+			(spte_shared_mask(old_spte) !=
+				spte_shared_mask(new_spte)));
 		static_call(kvm_x86_handle_changed_private_spte)(kvm, &change);
 	}
 }
@@ -1324,7 +1331,8 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
 	return 0;
 }
 
-static int tdp_mmu_populate_nonleaf(struct kvm_vcpu *vcpu, struct tdp_iter *iter, bool account_nx)
+static int tdp_mmu_populate_nonleaf(
+	struct kvm_vcpu *vcpu, struct tdp_iter *iter, bool account_nx, bool shared)
 {
 	struct kvm_mmu_page *sp;
 	int ret;
@@ -1335,7 +1343,7 @@ static int tdp_mmu_populate_nonleaf(struct kvm_vcpu *vcpu, struct tdp_iter *iter
 	sp = tdp_mmu_alloc_sp(vcpu, iter->is_private, false);
 	tdp_mmu_init_child_sp(sp, iter);
 
-	ret = tdp_mmu_link_sp(vcpu->kvm, iter, sp, account_nx, true);
+	ret = tdp_mmu_link_sp(vcpu->kvm, iter, sp, account_nx, shared);
 	if (ret)
 		tdp_mmu_free_sp(sp);
 	return ret;
@@ -1411,7 +1419,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 			if (is_removed_spte(iter.old_spte))
 				break;
 
-			if (tdp_mmu_populate_nonleaf(vcpu, &iter, account_nx))
+			if (tdp_mmu_populate_nonleaf(vcpu, &iter, account_nx, true))
 				break;
 		}
 	}
@@ -2143,6 +2151,263 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 	return spte_set;
 }
 
+/*
+ * Allocate shadow page tables for the given GFN range so that subsequent
+ * operations on SPTEs can be done without memory allocation.
+ */
+int kvm_tdp_mmu_populate_nonleaf(
+	struct kvm_vcpu *vcpu, gfn_t start, gfn_t end, bool is_private, bool shared)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct tdp_iter iter;
+	int ret = 0;
+
+	kvm_lockdep_assert_mmu_lock_held(kvm, false);
+	rcu_read_lock();
+	tdp_mmu_for_each_pte(iter, vcpu->arch.mmu, is_private, start, end) {
+		if (iter.level == PG_LEVEL_4K)
+			continue;
+		if (is_shadow_present_pte(iter.old_spte) &&
+			is_large_pte(iter.old_spte)) {
+			/* TODO: large page support. */
+			WARN_ON_ONCE(true);
+			ret = -ENOSYS;
+			break;	/* don't return with rcu_read_lock() held */
+		}
+
+		if (is_shadow_present_pte(iter.old_spte))
+			continue;
+
+		/*
+		 * Guarantee that tdp_mmu_alloc_sp() succeeds: it assumes
+		 * that page allocation from the caches always succeeds.
+		 */
+		if (vcpu->arch.mmu_page_header_cache.nobjs == 0 ||
+			vcpu->arch.mmu_shadow_page_cache.nobjs == 0 ||
+			vcpu->arch.mmu_private_sp_cache.nobjs == 0) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		/*
+		 * The write lock of mmu_lock is held, so no other
+		 * thread can freeze the SPTE.
+		 */
+		ret = tdp_mmu_populate_nonleaf(vcpu, &iter, false, shared);
+		if (ret) {
+			/* As the write lock is held, this case shouldn't happen. */
+			WARN_ON_ONCE(true);
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+typedef void (*update_spte_t)(
+	struct kvm *kvm, struct tdp_iter *iter, bool allow_private);
+
+static int kvm_tdp_mmu_update_range(struct kvm_vcpu *vcpu, bool is_private,
+				gfn_t start, gfn_t end, gfn_t *nextp,
+				update_spte_t fn, bool allow_private)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct tdp_iter iter;
+	int ret = 0;
+
+	rcu_read_lock();
+	tdp_mmu_for_each_pte(iter, vcpu->arch.mmu, is_private, start, end) {
+		if (iter.level == PG_LEVEL_4K) {
+			fn(kvm, &iter, allow_private);
+			continue;
+		}
+
+		/*
+		 * Whether a GPA is allowed to be private or shared is
+		 * recorded at 4K granularity in the leaf SPTE as
+		 * SPTE_SHARED_MASK.  Break large pages into 4K pages.
+		 */
+		if (is_shadow_present_pte(iter.old_spte) &&
+			is_large_pte(iter.old_spte)) {
+			/*
+			 * TODO: large page support.
+			 * Large pages are not supported for TDX yet.
+			 */
+			WARN_ON_ONCE(true);
+			tdp_mmu_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
+			iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep);
+		}
+
+		if (!is_shadow_present_pte(iter.old_spte)) {
+			/*
+			 * Guarantee that tdp_mmu_alloc_sp() succeeds: it assumes
+			 * that page allocation from the caches always succeeds.
+			 */
+			if (vcpu->arch.mmu_page_header_cache.nobjs == 0 ||
+				vcpu->arch.mmu_shadow_page_cache.nobjs == 0 ||
+				vcpu->arch.mmu_private_sp_cache.nobjs == 0) {
+				ret = -EAGAIN;
+				break;
+			}
+			/*
+			 * The write lock of mmu_lock is held, so no other
+			 * thread can freeze the SPTE.
+			 */
+			ret = tdp_mmu_populate_nonleaf(vcpu, &iter, false, false);
+			if (ret) {
+				/* As the write lock is held, this case shouldn't happen. */
+				WARN_ON_ONCE(true);
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	if (ret == -EAGAIN)
+		*nextp = iter.next_last_level_gfn;
+
+	return ret;
+}
+
+static void kvm_tdp_mmu_update_shared_spte(
+	struct kvm *kvm, struct tdp_iter *iter, bool allow_private)
+{
+	u64 new_spte;
+
+	WARN_ON(iter->is_private);
+	if (allow_private) {
+		/* Zap SPTE and clear SPTE_SHARED_MASK */
+		new_spte = SHADOW_NONPRESENT_VALUE;
+		if (new_spte != iter->old_spte)
+			tdp_mmu_set_spte(kvm, iter, new_spte);
+	} else {
+		new_spte = iter->old_spte | SPTE_SHARED_MASK;
+		/* No side effect is needed */
+		if (new_spte != iter->old_spte)
+			__kvm_tdp_mmu_write_spte(iter->sptep, new_spte);
+	}
+}
+
+static void kvm_tdp_mmu_update_private_spte(
+	struct kvm *kvm, struct tdp_iter *iter, bool allow_private)
+{
+	u64 new_spte;
+
+	WARN_ON(!iter->is_private);
+	if (allow_private) {
+		new_spte = iter->old_spte & ~SPTE_SHARED_MASK;
+		/* No side effect is needed */
+		if (new_spte != iter->old_spte)
+			__kvm_tdp_mmu_write_spte(iter->sptep, new_spte);
+	} else {
+		if (is_shadow_present_pte(iter->old_spte)) {
+			/* Zap SPTE */
+			new_spte = shadow_nonpresent_spte(iter->old_spte) |
+				SPTE_SHARED_MASK;
+			if (new_spte != iter->old_spte)
+				tdp_mmu_set_spte(kvm, iter, new_spte);
+		} else {
+			new_spte = iter->old_spte | SPTE_SHARED_MASK;
+			/* No side effect is needed */
+			if (new_spte != iter->old_spte)
+				__kvm_tdp_mmu_write_spte(iter->sptep, new_spte);
+		}
+	}
+}
+
+/*
+ * Whether a GPA is allowed to be mapped private or shared is recorded in
+ * both the private and shared leaf SPTE entries as the SPTE_SHARED_MASK
+ * bit; the two must match.
+ * private leaf spte entry
+ * - present: private mapping is allowed. (already mapped)
+ * - non-present: private mapping is allowed.
+ * - present | SPTE_SHARED_MASK: invalid state.
+ * - non-present | SPTE_SHARED_MASK: shared mapping is allowed.
+ *                                        may or may not be mapped as shared.
+ * shared leaf spte entry
+ * - present: invalid state
+ * - non-present: private mapping is allowed.
+ * - present | SPTE_SHARED_MASK: shared mapping is allowed (already mapped)
+ * - non-present | SPTE_SHARED_MASK: shared mapping is allowed.
+ *
+ * state changes of leaf sptes:
+ * map_gpa(private):
+ *      private EPT entry: clear SPTE_SHARED_MASK
+ *	  present: nop
+ *	  non-present: nop
+ *	  non-present | SPTE_SHARED_MASK -> non-present
+ *	shared EPT entry: zap and clear SPTE_SHARED_MASK
+ *	  any -> non-present
+ * map_gpa(shared):
+ *	private EPT entry: zap and set SPTE_SHARED_MASK
+ *	  present     -> non-present | SPTE_SHARED_MASK
+ *	  non-present -> non-present | SPTE_SHARED_MASK
+ *	  non-present | SPTE_SHARED_MASK: nop
+ *	shared EPT entry: set SPTE_SHARED_MASK
+ *	  present | SPTE_SHARED_MASK: nop
+ *	  non-present -> non-present | SPTE_SHARED_MASK
+ *	  non-present | SPTE_SHARED_MASK: nop
+ * map(private GPA):
+ *	private EPT entry: try to populate
+ *	  present: nop
+ *	  non-present -> present
+ *	  non-present | SPTE_SHARED_MASK: nop. looping on EPT violation
+ *	shared EPT entry: nop
+ * map(shared GPA):
+ *	private EPT entry: nop
+ *	shared EPT entry: populate
+ *	  present | SPTE_SHARED_MASK: nop
+ *	  non-present | SPTE_SHARED_MASK -> present | SPTE_SHARED_MASK
+ *	  non-present: nop. looping on EPT violation
+ * zap(private GPA):
+ *	private EPT entry: zap and keep SPTE_SHARED_MASK
+ *	  present | SPTE_SHARED_MASK -> non-present | SPTE_SHARED_MASK
+ *	  non-present: nop as is_shadow_present_pte() is checked
+ *	  non-present | SPTE_SHARED_MASK: nop by is_shadow_present_pte()
+ *	shared EPT entry: nop
+ * zap(shared GPA):
+ *	private EPT entry: nop
+ *	shared EPT entry: zap and keep SPTE_SHARED_MASK
+ *	  present | SPTE_SHARED_MASK -> non-present | SPTE_SHARED_MASK
+ *	  non-present | SPTE_SHARED_MASK: nop
+ *	  non-present: nop.
+ */
+int kvm_tdp_mmu_map_gpa(struct kvm_vcpu *vcpu,
+			gfn_t *startp, gfn_t end, bool allow_private)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
+	gfn_t start = *startp;
+	gfn_t next;
+	int ret = 0;
+
+	lockdep_assert_held_write(&kvm->mmu_lock);
+	WARN_ON(start & kvm_gfn_shared_mask(kvm));
+	WARN_ON(end & kvm_gfn_shared_mask(kvm));
+
+	if (!VALID_PAGE(mmu->root.hpa) || !VALID_PAGE(mmu->private_root_hpa))
+		return -EINVAL;
+
+	next = end;
+	ret = kvm_tdp_mmu_update_range(
+		vcpu, false, kvm_gfn_shared(kvm, start), kvm_gfn_shared(kvm, end),
+		&next, kvm_tdp_mmu_update_shared_spte, allow_private);
+	if (ret) {
+		kvm_flush_remote_tlbs_with_address(kvm, start, next - start);
+		return ret;
+	}
+
+	ret = kvm_tdp_mmu_update_range(
+		vcpu, true, kvm_gfn_private(kvm, start), kvm_gfn_private(kvm, end),
+		&next, kvm_tdp_mmu_update_private_spte, allow_private);
+	if (ret == -EAGAIN) {
+		*startp = next;
+		end = *startp;
+	}
+	kvm_flush_remote_tlbs_with_address(kvm, start, end - start);
+	return ret;
+}
+
 /*
  * Return the level of the lowest level SPTE added to sptes.
  * That SPTE may be non-present.
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index d1655571eb2f..4d1c27911134 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -51,6 +51,11 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 				      gfn_t start, gfn_t end,
 				      int target_level, bool shared);
 
+int kvm_tdp_mmu_populate_nonleaf(struct kvm_vcpu *vcpu, gfn_t start, gfn_t end,
+				bool is_private, bool shared);
+int kvm_tdp_mmu_map_gpa(struct kvm_vcpu *vcpu,
+			gfn_t *startp, gfn_t end, bool allow_private);
+
 static inline void kvm_tdp_mmu_walk_lockless_begin(void)
 {
 	rcu_read_lock();
-- 
2.25.1

