kvm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: isaku.yamahata@intel.com
To: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	"H . Peter Anvin" <hpa@zytor.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	x86@kernel.org, linux-kernel@vger.kernel.org,
	kvm@vger.kernel.org
Cc: isaku.yamahata@intel.com, isaku.yamahata@gmail.com,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Kai Huang <kai.huang@linux.intel.com>,
	Xiaoyao Li <xiaoyao.li@intel.com>
Subject: [RFC PATCH 56/67] KVM: TDX: Add macro framework to wrap TDX SEAMCALLs
Date: Mon, 16 Nov 2020 10:26:41 -0800	[thread overview]
Message-ID: <25f0d2c2f73c20309a1b578cc5fc15f4fd6b9a13.1605232743.git.isaku.yamahata@intel.com> (raw)
In-Reply-To: <cover.1605232743.git.isaku.yamahata@intel.com>

From: Sean Christopherson <sean.j.christopherson@intel.com>

Co-developed-by: Kai Huang <kai.huang@linux.intel.com>
Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Co-developed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/tdx_ops.h | 531 +++++++++++++++++++++++++++++++++++++
 1 file changed, 531 insertions(+)
 create mode 100644 arch/x86/kvm/vmx/tdx_ops.h

diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
new file mode 100644
index 000000000000..a6f87cfe9bda
--- /dev/null
+++ b/arch/x86/kvm/vmx/tdx_ops.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_TDX_OPS_H
+#define __KVM_X86_TDX_OPS_H
+
+#include <linux/compiler.h>
+
+#include <asm/asm.h>
+#include <asm/kvm_host.h>
+
+/*
+ * struct tdx_ex_ret - Extended return values of a SEAMCALL.
+ *
+ * SEAMCALL leaves return extra data in RCX/RDX/R8-R10.  Which registers
+ * carry meaningful data depends on the leaf function, so each leaf's
+ * outputs are overlaid as anonymous structs in a union over the raw
+ * register values.  Only the view matching the invoked leaf is valid.
+ */
+struct tdx_ex_ret {
+	union {
+		/* Used to retrieve values from hardware. */
+		struct {
+			u64 rcx;
+			u64 rdx;
+			u64 r8;
+			u64 r9;
+			u64 r10;
+		};
+		/* Functions that return SEPT and level that failed. */
+		struct {
+			u64 septep;
+			int level;	/* NOTE(review): overlays only the low 32 bits of rdx — confirm vs. ABI */
+		};
+		/* TDDBG{RD,WR} return the TDR, field code, and value. */
+		struct {
+			u64 tdr;
+			u64 field;
+			u64 field_val;
+		};
+		/* TDDBG{RD,WR}MEM return the address and its value. */
+		struct {
+			u64 addr;
+			u64 val;
+		};
+		/* TDRDPAGEMD and TDRECLAIMPAGE return page metadata. */
+		struct {
+			u64 page_type;
+			u64 owner;
+			u64 page_size;
+		};
+		/* TDRDSEPT returns the contents of the SEPT entry. */
+		struct {
+			u64 septe;
+			u64 ign;
+		};
+		/*
+		 * TDSYSINFO returns the buffer address and its size, and the
+		 * CMR_INFO address and its number of entries.
+		 */
+		struct {
+			u64 buffer;
+			u64 nr_bytes;
+			u64 cmr_info;
+			u64 nr_cmr_entries;
+		};
+		/*
+		 * TDINIT and TDSYSINIT return CPUID info on error.  Note, only
+		 * the leaf and subleaf are valid on TDINIT error.
+		 */
+		struct {
+			u32 leaf;
+			u32 subleaf;
+			u32 eax_mask;
+			u32 ebx_mask;
+			u32 ecx_mask;
+			u32 edx_mask;
+			u32 eax_val;
+			u32 ebx_val;
+			u32 ecx_val;
+			u32 edx_val;
+		};
+		/* TDSYSINITTDMR returns the input PA and next PA. */
+		struct {
+			u64 prev;
+			u64 next;
+		};
+	};
+};
+
+/*
+ * Log a failed SEAMCALL (ratelimited).  @err is the raw TDX-SEAM error
+ * code; cast to u64 to match the %llx conversion regardless of the
+ * caller's type.  No trailing semicolon: this is a statement-like macro,
+ * the caller terminates it (the original's ternary with identical arms,
+ * "SEAMCALL_##op ? (err) : (err)", was dead code and has been dropped).
+ */
+#define pr_seamcall_error(op, err)					  \
+	pr_err_ratelimited("SEAMCALL[" #op "] failed: 0x%llx (cpu %d)\n", \
+			   (u64)(err), smp_processor_id())
+
+/*
+ * WARN once and log (ratelimited) when a SEAMCALL fails.  Evaluates to a
+ * non-zero value iff @err was non-zero, so callers can branch on failure.
+ */
+#define TDX_ERR(err, op)			\
+({						\
+	int __ret_warn_on = WARN_ON_ONCE(err);	\
+						\
+	if (unlikely(__ret_warn_on))		\
+		pr_seamcall_error(op, err);	\
+	__ret_warn_on;				\
+})
+
+/* Placeholder until TDENTER is wired up; unconditionally reports success. */
+#define tdenter(args...)		({ 0; })
+
+/* Raw opcode bytes for SEAMCALL (66 0f 01 cf); assembler may not know it. */
+#define seamcall ".byte 0x66,0x0f,0x01,0xcf"
+
+#ifndef	INTEL_TDX_BOOT_TIME_SEAMCALL
+/* Runtime flavor: a fault during SEAMCALL funnels into kvm_spurious_fault(). */
+#define __seamcall				\
+	"1:" seamcall "\n\t"			\
+	"jmp 3f\n\t"				\
+	"2: call kvm_spurious_fault\n\t"	\
+	"3:\n\t"				\
+	_ASM_EXTABLE(1b, 2b)
+#else
+/*
+ * The default BUG()s on faults, which is undesirable during boot, and calls
+ * kvm_spurious_fault(), which isn't linkable if KVM is built as a module.
+ * RAX contains '0' on success, TDX-SEAM errno on failure, vector on fault.
+ */
+#define __seamcall			\
+	"1:" seamcall "\n\t"		\
+	"2: \n\t"			\
+	_ASM_EXTABLE_FAULT(1b, 2b)
+#endif
+
+/*
+ * Execute SEAMCALL leaf @fn with the given input constraints and return its
+ * RAX status.  The macro expands to a 'return' statement, so it MUST be the
+ * final statement of a wrapper function returning u64.
+ * NOTE(review): no "memory" clobber and no clobber list beyond the outputs —
+ * confirm against TDX-SEAM's register-preservation rules.
+ */
+#define seamcall_N(fn, inputs...)					\
+do {									\
+	u64 ret;							\
+									\
+	asm volatile(__seamcall						\
+		     : ASM_CALL_CONSTRAINT, "=a"(ret)			\
+		     : "a"(SEAMCALL_##fn), inputs			\
+		     : );						\
+	return ret;							\
+} while (0)
+
+/* Arity helpers: route 0-4 operands into RCX/RDX/R8/R9 per the SEAMCALL ABI. */
+#define seamcall_0(fn)	 						\
+	seamcall_N(fn, "i"(0))
+#define seamcall_1(fn, rcx)	 					\
+	seamcall_N(fn, "c"(rcx))
+#define seamcall_2(fn, rcx, rdx)					\
+	seamcall_N(fn, "c"(rcx), "d"(rdx))
+#define seamcall_3(fn, rcx, rdx, __r8)					\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+									\
+	seamcall_N(fn, "c"(rcx), "d"(rdx), "r"(r8));			\
+} while (0)
+#define seamcall_4(fn, rcx, rdx, __r8, __r9)				\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+	register long r9 asm("r9") = __r9;				\
+									\
+	seamcall_N(fn, "c"(rcx), "d"(rdx), "r"(r8), "r"(r9));		\
+} while (0)
+
+/* As seamcall_N, but additionally captures RCX/RDX outputs into @ex. */
+#define seamcall_N_2(fn, ex, inputs...)					\
+do {									\
+	u64 ret;							\
+									\
+	asm volatile(__seamcall						\
+		     : ASM_CALL_CONSTRAINT, "=a"(ret),			\
+		       "=c"((ex)->rcx), "=d"((ex)->rdx)			\
+		     : "a"(SEAMCALL_##fn), inputs			\
+		     : );						\
+	return ret;							\
+} while (0)
+
+#define seamcall_0_2(fn, ex)						\
+	seamcall_N_2(fn, ex, "i"(0))
+#define seamcall_1_2(fn, rcx, ex)					\
+	seamcall_N_2(fn, ex, "c"(rcx))
+#define seamcall_2_2(fn, rcx, rdx, ex)					\
+	seamcall_N_2(fn, ex, "c"(rcx), "d"(rdx))
+#define seamcall_3_2(fn, rcx, rdx, __r8, ex)				\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+									\
+	seamcall_N_2(fn, ex, "c"(rcx), "d"(rdx), "r"(r8));		\
+} while (0)
+#define seamcall_4_2(fn, rcx, rdx, __r8, __r9, ex)			\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+	register long r9 asm("r9") = __r9;				\
+									\
+	seamcall_N_2(fn, ex, "c"(rcx), "d"(rdx), "r"(r8), "r"(r9));	\
+} while (0)
+
+/* As seamcall_N_2, but additionally captures R8 into @ex. */
+#define seamcall_N_3(fn, ex, inputs...)					\
+do {									\
+	register long r8_out asm("r8");					\
+	u64 ret;							\
+									\
+	asm volatile(__seamcall						\
+		     : ASM_CALL_CONSTRAINT, "=a"(ret),			\
+		       "=c"((ex)->rcx), "=d"((ex)->rdx), "=r"(r8_out)	\
+		     : "a"(SEAMCALL_##fn), inputs			\
+		     : );						\
+	(ex)->r8 = r8_out;						\
+	return ret;							\
+} while (0)
+
+#define seamcall_0_3(fn, ex)						\
+	seamcall_N_3(fn, ex, "i"(0))
+#define seamcall_1_3(fn, rcx, ex)					\
+	seamcall_N_3(fn, ex, "c"(rcx))
+#define seamcall_2_3(fn, rcx, rdx, ex)					\
+	seamcall_N_3(fn, ex, "c"(rcx), "d"(rdx))
+#define seamcall_3_3(fn, rcx, rdx, __r8, ex)				\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+									\
+	seamcall_N_3(fn, ex, "c"(rcx), "d"(rdx), "r"(r8));		\
+} while (0)
+#define seamcall_4_3(fn, rcx, rdx, __r8, __r9, ex)			\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+	register long r9 asm("r9") = __r9;				\
+									\
+	seamcall_N_3(fn, ex, "c"(rcx), "d"(rdx), "r"(r8), "r"(r9));	\
+} while (0)
+
+/* As seamcall_N_3, but additionally captures R9 into @ex. */
+#define seamcall_N_4(fn, ex, inputs...)					\
+do {									\
+	register long r8_out asm("r8");					\
+	register long r9_out asm("r9");					\
+	u64 ret;							\
+									\
+	asm volatile(__seamcall						\
+		     : ASM_CALL_CONSTRAINT, "=a"(ret), "=c"((ex)->rcx),	\
+		       "=d"((ex)->rdx), "=r"(r8_out), "=r"(r9_out)	\
+		     : "a"(SEAMCALL_##fn), inputs			\
+		     : );						\
+	(ex)->r8 = r8_out;						\
+	(ex)->r9 = r9_out;						\
+	return ret;							\
+} while (0)
+
+#define seamcall_0_4(fn, ex)						\
+	seamcall_N_4(fn, ex, "i"(0))
+#define seamcall_1_4(fn, rcx, ex)					\
+	seamcall_N_4(fn, ex, "c"(rcx))
+#define seamcall_2_4(fn, rcx, rdx, ex)					\
+	seamcall_N_4(fn, ex, "c"(rcx), "d"(rdx))
+#define seamcall_3_4(fn, rcx, rdx, __r8, ex)				\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+									\
+	seamcall_N_4(fn, ex, "c"(rcx), "d"(rdx), "r"(r8));		\
+} while (0)
+#define seamcall_4_4(fn, rcx, rdx, __r8, __r9, ex)			\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+	register long r9 asm("r9") = __r9;				\
+									\
+	seamcall_N_4(fn, ex, "c"(rcx), "d"(rdx), "r"(r8), "r"(r9));	\
+} while (0)
+
+/* As seamcall_N_4, but additionally captures R10 into @ex. */
+#define seamcall_N_5(fn, ex, inputs...)					\
+do {									\
+	register long r8_out asm("r8");					\
+	register long r9_out asm("r9");					\
+	register long r10_out asm("r10");				\
+	u64 ret;							\
+									\
+	asm volatile(__seamcall						\
+		     : ASM_CALL_CONSTRAINT, "=a"(ret), "=c"((ex)->rcx),	\
+		       "=d"((ex)->rdx), "=r"(r8_out), "=r"(r9_out),	\
+		       "=r"(r10_out)					\
+		     : "a"(SEAMCALL_##fn), inputs			\
+		     : );						\
+	(ex)->r8 = r8_out;						\
+	(ex)->r9 = r9_out;						\
+	(ex)->r10 = r10_out;						\
+	return ret;							\
+} while (0)
+
+#define seamcall_0_5(fn, ex)						\
+	seamcall_N_5(fn, ex, "i"(0))
+#define seamcall_1_5(fn, rcx, ex)					\
+	seamcall_N_5(fn, ex, "c"(rcx))
+#define seamcall_2_5(fn, rcx, rdx, ex)					\
+	seamcall_N_5(fn, ex, "c"(rcx), "d"(rdx))
+#define seamcall_3_5(fn, rcx, rdx, __r8, ex)				\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+									\
+	seamcall_N_5(fn, ex, "c"(rcx), "d"(rdx), "r"(r8));		\
+} while (0)
+#define seamcall_4_5(fn, rcx, rdx, __r8, __r9, ex)			\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+	register long r9 asm("r9") = __r9;				\
+									\
+	seamcall_N_5(fn, ex, "c"(rcx), "d"(rdx), "r"(r8), "r"(r9));	\
+} while (0)
+#define seamcall_5_5(fn, rcx, rdx, __r8, __r9, __r10, ex)		\
+do {									\
+	register long r8 asm("r8") = __r8;				\
+	register long r9 asm("r9") = __r9;				\
+	register long r10 asm("r10") = __r10;				\
+									\
+	seamcall_N_5(fn, ex, "c"(rcx), "d"(rdx), "r"(r8), "r"(r9), "r"(r10)); \
+} while (0)
+
+/*
+ * Typed wrappers, one per SEAMCALL leaf.  Each body "returns" via the
+ * 'return' embedded in its seamcall_* macro, so the functions have no
+ * visible return statement.  Where a wrapper passes 'gpa | level', the
+ * SEPT level is packed into the low bits of the GPA operand — assumes
+ * level always fits below the page shift; TODO confirm against the
+ * TDX-SEAM specification's operand encoding.
+ */
+static inline u64 tdaddcx(hpa_t tdr, hpa_t addr)
+{
+	seamcall_2(TDADDCX, addr, tdr);
+}
+
+static inline u64 tdaddpage(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source,
+			    struct tdx_ex_ret *ex)
+{
+	seamcall_4_2(TDADDPAGE, gpa, tdr, hpa, source, ex);
+}
+
+static inline u64 tdaddsept(hpa_t tdr, gpa_t gpa, int level, hpa_t page,
+			    struct tdx_ex_ret *ex)
+{
+	seamcall_3_2(TDADDSEPT, gpa | level, tdr, page, ex);
+}
+
+static inline u64 tdaddvpx(hpa_t tdvpr, hpa_t addr)
+{
+	seamcall_2(TDADDVPX, addr, tdvpr);
+}
+
+static inline u64 tdassignhkid(hpa_t tdr, int hkid)
+{
+	seamcall_3(TDASSIGNHKID, tdr, 0, hkid);
+}
+
+static inline u64 tdaugpage(hpa_t tdr, gpa_t gpa, hpa_t hpa,
+			    struct tdx_ex_ret *ex)
+{
+	seamcall_3_2(TDAUGPAGE, gpa, tdr, hpa, ex);
+}
+
+static inline u64 tdblock(hpa_t tdr, gpa_t gpa, int level,
+			  struct tdx_ex_ret *ex)
+{
+	seamcall_2_2(TDBLOCK, gpa | level, tdr, ex);
+}
+
+static inline u64 tdconfigkey(hpa_t tdr)
+{
+	seamcall_1(TDCONFIGKEY, tdr);
+}
+
+static inline u64 tdcreate(hpa_t tdr, int hkid)
+{
+	seamcall_2(TDCREATE, tdr, hkid);
+}
+
+static inline u64 tdcreatevp(hpa_t tdr, hpa_t tdvpr)
+{
+	seamcall_2(TDCREATEVP, tdvpr, tdr);
+}
+
+static inline u64 tddbgrd(hpa_t tdr, u64 field, struct tdx_ex_ret *ex)
+{
+	seamcall_2_3(TDDBGRD, tdr, field, ex);
+}
+
+static inline u64 tddbgwr(hpa_t tdr, u64 field, u64 val, u64 mask,
+			  struct tdx_ex_ret *ex)
+{
+	seamcall_4_3(TDDBGWR, tdr, field, val, mask, ex);
+}
+
+static inline u64 tddbgrdmem(hpa_t addr, struct tdx_ex_ret *ex)
+{
+	seamcall_1_2(TDDBGRDMEM, addr, ex);
+}
+
+static inline u64 tddbgwrmem(hpa_t addr, u64 val, struct tdx_ex_ret *ex)
+{
+	seamcall_2_2(TDDBGWRMEM, addr, val, ex);
+}
+
+static inline u64 tddemotepage(hpa_t tdr, gpa_t gpa, int level, hpa_t page,
+			       struct tdx_ex_ret *ex)
+{
+	seamcall_3_2(TDDEMOTEPAGE, gpa | level, tdr, page, ex);
+}
+
+static inline u64 tdextendmr(hpa_t tdr, gpa_t gpa, struct tdx_ex_ret *ex)
+{
+	seamcall_2_2(TDEXTENDMR, gpa, tdr, ex);
+}
+
+static inline u64 tdfinalizemr(hpa_t tdr)
+{
+	seamcall_1(TDFINALIZEMR, tdr);
+}
+
+static inline u64 tdflushvp(hpa_t tdvpr)
+{
+	seamcall_1(TDFLUSHVP, tdvpr);
+}
+
+static inline u64 tdflushvpdone(hpa_t tdr)
+{
+	seamcall_1(TDFLUSHVPDONE, tdr);
+}
+
+static inline u64 tdfreehkids(hpa_t tdr)
+{
+	seamcall_1(TDFREEHKIDS, tdr);
+}
+
+static inline u64 tdinit(hpa_t tdr, hpa_t td_params, struct tdx_ex_ret *ex)
+{
+	seamcall_2_2(TDINIT, tdr, td_params, ex);
+}
+
+static inline u64 tdinitvp(hpa_t tdvpr, u64 rcx)
+{
+	seamcall_2(TDINITVP, tdvpr, rcx);
+}
+
+static inline u64 tdpromotepage(hpa_t tdr, gpa_t gpa, int level,
+				struct tdx_ex_ret *ex)
+{
+	seamcall_2_2(TDPROMOTEPAGE, gpa | level, tdr, ex);
+}
+
+static inline u64 tdrdpagemd(hpa_t page, struct tdx_ex_ret *ex)
+{
+	seamcall_1_3(TDRDPAGEMD, page, ex);
+}
+
+static inline u64 tdrdsept(hpa_t tdr, gpa_t gpa, int level,
+			   struct tdx_ex_ret *ex)
+{
+	seamcall_2_2(TDRDSEPT, gpa | level, tdr, ex);
+}
+
+static inline u64 tdrdvps(hpa_t tdvpr, u64 field, struct tdx_ex_ret *ex)
+{
+	seamcall_2_3(TDRDVPS, tdvpr, field, ex);
+}
+
+static inline u64 tdreclaimhkids(hpa_t tdr)
+{
+	seamcall_1(TDRECLAIMHKIDS, tdr);
+}
+
+static inline u64 tdreclaimpage(hpa_t page, struct tdx_ex_ret *ex)
+{
+	seamcall_1_3(TDRECLAIMPAGE, page, ex);
+}
+
+static inline u64 tdremovepage(hpa_t tdr, gpa_t gpa, int level,
+				struct tdx_ex_ret *ex)
+{
+	seamcall_2_2(TDREMOVEPAGE, gpa | level, tdr, ex);
+}
+
+static inline u64 tdremovesept(hpa_t tdr, gpa_t gpa, int level,
+			       struct tdx_ex_ret *ex)
+{
+	seamcall_2_2(TDREMOVESEPT, gpa | level, tdr, ex);
+}
+
+static inline u64 tdsysconfig(hpa_t tdmr, int nr_entries, int hkid)
+{
+	seamcall_3(TDSYSCONFIG, tdmr, nr_entries, hkid);
+}
+
+static inline u64 tdsysconfigkey(void)
+{
+	seamcall_0(TDSYSCONFIGKEY);
+}
+
+static inline u64 tdsysinfo(hpa_t tdsysinfo, int nr_bytes, hpa_t cmr_info,
+			    int nr_cmr_entries, struct tdx_ex_ret *ex)
+{
+	seamcall_4_4(TDSYSINFO, tdsysinfo, nr_bytes, cmr_info, nr_cmr_entries, ex);
+}
+
+static inline u64 tdsysinit(u64 attributes, struct tdx_ex_ret *ex)
+{
+	seamcall_1_5(TDSYSINIT, attributes, ex);
+}
+
+static inline u64 tdsysinitlp(struct tdx_ex_ret *ex)
+{
+	seamcall_0_3(TDSYSINITLP, ex);
+}
+
+static inline u64 tdsysinittdmr(hpa_t tdmr, struct tdx_ex_ret *ex)
+{
+	seamcall_1_2(TDSYSINITTDMR, tdmr, ex);
+}
+
+static inline u64 tdsysshutdownlp(void)
+{
+	seamcall_0(TDSYSSHUTDOWNLP);
+}
+
+static inline u64 tdteardown(hpa_t tdr)
+{
+	seamcall_1(TDTEARDOWN, tdr);
+}
+
+static inline u64 tdtrack(hpa_t tdr)
+{
+	seamcall_1(TDTRACK, tdr);
+}
+
+static inline u64 tdunblock(hpa_t tdr, gpa_t gpa, int level,
+			    struct tdx_ex_ret *ex)
+{
+	seamcall_2_2(TDUNBLOCK, gpa | level, tdr, ex);
+}
+
+static inline u64 tdwbcache(bool resume)
+{
+	seamcall_1(TDWBCACHE, resume ? 1 : 0);
+}
+
+static inline u64 tdwbinvdpage(hpa_t page)
+{
+	seamcall_1(TDWBINVDPAGE, page);
+}
+
+static inline u64 tdwrsept(hpa_t tdr, gpa_t gpa, int level, u64 val,
+			   struct tdx_ex_ret *ex)
+{
+	seamcall_3_2(TDWRSEPT, gpa | level, tdr, val, ex);
+}
+
+static inline u64 tdwrvps(hpa_t tdvpr, u64 field, u64 val, u64 mask,
+			  struct tdx_ex_ret *ex)
+{
+	seamcall_4_3(TDWRVPS, tdvpr, field, val, mask, ex);
+}
+
+#endif /* __KVM_X86_TDX_OPS_H */
-- 
2.17.1


  parent reply	other threads:[~2020-11-16 18:29 UTC|newest]

Thread overview: 78+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-11-16 18:25 [RFC PATCH 00/67] KVM: X86: TDX support isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 01/67] x86/cpufeatures: Add synthetic feature flag for TDX (in host) isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 02/67] x86/msr-index: Define MSR_IA32_MKTME_KEYID_PART used by TDX isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 03/67] x86/cpu: Move get_builtin_firmware() common code (from microcode only) isaku.yamahata
2020-11-25 22:09   ` Borislav Petkov
2020-11-26  0:18     ` Sean Christopherson
2020-11-26 10:12       ` Borislav Petkov
2020-11-30 19:18         ` Sean Christopherson
2020-11-16 18:25 ` [RFC PATCH 04/67] KVM: Export kvm_io_bus_read for use by TDX for PV MMIO isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 05/67] KVM: Enable hardware before doing arch VM initialization isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 06/67] KVM: x86: Split core of hypercall emulation to helper function isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 07/67] KVM: x86: Export kvm_mmio tracepoint for use by TDX for PV MMIO isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 08/67] KVM: x86/mmu: Zap only leaf SPTEs for deleted/moved memslot by default isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 09/67] KVM: Add infrastructure and macro to mark VM as bugged isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 10/67] KVM: Export kvm_make_all_cpus_request() for use in marking VMs " isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 11/67] KVM: x86: Use KVM_BUG/KVM_BUG_ON to handle bugs that are fatal to the VM isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 12/67] KVM: x86/mmu: Mark VM as bugged if page fault returns RET_PF_INVALID isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 13/67] KVM: VMX: Explicitly check for hv_remote_flush_tlb when loading pgd() isaku.yamahata
2020-11-16 18:25 ` [RFC PATCH 14/67] KVM: Add max_vcpus field in common 'struct kvm' isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 15/67] KVM: x86: Add vm_type to differentiate legacy VMs from protected VMs isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 16/67] KVM: x86: Hoist kvm_dirty_regs check out of sync_regs() isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 17/67] KVM: x86: Introduce "protected guest" concept and block disallowed ioctls isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 18/67] KVM: x86: Add per-VM flag to disable direct IRQ injection isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 19/67] KVM: x86: Add flag to disallow #MC injection / KVM_X86_SETUP_MCE isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 20/67] KVM: x86: Make KVM_CAP_X86_SMM a per-VM capability isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 21/67] KVM: x86: Add flag to mark TSC as immutable (for TDX) isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 22/67] KVM: Add per-VM flag to mark read-only memory as unsupported isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 23/67] KVM: Add per-VM flag to disable dirty logging of memslots for TDs isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 24/67] KVM: x86: Add per-VM flag to disable in-kernel I/O APIC and level routes isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 25/67] KVM: x86: Allow host-initiated WRMSR to set X2APIC regardless of CPUID isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 26/67] KVM: x86: Add kvm_x86_ops .cache_gprs() and .flush_gprs() isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 27/67] KVM: x86: Add support for vCPU and device-scoped KVM_MEMORY_ENCRYPT_OP isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 28/67] KVM: x86: Introduce vm_teardown() hook in kvm_arch_vm_destroy() isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 29/67] KVM: x86: Add a switch_db_regs flag to handle TDX's auto-switched behavior isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 30/67] KVM: x86: Check for pending APICv interrupt in kvm_vcpu_has_events() isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 31/67] KVM: x86: Add option to force LAPIC expiration wait isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 32/67] KVM: x86: Add guest_supported_xss placholder isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 33/67] KVM: Export kvm_is_reserved_pfn() for use by TDX isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 34/67] KVM: x86: Add infrastructure for stolen GPA bits isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 35/67] KVM: x86/mmu: Explicitly check for MMIO spte in fast page fault isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 36/67] KVM: x86/mmu: Track shadow MMIO value on a per-VM basis isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 37/67] KVM: x86/mmu: Ignore bits 63 and 62 when checking for "present" SPTEs isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 38/67] KVM: x86/mmu: Allow non-zero init value for shadow PTE isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 39/67] KVM: x86/mmu: Refactor shadow walk in __direct_map() to reduce indentation isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 40/67] KVM: x86/mmu: Return old SPTE from mmu_spte_clear_track_bits() isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 41/67] KVM: x86/mmu: Frame in support for private/inaccessible shadow pages isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 42/67] KVM: x86/mmu: Move 'pfn' variable to caller of direct_page_fault() isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 43/67] KVM: x86/mmu: Introduce kvm_mmu_map_tdp_page() for use by TDX isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 44/67] KVM: VMX: Modify NMI and INTR handlers to take intr_info as param isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 45/67] KVM: VMX: Move NMI/exception handler to common helper isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 46/67] KVM: VMX: Split out guts of EPT violation to common/exposed function isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 47/67] KVM: VMX: Define EPT Violation architectural bits isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 48/67] KVM: VMX: Define VMCS encodings for shared EPT pointer isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 49/67] KVM: VMX: Add 'main.c' to wrap VMX and TDX isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 50/67] KVM: VMX: Move setting of EPT MMU masks to common VT-x code isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 51/67] KVM: VMX: Move register caching logic to common code isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 52/67] KVM: TDX: Add TDX "architectural" error codes isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 53/67] KVM: TDX: Add architectural definitions for structures and values isaku.yamahata
2021-06-11  2:25   ` Erdem Aktas
2020-11-16 18:26 ` [RFC PATCH 54/67] KVM: TDX: Define TDCALL exit reason isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 55/67] KVM: TDX: Add SEAMRR related MSRs macro definition isaku.yamahata
2020-11-16 18:26 ` isaku.yamahata [this message]
2020-11-16 18:26 ` [RFC PATCH 57/67] KVM: TDX: Stub in tdx.h with structs, accessors, and VMCS helpers isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 58/67] KVM: VMX: Add macro framework to read/write VMCS for VMs and TDs isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 59/67] KVM: VMX: Move AR_BYTES encoder/decoder helpers to common.h isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 60/67] KVM: VMX: MOVE GDT and IDT accessors to common code isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 61/67] KVM: VMX: Move .get_interrupt_shadow() implementation to common VMX code isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 62/67] KVM: TDX: Load and init TDX-SEAM module during boot isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 63/67] cpu/hotplug: Document that TDX also depends on booting CPUs once isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 64/67] KVM: TDX: Add "basic" support for building and running Trust Domains isaku.yamahata
     [not found]   ` <CAAYXXYwHp-wiAsSjfsLngriGZpdQHUVY6o7zdGrN2fNm_RJZAQ@mail.gmail.com>
2021-06-11 15:22     ` Sean Christopherson
     [not found]     ` <CAAYXXYxX_ns-D_OJCOkA+jgzSV6Hb=oHyLDb5fYMwF-2X5QAgQ@mail.gmail.com>
2021-06-15  1:10       ` Isaku Yamahata
2020-11-16 18:26 ` [RFC PATCH 65/67] KVM: x86: Mark the VM (TD) as bugged if non-coherent DMA is detected isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 66/67] fixup! KVM: TDX: Add "basic" support for building and running Trust Domains isaku.yamahata
2020-11-16 18:26 ` [RFC PATCH 67/67] KVM: X86: not for review: add dummy file for TDX-SEAM module isaku.yamahata
2021-05-19 16:37 ` [RFC PATCH 00/67] KVM: X86: TDX support Connor Kuehl
2021-05-20  9:31   ` Isaku Yamahata
2021-05-21 14:09     ` Connor Kuehl

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=25f0d2c2f73c20309a1b578cc5fc15f4fd6b9a13.1605232743.git.isaku.yamahata@intel.com \
    --to=isaku.yamahata@intel.com \
    --cc=bp@alien8.de \
    --cc=hpa@zytor.com \
    --cc=isaku.yamahata@gmail.com \
    --cc=jmattson@google.com \
    --cc=joro@8bytes.org \
    --cc=kai.huang@linux.intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=sean.j.christopherson@intel.com \
    --cc=tglx@linutronix.de \
    --cc=vkuznets@redhat.com \
    --cc=wanpengli@tencent.com \
    --cc=x86@kernel.org \
    --cc=xiaoyao.li@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).