From: isaku.yamahata@intel.com
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: isaku.yamahata@intel.com, isaku.yamahata@gmail.com,
	Paolo Bonzini <pbonzini@redhat.com>,
	erdemaktas@google.com, Sean Christopherson <seanjc@google.com>,
	Sagi Shahar <sagis@google.com>,
	Sean Christopherson <sean.j.christopherson@intel.com>
Subject: [PATCH v9 102/105] KVM: TDX: Add methods to ignore accesses to CPU state
Date: Fri, 30 Sep 2022 03:18:36 -0700	[thread overview]
Message-ID: <a5ae094dc3362ea1ca4284bdd301d977be05d41e.1664530908.git.isaku.yamahata@intel.com> (raw)
In-Reply-To: <cover.1664530907.git.isaku.yamahata@intel.com>

From: Sean Christopherson <sean.j.christopherson@intel.com>

TDX protects TDX guest state from the VMM.  Implement access methods
for TDX guest state that ignore writes and return zero for reads, and
use KVM_BUG_ON() to flag accesses that should never happen for a TD.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
 arch/x86/kvm/vmx/main.c    | 463 +++++++++++++++++++++++++++++++++----
 arch/x86/kvm/vmx/tdx.c     |  55 ++++-
 arch/x86/kvm/vmx/x86_ops.h |  17 ++
 3 files changed, 490 insertions(+), 45 deletions(-)
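
Every conversion below follows one dispatch shape: a vt_* wrapper checks
is_td_vcpu() (or is_td() for VM-scope ops) and either short-circuits with a
benign value or falls through to the existing vmx_* implementation; paths
that must be unreachable for a TD are additionally guarded with KVM_BUG_ON()
or WARN_ON_ONCE().  A minimal sketch of the pattern, in which the
vt_example_*() and vmx_example_*() names are hypothetical and used purely
for illustration:

	/*
	 * Sketch only: vt_example_*()/vmx_example_*() are made-up names;
	 * is_td_vcpu() and KVM_BUG_ON() are the real helpers used below.
	 */
	static u64 vt_example_get_state(struct kvm_vcpu *vcpu)
	{
		/* TDX guest state is protected; reads return a benign zero. */
		if (is_td_vcpu(vcpu))
			return 0;

		return vmx_example_get_state(vcpu);
	}

	static void vt_example_set_state(struct kvm_vcpu *vcpu, u64 val)
	{
		/* A write that must never happen for a TD is a KVM bug. */
		if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
			return;

		vmx_example_set_state(vcpu, val);
	}

The kvm_x86_ops table at the bottom of main.c is then repointed from the
vmx_* callbacks to these vt_* wrappers.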

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 72dd771b0993..2e4be54a3be5 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -258,6 +258,46 @@ static void vt_enable_smi_window(struct kvm_vcpu *vcpu)
 	vmx_enable_smi_window(vcpu);
 }
 
+static bool vt_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+				       void *insn, int insn_len)
+{
+	if (is_td_vcpu(vcpu))
+		return false;
+
+	return vmx_can_emulate_instruction(vcpu, emul_type, insn, insn_len);
+}
+
+static int vt_check_intercept(struct kvm_vcpu *vcpu,
+				 struct x86_instruction_info *info,
+				 enum x86_intercept_stage stage,
+				 struct x86_exception *exception)
+{
+	/*
+	 * This callback is triggered by the x86 instruction emulator. TDX
+	 * doesn't allow guest memory inspection.
+	 */
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return X86EMUL_UNHANDLEABLE;
+
+	return vmx_check_intercept(vcpu, info, stage, exception);
+}
+
+static bool vt_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return true;
+
+	return vmx_apic_init_signal_blocked(vcpu);
+}
+
+static void vt_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return tdx_set_virtual_apic_mode(vcpu);
+
+	return vmx_set_virtual_apic_mode(vcpu);
+}
+
 static void vt_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 {
 	struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
@@ -266,6 +306,31 @@ static void vt_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 	memset(pi->pir, 0, sizeof(pi->pir));
 }
 
+static void vt_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+{
+	if (is_td_vcpu(vcpu))
+		return;
+
+	return vmx_hwapic_irr_update(vcpu, max_irr);
+}
+
+static void vt_hwapic_isr_update(int max_isr)
+{
+	if (is_td_vcpu(kvm_get_running_vcpu()))
+		return;
+
+	return vmx_hwapic_isr_update(max_isr);
+}
+
+static bool vt_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+	/* TDX doesn't support L2 at the moment. */
+	if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
+		return false;
+
+	return vmx_guest_apic_has_interrupt(vcpu);
+}
+
 static int vt_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
 	if (is_td_vcpu(vcpu))
@@ -305,6 +370,177 @@ static void vt_vcpu_deliver_init(struct kvm_vcpu *vcpu)
 	kvm_vcpu_deliver_init(vcpu);
 }
 
+static void vt_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return;
+
+	return vmx_vcpu_after_set_cpuid(vcpu);
+}
+
+static void vt_update_exception_bitmap(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return;
+
+	vmx_update_exception_bitmap(vcpu);
+}
+
+static u64 vt_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return tdx_get_segment_base(vcpu, seg);
+
+	return vmx_get_segment_base(vcpu, seg);
+}
+
+static void vt_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+			      int seg)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return tdx_get_segment(vcpu, var, seg);
+
+	vmx_get_segment(vcpu, var, seg);
+}
+
+static void vt_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+			      int seg)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_set_segment(vcpu, var, seg);
+}
+
+static int vt_get_cpl(struct kvm_vcpu *vcpu)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return tdx_get_cpl(vcpu);
+
+	return vmx_get_cpl(vcpu);
+}
+
+static void vt_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_get_cs_db_l_bits(vcpu, db, l);
+}
+
+static void vt_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+	if (is_td_vcpu(vcpu))
+		return;
+
+	vmx_set_cr0(vcpu, cr0);
+}
+
+static void vt_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	if (is_td_vcpu(vcpu))
+		return;
+
+	vmx_set_cr4(vcpu, cr4);
+}
+
+static int vt_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	if (is_td_vcpu(vcpu))
+		return 0;
+
+	return vmx_set_efer(vcpu, efer);
+}
+
+static void vt_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm)) {
+		memset(dt, 0, sizeof(*dt));
+		return;
+	}
+
+	vmx_get_idt(vcpu, dt);
+}
+
+static void vt_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_set_idt(vcpu, dt);
+}
+
+static void vt_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm)) {
+		memset(dt, 0, sizeof(*dt));
+		return;
+	}
+
+	vmx_get_gdt(vcpu, dt);
+}
+
+static void vt_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_set_gdt(vcpu, dt);
+}
+
+static void vt_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	if (is_td_vcpu(vcpu))
+		return;
+
+	vmx_set_dr7(vcpu, val);
+}
+
+static void vt_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * MOV-DR exiting is always cleared for a TD guest, even in debug
+	 * mode.  Thus KVM_DEBUGREG_WONT_EXIT can never be set and this
+	 * path should never be reached for a TD vcpu.
+	 */
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_sync_dirty_debug_regs(vcpu);
+}
+
+static void vt_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+	if (is_td_vcpu(vcpu))
+		return tdx_cache_reg(vcpu, reg);
+
+	return vmx_cache_reg(vcpu, reg);
+}
+
+static unsigned long vt_get_rflags(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return tdx_get_rflags(vcpu);
+
+	return vmx_get_rflags(vcpu);
+}
+
+static void vt_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+	if (is_td_vcpu(vcpu))
+		return;
+
+	vmx_set_rflags(vcpu, rflags);
+}
+
+static bool vt_get_if_flag(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return false;
+
+	return vmx_get_if_flag(vcpu);
+}
+
 static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
 {
 	if (is_td_vcpu(vcpu))
@@ -438,6 +674,15 @@ static u32 vt_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 	return vmx_get_interrupt_shadow(vcpu);
 }
 
+static void vt_patch_hypercall(struct kvm_vcpu *vcpu,
+				  unsigned char *hypercall)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_patch_hypercall(vcpu, hypercall);
+}
+
 static void vt_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
 {
 	if (is_td_vcpu(vcpu))
@@ -446,6 +691,14 @@ static void vt_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
 	vmx_inject_irq(vcpu, reinjected);
 }
 
+static void vt_queue_exception(struct kvm_vcpu *vcpu)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_queue_exception(vcpu);
+}
+
 static void vt_cancel_injection(struct kvm_vcpu *vcpu)
 {
 	if (is_td_vcpu(vcpu))
@@ -478,6 +731,130 @@ static void vt_request_immediate_exit(struct kvm_vcpu *vcpu)
 	vmx_request_immediate_exit(vcpu);
 }
 
+static void vt_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_update_cr8_intercept(vcpu, tpr, irr);
+}
+
+static void vt_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
+{
+	if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
+		return;
+
+	vmx_set_apic_access_page_addr(vcpu);
+}
+
+static void vt_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+{
+	if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
+		return;
+
+	vmx_refresh_apicv_exec_ctrl(vcpu);
+}
+
+static void vt_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+	if (is_td_vcpu(vcpu))
+		return;
+
+	vmx_load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+}
+
+static int vt_set_tss_addr(struct kvm *kvm, unsigned int addr)
+{
+	if (is_td(kvm))
+		return 0;
+
+	return vmx_set_tss_addr(kvm, addr);
+}
+
+static int vt_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
+{
+	if (is_td(kvm))
+		return 0;
+
+	return vmx_set_identity_map_addr(kvm, ident_addr);
+}
+
+static u8 vt_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+	if (is_td_vcpu(vcpu)) {
+		if (is_mmio)
+			return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
+		return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
+	}
+
+	return vmx_get_mt_mask(vcpu, gfn, is_mmio);
+}
+
+static u64 vt_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	/* TDX doesn't support L2 guests at the moment. */
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return 0;
+
+	return vmx_get_l2_tsc_offset(vcpu);
+}
+
+static u64 vt_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+	/* TDX doesn't support L2 guests at the moment. */
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return 0;
+
+	return vmx_get_l2_tsc_multiplier(vcpu);
+}
+
+static void vt_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+	/* In TDX, the TSC offset can't be changed. */
+	if (is_td_vcpu(vcpu))
+		return;
+
+	vmx_write_tsc_offset(vcpu, offset);
+}
+
+static void vt_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+{
+	/* In TDX, the TSC multiplier can't be changed. */
+	if (is_td_vcpu(vcpu))
+		return;
+
+	vmx_write_tsc_multiplier(vcpu, multiplier);
+}
+
+static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
+{
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_update_cpu_dirty_logging(vcpu);
+}
+
+#ifdef CONFIG_X86_64
+static int vt_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+			      bool *expired)
+{
+	/* VMX-preemption timer isn't available for TDX. */
+	if (is_td_vcpu(vcpu))
+		return -EINVAL;
+
+	return vmx_set_hv_timer(vcpu, guest_deadline_tsc, expired);
+}
+
+static void vt_cancel_hv_timer(struct kvm_vcpu *vcpu)
+{
+	/* VMX-preemption timer can't be set.  See vt_set_hv_timer(). */
+	if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+		return;
+
+	vmx_cancel_hv_timer(vcpu);
+}
+#endif
+
 static void vt_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
 			u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
 {
@@ -531,29 +908,29 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.vcpu_load = vt_vcpu_load,
 	.vcpu_put = vt_vcpu_put,
 
-	.update_exception_bitmap = vmx_update_exception_bitmap,
+	.update_exception_bitmap = vt_update_exception_bitmap,
 	.get_msr_feature = vmx_get_msr_feature,
 	.get_msr = vt_get_msr,
 	.set_msr = vt_set_msr,
-	.get_segment_base = vmx_get_segment_base,
-	.get_segment = vmx_get_segment,
-	.set_segment = vmx_set_segment,
-	.get_cpl = vmx_get_cpl,
-	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
-	.set_cr0 = vmx_set_cr0,
+	.get_segment_base = vt_get_segment_base,
+	.get_segment = vt_get_segment,
+	.set_segment = vt_set_segment,
+	.get_cpl = vt_get_cpl,
+	.get_cs_db_l_bits = vt_get_cs_db_l_bits,
+	.set_cr0 = vt_set_cr0,
 	.is_valid_cr4 = vmx_is_valid_cr4,
-	.set_cr4 = vmx_set_cr4,
-	.set_efer = vmx_set_efer,
-	.get_idt = vmx_get_idt,
-	.set_idt = vmx_set_idt,
-	.get_gdt = vmx_get_gdt,
-	.set_gdt = vmx_set_gdt,
-	.set_dr7 = vmx_set_dr7,
-	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
-	.cache_reg = vmx_cache_reg,
-	.get_rflags = vmx_get_rflags,
-	.set_rflags = vmx_set_rflags,
-	.get_if_flag = vmx_get_if_flag,
+	.set_cr4 = vt_set_cr4,
+	.set_efer = vt_set_efer,
+	.get_idt = vt_get_idt,
+	.set_idt = vt_set_idt,
+	.get_gdt = vt_get_gdt,
+	.set_gdt = vt_set_gdt,
+	.set_dr7 = vt_set_dr7,
+	.sync_dirty_debug_regs = vt_sync_dirty_debug_regs,
+	.cache_reg = vt_cache_reg,
+	.get_rflags = vt_get_rflags,
+	.set_rflags = vt_set_rflags,
+	.get_if_flag = vt_get_if_flag,
 
 	.flush_tlb_all = vt_flush_tlb_all,
 	.flush_tlb_current = vt_flush_tlb_current,
@@ -569,10 +946,10 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.update_emulated_instruction = vmx_update_emulated_instruction,
 	.set_interrupt_shadow = vt_set_interrupt_shadow,
 	.get_interrupt_shadow = vt_get_interrupt_shadow,
-	.patch_hypercall = vmx_patch_hypercall,
+	.patch_hypercall = vt_patch_hypercall,
 	.inject_irq = vt_inject_irq,
 	.inject_nmi = vt_inject_nmi,
-	.queue_exception = vmx_queue_exception,
+	.queue_exception = vt_queue_exception,
 	.cancel_injection = vt_cancel_injection,
 	.interrupt_allowed = vt_interrupt_allowed,
 	.nmi_allowed = vt_nmi_allowed,
@@ -580,39 +957,39 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.set_nmi_mask = vt_set_nmi_mask,
 	.enable_nmi_window = vt_enable_nmi_window,
 	.enable_irq_window = vt_enable_irq_window,
-	.update_cr8_intercept = vmx_update_cr8_intercept,
-	.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
-	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
-	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
-	.load_eoi_exitmap = vmx_load_eoi_exitmap,
+	.update_cr8_intercept = vt_update_cr8_intercept,
+	.set_virtual_apic_mode = vt_set_virtual_apic_mode,
+	.set_apic_access_page_addr = vt_set_apic_access_page_addr,
+	.refresh_apicv_exec_ctrl = vt_refresh_apicv_exec_ctrl,
+	.load_eoi_exitmap = vt_load_eoi_exitmap,
 	.apicv_post_state_restore = vt_apicv_post_state_restore,
 	.check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
-	.hwapic_irr_update = vmx_hwapic_irr_update,
-	.hwapic_isr_update = vmx_hwapic_isr_update,
-	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
+	.hwapic_irr_update = vt_hwapic_irr_update,
+	.hwapic_isr_update = vt_hwapic_isr_update,
+	.guest_apic_has_interrupt = vt_guest_apic_has_interrupt,
 	.sync_pir_to_irr = vt_sync_pir_to_irr,
 	.deliver_interrupt = vt_deliver_interrupt,
 	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
 	.protected_apic_has_interrupt = vt_protected_apic_has_interrupt,
 
-	.set_tss_addr = vmx_set_tss_addr,
-	.set_identity_map_addr = vmx_set_identity_map_addr,
-	.get_mt_mask = vmx_get_mt_mask,
+	.set_tss_addr = vt_set_tss_addr,
+	.set_identity_map_addr = vt_set_identity_map_addr,
+	.get_mt_mask = vt_get_mt_mask,
 
 	.get_exit_info = vt_get_exit_info,
 
-	.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
+	.vcpu_after_set_cpuid = vt_vcpu_after_set_cpuid,
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
-	.get_l2_tsc_offset = vmx_get_l2_tsc_offset,
-	.get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
-	.write_tsc_offset = vmx_write_tsc_offset,
-	.write_tsc_multiplier = vmx_write_tsc_multiplier,
+	.get_l2_tsc_offset = vt_get_l2_tsc_offset,
+	.get_l2_tsc_multiplier = vt_get_l2_tsc_multiplier,
+	.write_tsc_offset = vt_write_tsc_offset,
+	.write_tsc_multiplier = vt_write_tsc_multiplier,
 
 	.load_mmu_pgd = vt_load_mmu_pgd,
 
-	.check_intercept = vmx_check_intercept,
+	.check_intercept = vt_check_intercept,
 	.handle_exit_irqoff = vt_handle_exit_irqoff,
 
 	.request_immediate_exit = vt_request_immediate_exit,
@@ -620,7 +997,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.sched_in = vt_sched_in,
 
 	.cpu_dirty_log_size = PML_ENTITY_NUM,
-	.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
+	.update_cpu_dirty_logging = vt_update_cpu_dirty_logging,
 
 	.nested_ops = &vmx_nested_ops,
 
@@ -628,8 +1005,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.pi_start_assignment = vmx_pi_start_assignment,
 
 #ifdef CONFIG_X86_64
-	.set_hv_timer = vmx_set_hv_timer,
-	.cancel_hv_timer = vmx_cancel_hv_timer,
+	.set_hv_timer = vt_set_hv_timer,
+	.cancel_hv_timer = vt_cancel_hv_timer,
 #endif
 
 	.setup_mce = vmx_setup_mce,
@@ -639,8 +1016,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.leave_smm = vt_leave_smm,
 	.enable_smi_window = vt_enable_smi_window,
 
-	.can_emulate_instruction = vmx_can_emulate_instruction,
-	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
+	.can_emulate_instruction = vt_can_emulate_instruction,
+	.apic_init_signal_blocked = vt_apic_init_signal_blocked,
 	.migrate_timers = vmx_migrate_timers,
 
 	.msr_filter_changed = vmx_msr_filter_changed,
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index c7164404a79f..cfce15d7361e 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -3,6 +3,7 @@
 #include <linux/mmu_context.h>
 
 #include <asm/fpu/xcr.h>
+#include <asm/virtext.h>
 #include <asm/tdx.h>
 
 #include "capabilities.h"
@@ -485,8 +486,15 @@ int tdx_vcpu_create(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.tsc_offset = to_kvm_tdx(vcpu->kvm)->tsc_offset;
 	vcpu->arch.l1_tsc_offset = vcpu->arch.tsc_offset;
-	vcpu->arch.guest_state_protected =
-		!(to_kvm_tdx(vcpu->kvm)->attributes & TDX_TD_ATTRIBUTE_DEBUG);
+	/*
+	 * TODO: support off-TD debug.  If the TD DEBUG attribute is set,
+	 * guest state can be accessed (guest_state_protected = false) and
+	 * KVM ioctls to access CPU state are usable by the VMM (e.g. qemu).
+	 *
+	 * vcpu->arch.guest_state_protected =
+	 *	!(to_kvm_tdx(vcpu->kvm)->attributes & TDX_TD_ATTRIBUTE_DEBUG);
+	 */
+	vcpu->arch.guest_state_protected = true;
 
 	tdx->pi_desc.nv = POSTED_INTR_VECTOR;
 	tdx->pi_desc.sn = 1;
@@ -1658,6 +1666,49 @@ void tdx_enable_smi_window(struct kvm_vcpu *vcpu)
 	vcpu->arch.smi_pending = false;
 }
 
+void tdx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+{
+	/* Only x2APIC mode is supported for TD. */
+	WARN_ON_ONCE(kvm_get_apic_mode(vcpu) != LAPIC_MODE_X2APIC);
+}
+
+int tdx_get_cpl(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+void tdx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+	kvm_register_mark_available(vcpu, reg);
+	switch (reg) {
+	case VCPU_REGS_RSP:
+	case VCPU_REGS_RIP:
+	case VCPU_EXREG_PDPTR:
+	case VCPU_EXREG_CR0:
+	case VCPU_EXREG_CR3:
+	case VCPU_EXREG_CR4:
+		break;
+	default:
+		KVM_BUG_ON(1, vcpu->kvm);
+		break;
+	}
+}
+
+unsigned long tdx_get_rflags(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+u64 tdx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+	return 0;
+}
+
+void tdx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
+{
+	memset(var, 0, sizeof(*var));
+}
+
 int tdx_dev_ioctl(void __user *argp)
 {
 	struct kvm_tdx_capabilities __user *user_caps;
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index a81b47307b39..7d5f1c0073b4 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -169,6 +169,14 @@ int tdx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
 int tdx_enter_smm(struct kvm_vcpu *vcpu, char *smstate);
 int tdx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate);
 void tdx_enable_smi_window(struct kvm_vcpu *vcpu);
+void tdx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
+
+int tdx_get_cpl(struct kvm_vcpu *vcpu);
+void tdx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+unsigned long tdx_get_rflags(struct kvm_vcpu *vcpu);
+bool tdx_is_emulated_msr(u32 index, bool write);
+u64 tdx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
+void tdx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 
 int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
 int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
@@ -210,10 +218,19 @@ static inline void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *in
 static inline bool tdx_is_emulated_msr(u32 index, bool write) { return false; }
 static inline int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
 static inline int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
+
 static inline int tdx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) { return false; }
 static inline int tdx_enter_smm(struct kvm_vcpu *vcpu, char *smstate) { return 0; }
 static inline int tdx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) { return 0; }
 static inline void tdx_enable_smi_window(struct kvm_vcpu *vcpu) {}
+static inline void tdx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) {}
+
+static inline int tdx_get_cpl(struct kvm_vcpu *vcpu) { return 0; }
+static inline void tdx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) {}
+static inline unsigned long tdx_get_rflags(struct kvm_vcpu *vcpu) { return 0; }
+static inline u64 tdx_get_segment_base(struct kvm_vcpu *vcpu, int seg) { return 0; }
+static inline void tdx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+				   int seg) {}
 
 static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
 static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }
-- 
2.25.1


Thread overview: 111+ messages
2022-09-30 10:16 [PATCH v9 000/105] KVM TDX basic feature support isaku.yamahata
2022-09-30 10:16 ` [PATCH v9 001/105] KVM: VMX: Move out vmx_x86_ops to 'main.c' to wrap VMX and TDX isaku.yamahata
2022-09-30 10:16 ` [PATCH v9 002/105] KVM: x86: Refactor KVM VMX module init/exit functions isaku.yamahata
2022-09-30 10:16 ` [PATCH v9 003/105] KVM: TDX: Add placeholders for TDX VM/vcpu structure isaku.yamahata
2022-09-30 10:16 ` [PATCH v9 004/105] x86/virt/tdx: Add a helper function to return system wide info about TDX module isaku.yamahata
2022-09-30 10:16 ` [PATCH v9 005/105] KVM: TDX: Initialize the TDX module when loading the KVM intel kernel module isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 006/105] KVM: x86: Introduce vm_type to differentiate default VMs from confidential VMs isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 007/105] KVM: TDX: Make TDX VM type supported isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 008/105] [MARKER] The start of TDX KVM patch series: TDX architectural definitions isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 009/105] KVM: TDX: Define " isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 010/105] KVM: TDX: Add TDX "architectural" error codes isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 011/105] KVM: TDX: Add C wrapper functions for SEAMCALLs to the TDX module isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 012/105] KVM: TDX: Add helper functions to print TDX SEAMCALL error isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 013/105] [MARKER] The start of TDX KVM patch series: TD VM creation/destruction isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 014/105] KVM: TDX: Stub in tdx.h with structs, accessors, and VMCS helpers isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 015/105] x86/cpu: Add helper functions to allocate/free TDX private host key id isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 016/105] KVM: TDX: create/destroy VM structure isaku.yamahata
2022-10-12 22:30   ` Sagi Shahar
2022-10-13  8:55     ` Isaku Yamahata
2022-09-30 10:17 ` [PATCH v9 017/105] KVM: TDX: Refuse to unplug the last cpu on the package isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 018/105] KVM: TDX: x86: Add ioctl to get TDX systemwide parameters isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 019/105] KVM: TDX: Add place holder for TDX VM specific mem_enc_op ioctl isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 020/105] KVM: TDX: initialize VM with TDX specific parameters isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 021/105] KVM: TDX: Make pmu_intel.c ignore guest TD case isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 022/105] [MARKER] The start of TDX KVM patch series: TD vcpu creation/destruction isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 023/105] KVM: TDX: allocate/free TDX vcpu structure isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 024/105] KVM: TDX: Do TDX specific vcpu initialization isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 025/105] KVM: TDX: Use private memory for TDX isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 026/105] [MARKER] The start of TDX KVM patch series: KVM MMU GPA shared bits isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 027/105] KVM: x86/mmu: introduce config for PRIVATE KVM MMU isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 028/105] KVM: x86/mmu: Add address conversion functions for TDX shared bit of GPA isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 029/105] [MARKER] The start of TDX KVM patch series: KVM TDP refactoring for TDX isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 030/105] KVM: x86/mmu: Replace hardcoded value 0 for the initial value for SPTE isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 031/105] KVM: x86/mmu: Make sync_page not use hard-coded 0 as the initial SPTE value isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 032/105] KVM: x86/mmu: Allow non-zero value for non-present SPTE and removed SPTE isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 033/105] KVM: x86/mmu: Add Suppress VE bit to shadow_mmio_{value, mask} isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 034/105] KVM: x86/mmu: Track shadow MMIO value on a per-VM basis isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 035/105] KVM: TDX: Enable mmio spte caching always for TDX isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 036/105] KVM: x86/mmu: Disallow fast page fault on private GPA isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 037/105] KVM: x86/mmu: Allow per-VM override of the TDP max page level isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 038/105] KVM: VMX: Introduce test mode related to EPT violation VE isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 039/105] [MARKER] The start of TDX KVM patch series: KVM TDP MMU hooks isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 040/105] KVM: x86/tdp_mmu: refactor kvm_tdp_mmu_map() isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 041/105] KVM: x86/tdp_mmu: Init role member of struct kvm_mmu_page at allocation isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 042/105] KVM: x86/mmu: Require TDP MMU for TDX isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 043/105] KVM: x86/mmu: Add a new is_private member for union kvm_mmu_page_role isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 044/105] KVM: x86/mmu: Add a private pointer to struct kvm_mmu_page isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 045/105] KVM: x86/tdp_mmu: Don't zap private pages for unsupported cases isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 046/105] KVM: x86/tdp_mmu: Support TDX private mapping for TDP MMU isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 047/105] [MARKER] The start of TDX KVM patch series: TDX EPT violation isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 048/105] KVM: x86/mmu: Disallow dirty logging for x86 TDX isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 049/105] KVM: x86/tdp_mmu: Ignore unsupported mmu operation on private GFNs isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 050/105] KVM: VMX: Split out guts of EPT violation to common/exposed function isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 051/105] KVM: VMX: Move setting of EPT MMU masks to common VT-x code isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 052/105] KVM: TDX: Add load_mmu_pgd method for TDX isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 053/105] KVM: TDX: don't request KVM_REQ_APIC_PAGE_RELOAD isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 054/105] KVM: x86/VMX: introduce vmx tlb_remote_flush and tlb_remote_flush_with_range isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 055/105] KVM: TDX: TDP MMU TDX support isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 056/105] [MARKER] The start of TDX KVM patch series: KVM TDP MMU MapGPA isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 057/105] KVM: Add functions to set GFN to private or shared isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 058/105] KVM: x86/mmu: Introduce kvm_mmu_map_tdp_page() for use by TDX isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 059/105] KVM: x86/tdp_mmu: implement MapGPA hypercall for TDX isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 060/105] [MARKER] The start of TDX KVM patch series: TD finalization isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 061/105] KVM: TDX: Create initial guest memory isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 062/105] KVM: TDX: Finalize VM initialization isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 063/105] [MARKER] The start of TDX KVM patch series: TD vcpu enter/exit isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 064/105] KVM: TDX: Add helper assembly function to TDX vcpu isaku.yamahata
2022-09-30 10:17 ` [PATCH v9 065/105] KVM: TDX: Implement TDX vcpu enter/exit path isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 066/105] KVM: TDX: vcpu_run: save/restore host state(host kernel gs) isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 067/105] KVM: TDX: restore host xsave state when exit from the guest TD isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 068/105] KVM: x86: Allow to update cached values in kvm_user_return_msrs w/o wrmsr isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 069/105] KVM: TDX: restore user ret MSRs isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 070/105] [MARKER] The start of TDX KVM patch series: TD vcpu exits/interrupts/hypercalls isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 071/105] KVM: TDX: complete interrupts after tdexit isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 072/105] KVM: TDX: restore debug store when TD exit isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 073/105] KVM: TDX: handle vcpu migration over logical processor isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 074/105] KVM: x86: Add a switch_db_regs flag to handle TDX's auto-switched behavior isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 075/105] KVM: TDX: Add support for find pending IRQ in a protected local APIC isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 076/105] KVM: x86: Assume timer IRQ was injected if APIC state is proteced isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 077/105] KVM: TDX: remove use of struct vcpu_vmx from posted_interrupt.c isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 078/105] KVM: TDX: Implement interrupt injection isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 079/105] KVM: TDX: Implements vcpu request_immediate_exit isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 080/105] KVM: TDX: Implement methods to inject NMI isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 081/105] KVM: VMX: Modify NMI and INTR handlers to take intr_info as function argument isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 082/105] KVM: VMX: Move NMI/exception handler to common helper isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 083/105] KVM: x86: Split core of hypercall emulation to helper function isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 084/105] KVM: TDX: Add a place holder to handle TDX VM exit isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 085/105] KVM: TDX: Retry seamcall when TDX_OPERAND_BUSY with operand SEPT isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 086/105] KVM: TDX: handle EXIT_REASON_OTHER_SMI isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 087/105] KVM: TDX: handle ept violation/misconfig exit isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 088/105] KVM: TDX: handle EXCEPTION_NMI and EXTERNAL_INTERRUPT isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 089/105] KVM: TDX: Add a place holder for handler of TDX hypercalls (TDG.VP.VMCALL) isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 090/105] KVM: TDX: handle KVM hypercall with TDG.VP.VMCALL isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 091/105] KVM: TDX: Handle TDX PV CPUID hypercall isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 092/105] KVM: TDX: Handle TDX PV HLT hypercall isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 093/105] KVM: TDX: Handle TDX PV port io hypercall isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 094/105] KVM: TDX: Handle TDX PV MMIO hypercall isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 095/105] KVM: TDX: Implement callbacks for MSR operations for TDX isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 096/105] KVM: TDX: Handle TDX PV rdmsr/wrmsr hypercall isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 097/105] KVM: TDX: Handle TDX PV report fatal error hypercall isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 098/105] KVM: TDX: Handle TDX PV map_gpa hypercall isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 099/105] KVM: TDX: Handle TDG.VP.VMCALL<GetTdVmCallInfo> hypercall isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 100/105] KVM: TDX: Silently discard SMI request isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 101/105] KVM: TDX: Silently ignore INIT/SIPI isaku.yamahata
2022-09-30 10:18 ` isaku.yamahata [this message]
2022-09-30 10:18 ` [PATCH v9 103/105] Documentation/virt/kvm: Document on Trust Domain Extensions(TDX) isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 104/105] KVM: x86: design documentation on TDX support of x86 KVM TDP MMU isaku.yamahata
2022-09-30 10:18 ` [PATCH v9 105/105] [MARKER] the end of (the first phase of) TDX KVM patch series isaku.yamahata
2022-10-01  8:30 ` [PATCH v9 000/105] KVM TDX basic feature support Bagas Sanjaya
2022-10-03 18:29   ` Isaku Yamahata
2022-10-03 20:08     ` Huang, Kai
