From: Lai Jiangshan <jiangshanlai@gmail.com>
To: linux-kernel@vger.kernel.org
Cc: Lai Jiangshan <jiangshan.ljs@antgroup.com>,
	Hou Wenlong <houwenlong.hwl@antgroup.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Sean Christopherson <seanjc@google.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Borislav Petkov <bp@alien8.de>, Ingo Molnar <mingo@redhat.com>,
	kvm@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>,
	x86@kernel.org, Kees Cook <keescook@chromium.org>,
	Juergen Gross <jgross@suse.com>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	"Mike Rapoport (IBM)" <rppt@kernel.org>,
	Yu-cheng Yu <yu-cheng.yu@intel.com>,
	Rick Edgecombe <rick.p.edgecombe@intel.com>,
	"Paul E. McKenney" <paulmck@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>
Subject: [RFC PATCH 06/73] KVM: x86: Move VMX interrupt/nmi handling into kvm.ko
Date: Mon, 26 Feb 2024 22:35:23 +0800
Message-ID: <20240226143630.33643-7-jiangshanlai@gmail.com>
In-Reply-To: <20240226143630.33643-1-jiangshanlai@gmail.com>

From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

As with VMX, hardware interrupts and NMIs that arrive while a PVM guest
is running trigger a VM exit and must be handled by the host's
interrupt/NMI handlers.  Therefore, move the VMX interrupt/NMI handling
code into kvm.ko so that it can be shared.
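
For illustration only, a minimal sketch of how another vendor module
(e.g. PVM) could use the relocated helpers from its IRQ-off VM-exit
path.  pvm_handle_exit_irqoff(), struct vcpu_pvm, to_pvm() and the
PVM_EXIT_* constants below are hypothetical; only kvm_do_nmi_irqoff()
and kvm_do_interrupt_irqoff() come from this patch:

	/* Called with IRQs still disabled, immediately after VM exit. */
	static void pvm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
	{
		struct vcpu_pvm *pvm = to_pvm(vcpu);	/* hypothetical */

		if (pvm->exit_reason == PVM_EXIT_NMI) {
			/* Wraps kvm_before/after_interrupt(KVM_HANDLING_NMI). */
			kvm_do_nmi_irqoff(vcpu);
		} else if (pvm->exit_reason == PVM_EXIT_IRQ) {
			/* Dispatch to the host IDT entry for the vector. */
			kvm_do_interrupt_irqoff(vcpu, pvm->host_irq_entry);
		}
	}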

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Co-developed-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
---
 arch/x86/include/asm/idtentry.h | 12 ++++----
 arch/x86/kernel/nmi.c           |  8 +++---
 arch/x86/kvm/Makefile           |  2 +-
 arch/x86/kvm/host_entry.S       | 50 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmenter.S      | 43 ----------------------------
 arch/x86/kvm/vmx/vmx.c          | 14 ++-------
 arch/x86/kvm/x86.c              |  3 ++
 arch/x86/kvm/x86.h              | 18 ++++++++++++
 8 files changed, 85 insertions(+), 65 deletions(-)
 create mode 100644 arch/x86/kvm/host_entry.S

diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 13639e57e1f8..8aab0b50431a 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -586,14 +586,14 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC,	xenpv_exc_machine_check);
 
 /* NMI */
 
-#if IS_ENABLED(CONFIG_KVM_INTEL)
+#if IS_ENABLED(CONFIG_KVM)
 /*
- * Special entry point for VMX which invokes this on the kernel stack, even for
- * 64-bit, i.e. without using an IST.  asm_exc_nmi() requires an IST to work
- * correctly vs. the NMI 'executing' marker.  Used for 32-bit kernels as well
- * to avoid more ifdeffery.
+ * Special entry point for VMX/PVM which invokes this on the kernel stack, even
+ * for 64-bit, i.e. without using an IST.  asm_exc_nmi() requires an IST to
+ * work correctly vs. the NMI 'executing' marker.  Used for 32-bit kernels as
+ * well to avoid more ifdeffery.
  */
-DECLARE_IDTENTRY(X86_TRAP_NMI,		exc_nmi_kvm_vmx);
+DECLARE_IDTENTRY(X86_TRAP_NMI,		exc_nmi_kvm);
 #endif
 
 DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,	exc_nmi);
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 17e955ab69fe..265e6b38cc58 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -568,13 +568,13 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
 		mds_user_clear_cpu_buffers();
 }
 
-#if IS_ENABLED(CONFIG_KVM_INTEL)
-DEFINE_IDTENTRY_RAW(exc_nmi_kvm_vmx)
+#if IS_ENABLED(CONFIG_KVM)
+DEFINE_IDTENTRY_RAW(exc_nmi_kvm)
 {
 	exc_nmi(regs);
 }
-#if IS_MODULE(CONFIG_KVM_INTEL)
-EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm_vmx);
+#if IS_MODULE(CONFIG_KVM)
+EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm);
 #endif
 #endif
 
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 80e3fe184d17..97bad203b1b1 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -9,7 +9,7 @@ endif
 
 include $(srctree)/virt/kvm/Makefile.kvm
 
-kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
+kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o host_entry.o \
 			   i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
 			   hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
 			   mmu/spte.o
diff --git a/arch/x86/kvm/host_entry.S b/arch/x86/kvm/host_entry.S
new file mode 100644
index 000000000000..6bdf0df06eb0
--- /dev/null
+++ b/arch/x86/kvm/host_entry.S
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/nospec-branch.h>
+#include <asm/segment.h>
+
+.macro KVM_DO_EVENT_IRQOFF call_insn call_target
+	/*
+	 * Unconditionally create a stack frame, getting the correct RSP on the
+	 * stack (for x86-64) would take two instructions anyways, and RBP can
+	 * be used to restore RSP to make objtool happy (see below).
+	 */
+	push %_ASM_BP
+	mov %_ASM_SP, %_ASM_BP
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
+	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
+	 */
+	and  $-16, %rsp
+	push $__KERNEL_DS
+	push %rbp
+#endif
+	pushf
+	push $__KERNEL_CS
+	\call_insn \call_target
+
+	/*
+	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
+	 * the correct value.  objtool doesn't know the callee will IRET and,
+	 * without the explicit restore, thinks the stack is getting walloped.
+	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
+	 */
+	mov %_ASM_BP, %_ASM_SP
+	pop %_ASM_BP
+	RET
+.endm
+
+.section .noinstr.text, "ax"
+
+SYM_FUNC_START(kvm_do_host_nmi_irqoff)
+	KVM_DO_EVENT_IRQOFF call asm_exc_nmi_kvm
+SYM_FUNC_END(kvm_do_host_nmi_irqoff)
+
+.section .text, "ax"
+
+SYM_FUNC_START(kvm_do_host_interrupt_irqoff)
+	KVM_DO_EVENT_IRQOFF CALL_NOSPEC _ASM_ARG1
+SYM_FUNC_END(kvm_do_host_interrupt_irqoff)
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 906ecd001511..12b7b99a9dd8 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -31,39 +31,6 @@
 #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
 #endif
 
-.macro VMX_DO_EVENT_IRQOFF call_insn call_target
-	/*
-	 * Unconditionally create a stack frame, getting the correct RSP on the
-	 * stack (for x86-64) would take two instructions anyways, and RBP can
-	 * be used to restore RSP to make objtool happy (see below).
-	 */
-	push %_ASM_BP
-	mov %_ASM_SP, %_ASM_BP
-
-#ifdef CONFIG_X86_64
-	/*
-	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
-	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
-	 */
-	and  $-16, %rsp
-	push $__KERNEL_DS
-	push %rbp
-#endif
-	pushf
-	push $__KERNEL_CS
-	\call_insn \call_target
-
-	/*
-	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
-	 * the correct value.  objtool doesn't know the callee will IRET and,
-	 * without the explicit restore, thinks the stack is getting walloped.
-	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
-	 */
-	mov %_ASM_BP, %_ASM_SP
-	pop %_ASM_BP
-	RET
-.endm
-
 .section .noinstr.text, "ax"
 
 /**
@@ -299,10 +266,6 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
 
 SYM_FUNC_END(__vmx_vcpu_run)
 
-SYM_FUNC_START(vmx_do_nmi_irqoff)
-	VMX_DO_EVENT_IRQOFF call asm_exc_nmi_kvm_vmx
-SYM_FUNC_END(vmx_do_nmi_irqoff)
-
 #ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 
 /**
@@ -354,9 +317,3 @@ SYM_FUNC_START(vmread_error_trampoline)
 	RET
 SYM_FUNC_END(vmread_error_trampoline)
 #endif
-
-.section .text, "ax"
-
-SYM_FUNC_START(vmx_do_interrupt_irqoff)
-	VMX_DO_EVENT_IRQOFF CALL_NOSPEC _ASM_ARG1
-SYM_FUNC_END(vmx_do_interrupt_irqoff)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index be20a60047b1..fca47304506e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6920,9 +6920,6 @@ static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
 	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
 }
 
-void vmx_do_interrupt_irqoff(unsigned long entry);
-void vmx_do_nmi_irqoff(void);
-
 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
 {
 	/*
@@ -6968,9 +6965,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 	    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
 		return;
 
-	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
-	vmx_do_interrupt_irqoff(gate_offset(desc));
-	kvm_after_interrupt(vcpu);
+	kvm_do_interrupt_irqoff(vcpu, gate_offset(desc));
 
 	vcpu->arch.at_instruction_boundary = true;
 }
@@ -7260,11 +7255,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
 	if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
-	    is_nmi(vmx_get_intr_info(vcpu))) {
-		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
-		vmx_do_nmi_irqoff();
-		kvm_after_interrupt(vcpu);
-	}
+	    is_nmi(vmx_get_intr_info(vcpu)))
+		kvm_do_nmi_irqoff(vcpu);
 
 out:
 	guest_state_exit_irqoff();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 35ad6dd5eaf6..96f3913f7fc5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13784,6 +13784,9 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
 
+EXPORT_SYMBOL_GPL(kvm_do_host_nmi_irqoff);
+EXPORT_SYMBOL_GPL(kvm_do_host_interrupt_irqoff);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 5184fde1dc54..4d1430f8874b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -491,6 +491,24 @@ static inline void kvm_machine_check(void)
 #endif
 }
 
+void kvm_do_host_nmi_irqoff(void);
+void kvm_do_host_interrupt_irqoff(unsigned long entry);
+
+static __always_inline void kvm_do_nmi_irqoff(struct kvm_vcpu *vcpu)
+{
+	kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+	kvm_do_host_nmi_irqoff();
+	kvm_after_interrupt(vcpu);
+}
+
+static inline void kvm_do_interrupt_irqoff(struct kvm_vcpu *vcpu,
+					   unsigned long entry)
+{
+	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
+	kvm_do_host_interrupt_irqoff(entry);
+	kvm_after_interrupt(vcpu);
+}
+
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
-- 
2.19.1.6.gb485710b


Thread overview: 82+ messages
2024-02-26 14:35 [RFC PATCH 00/73] KVM: x86/PVM: Introduce a new hypervisor Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 01/73] KVM: Documentation: Add the specification for PVM Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 02/73] x86/ABI/PVM: Add PVM-specific ABI header file Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 03/73] x86/entry: Implement switcher for PVM VM enter/exit Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 04/73] x86/entry: Implement direct switching for the switcher Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 05/73] KVM: x86: Set 'vcpu->arch.exception.injected' as true before vendor callback Lai Jiangshan
2024-02-26 14:35 ` Lai Jiangshan [this message]
2024-02-26 14:35 ` [RFC PATCH 07/73] KVM: x86/mmu: Adapt shadow MMU for PVM Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 08/73] KVM: x86: Allow hypercall handling to not skip the instruction Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 09/73] KVM: x86: Add PVM virtual MSRs into emulated_msrs_all[] Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 10/73] KVM: x86: Introduce vendor feature to expose vendor-specific CPUID Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 11/73] KVM: x86: Implement gpc refresh for guest usage Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 12/73] KVM: x86: Add NR_VCPU_SREG in SREG enum Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 13/73] KVM: x86/emulator: Reinject #GP if instruction emulation failed for PVM Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 14/73] KVM: x86: Create stubs for PVM module as a new vendor Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 15/73] mm/vmalloc: Add a helper to reserve a contiguous and aligned kernel virtual area Lai Jiangshan
2024-02-27 14:56   ` Christoph Hellwig
2024-02-27 17:07     ` Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 16/73] KVM: x86/PVM: Implement host mmu initialization Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 17/73] KVM: x86/PVM: Implement module initialization related callbacks Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 18/73] KVM: x86/PVM: Implement VM/VCPU " Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 19/73] x86/entry: Export 32-bit ignore syscall entry and __ia32_enabled variable Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 20/73] KVM: x86/PVM: Implement vcpu_load()/vcpu_put() related callbacks Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 21/73] KVM: x86/PVM: Implement vcpu_run() callbacks Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 22/73] KVM: x86/PVM: Handle some VM exits before enable interrupts Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 23/73] KVM: x86/PVM: Handle event handling related MSR read/write operation Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 24/73] KVM: x86/PVM: Introduce PVM mode switching Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 25/73] KVM: x86/PVM: Implement APIC emulation related callbacks Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 26/73] KVM: x86/PVM: Implement event delivery flags " Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 27/73] KVM: x86/PVM: Implement event injection " Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 28/73] KVM: x86/PVM: Handle syscall from user mode Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 29/73] KVM: x86/PVM: Implement allowed range checking for #PF Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 30/73] KVM: x86/PVM: Implement segment related callbacks Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 31/73] KVM: x86/PVM: Implement instruction emulation for #UD and #GP Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 32/73] KVM: x86/PVM: Enable guest debugging functions Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 33/73] KVM: x86/PVM: Handle VM-exit due to hardware exceptions Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 34/73] KVM: x86/PVM: Handle ERETU/ERETS synthetic instruction Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 35/73] KVM: x86/PVM: Handle PVM_SYNTHETIC_CPUID " Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 36/73] KVM: x86/PVM: Handle KVM hypercall Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 37/73] KVM: x86/PVM: Use host PCID to reduce guest TLB flushing Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 38/73] KVM: x86/PVM: Handle hypercalls for privilege instruction emulation Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 39/73] KVM: x86/PVM: Handle hypercall for CR3 switching Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 40/73] KVM: x86/PVM: Handle hypercall for loading GS selector Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 41/73] KVM: x86/PVM: Allow to load guest TLS in host GDT Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 42/73] KVM: x86/PVM: Support for kvm_exit() tracepoint Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 43/73] KVM: x86/PVM: Enable direct switching Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 44/73] KVM: x86/PVM: Implement TSC related callbacks Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 45/73] KVM: x86/PVM: Add dummy PMU " Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 46/73] KVM: x86/PVM: Support for CPUID faulting Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 47/73] KVM: x86/PVM: Handle the left supported MSRs in msrs_to_save_base[] Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 48/73] KVM: x86/PVM: Implement system registers setting callbacks Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 49/73] KVM: x86/PVM: Implement emulation for non-PVM mode Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 50/73] x86/tools/relocs: Cleanup cmdline options Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 51/73] x86/tools/relocs: Append relocations into input file Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 52/73] x86/boot: Allow to do relocation for uncompressed kernel Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 53/73] x86/pvm: Add Kconfig option and the CPU feature bit for PVM guest Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 54/73] x86/pvm: Detect PVM hypervisor support Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 55/73] x86/pvm: Relocate kernel image to specific virtual address range Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 56/73] x86/pvm: Relocate kernel image early in PVH entry Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 57/73] x86/pvm: Make cpu entry area and vmalloc area variable Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 58/73] x86/pvm: Relocate kernel address space layout Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 59/73] x86/pti: Force enabling KPTI for PVM guest Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 60/73] x86/pvm: Add event entry/exit and dispatch code Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 61/73] x86/pvm: Allow to install a system interrupt handler Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 62/73] x86/pvm: Add early kernel event entry and dispatch code Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 63/73] x86/pvm: Add hypercall support Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 64/73] x86/pvm: Enable PVM event delivery Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 65/73] x86/kvm: Patch KVM hypercall as PVM hypercall Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 66/73] x86/pvm: Use new cpu feature to describe XENPV and PVM Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 67/73] x86/pvm: Implement cpu related PVOPS Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 68/73] x86/pvm: Implement irq " Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 69/73] x86/pvm: Implement mmu " Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 70/73] x86/pvm: Don't use SWAPGS for gsbase read/write Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 71/73] x86/pvm: Adapt pushf/popf in this_cpu_cmpxchg16b_emu() Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 72/73] x86/pvm: Use RDTSCP as default in vdso_read_cpunode() Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 73/73] x86/pvm: Disable some unsupported syscalls and features Lai Jiangshan
2024-02-26 14:49 ` [RFC PATCH 00/73] KVM: x86/PVM: Introduce a new hypervisor Paolo Bonzini
2024-02-27 17:27   ` Sean Christopherson
2024-02-29  9:33     ` David Woodhouse
2024-03-01 14:00     ` Lai Jiangshan
2024-02-29 14:55   ` Lai Jiangshan
2024-03-06 11:05 ` Like Xu
