From: Gavin Shan <gshan@redhat.com> To: kvmarm@lists.cs.columbia.edu Cc: linux-kernel@vger.kernel.org, eauger@redhat.com, shannon.zhaosl@gmail.com, maz@kernel.org, Jonathan.Cameron@huawei.com, will@kernel.org, pbonzini@redhat.com, james.morse@arm.com, mark.rutland@arm.com, drjones@redhat.com, vkuznets@redhat.com, shan.gavin@gmail.com Subject: [PATCH v5 16/22] KVM: arm64: Support SDEI_EVENT_{COMPLETE,COMPLETE_AND_RESUME} hypercall Date: Tue, 22 Mar 2022 16:07:04 +0800 [thread overview] Message-ID: <20220322080710.51727-17-gshan@redhat.com> (raw) In-Reply-To: <20220322080710.51727-1-gshan@redhat.com> This supports SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercall. They are used by the guest to notify the completion of the SDEI event in the handler. The executing context or registers are modified according to the SDEI specification like below: * x0 - x17, PC and PState are restored to what values we had in the interrupted or preempted context. * If it's SDEI_EVENT_COMPLETE_AND_RESUME hypercall, IRQ exception is injected. 
Signed-off-by: Gavin Shan <gshan@redhat.com> --- arch/arm64/include/asm/kvm_emulate.h | 1 + arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/inject_fault.c | 29 +++++++++++ arch/arm64/kvm/sdei.c | 76 +++++++++++++++++++++++++++- 4 files changed, 106 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index d62405ce3e6d..ca9de9f24923 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -37,6 +37,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); void kvm_skip_instr32(struct kvm_vcpu *vcpu); void kvm_inject_undefined(struct kvm_vcpu *vcpu); +void kvm_inject_irq(struct kvm_vcpu *vcpu); void kvm_inject_vabt(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e2762d08ab1c..282913e1afb0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -428,6 +428,7 @@ struct kvm_vcpu_arch { #define KVM_ARM64_EXCEPT_AA32_UND (0 << 9) #define KVM_ARM64_EXCEPT_AA32_IABT (1 << 9) #define KVM_ARM64_EXCEPT_AA32_DABT (2 << 9) +#define KVM_ARM64_EXCEPT_AA32_IRQ (3 << 9) /* For AArch64: */ #define KVM_ARM64_EXCEPT_AA64_ELx_SYNC (0 << 9) #define KVM_ARM64_EXCEPT_AA64_ELx_IRQ (1 << 9) diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index b47df73e98d7..c8a8791bdf28 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -66,6 +66,13 @@ static void inject_undef64(struct kvm_vcpu *vcpu) vcpu_write_sys_reg(vcpu, esr, ESR_EL1); } +static void inject_irq64(struct kvm_vcpu *vcpu) +{ + vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | + KVM_ARM64_EXCEPT_AA64_ELx_IRQ | + KVM_ARM64_PENDING_EXCEPTION); +} + #define DFSR_FSC_EXTABT_LPAE 0x10 #define DFSR_FSC_EXTABT_nLPAE 0x08 #define DFSR_LPAE BIT(9) @@ 
-77,6 +84,12 @@ static void inject_undef32(struct kvm_vcpu *vcpu) KVM_ARM64_PENDING_EXCEPTION); } +static void inject_irq32(struct kvm_vcpu *vcpu) +{ + vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IRQ | + KVM_ARM64_PENDING_EXCEPTION); +} + /* * Modelled after TakeDataAbortException() and TakePrefetchAbortException * pseudocode. @@ -160,6 +173,22 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) inject_undef64(vcpu); } +/** + * kvm_inject_irq - inject an IRQ into the guest + * @vcpu: The vCPU in which to inject IRQ + * + * Inject IRQs to the target vCPU. It is assumed that this code is + * called from the VCPU thread and that the VCPU therefore is not + * currently executing guest code. + */ +void kvm_inject_irq(struct kvm_vcpu *vcpu) +{ + if (vcpu_el1_is_32bit(vcpu)) + inject_irq32(vcpu); + else + inject_irq64(vcpu); +} + void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr) { vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK); diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c index ba2ca65c871b..3019ac196e76 100644 --- a/arch/arm64/kvm/sdei.c +++ b/arch/arm64/kvm/sdei.c @@ -344,6 +344,78 @@ static unsigned long hypercall_context(struct kvm_vcpu *vcpu) return ret; } +static unsigned long hypercall_complete(struct kvm_vcpu *vcpu, bool resume) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_sdei_kvm *ksdei = kvm->arch.sdei; + struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei; + struct kvm_sdei_exposed_event *exposed_event; + struct kvm_sdei_registered_event *registered_event; + struct kvm_sdei_vcpu_event *vcpu_event; + struct kvm_sdei_vcpu_regs_state *regs; + unsigned long ret = SDEI_SUCCESS; + int index; + + spin_lock(&ksdei->lock); + spin_lock(&vsdei->lock); + + if (vsdei->critical_event) { + vcpu_event = vsdei->critical_event; + regs = &vsdei->state.critical_regs; + vsdei->critical_event = NULL; + vsdei->state.critical_num = KVM_SDEI_INVALID_EVENT; + } else if (vsdei->normal_event) { + vcpu_event = vsdei->normal_event; + regs = &vsdei->state.normal_regs; + 
vsdei->normal_event = NULL; + vsdei->state.normal_num = KVM_SDEI_INVALID_EVENT; + } else { + ret = SDEI_DENIED; + goto unlock; + } + + /* Restore registers: x0 -> x17, PC, PState */ + for (index = 0; index < ARRAY_SIZE(regs->regs); index++) + vcpu_set_reg(vcpu, index, regs->regs[index]); + + *vcpu_cpsr(vcpu) = regs->pstate; + *vcpu_pc(vcpu) = regs->pc; + + /* Inject interrupt if needed */ + if (resume) + kvm_inject_irq(vcpu); + + /* Dereference the vcpu event and destroy it if needed */ + vcpu_event->state.event_count--; + if (!vcpu_event->state.event_count) + remove_one_vcpu_event(vcpu, vcpu_event); + + /* + * We need to check if the registered event is pending for + * unregistration. In that case, the registered event should + * be unregistered and destroyed if needed. + */ + registered_event = vcpu_event->registered_event; + exposed_event = registered_event->exposed_event; + index = kvm_sdei_vcpu_index(vcpu, exposed_event); + if (kvm_sdei_is_unregister_pending(registered_event, index)) { + kvm_sdei_clear_enabled(registered_event, index); + kvm_sdei_clear_registered(registered_event, index); + if (kvm_sdei_none_registered(registered_event)) + remove_one_registered_event(kvm, registered_event); + } + + /* Make another request if we have any pending events */ + if ((vsdei->critical_event_count + vsdei->normal_event_count) > 0) + kvm_make_request(KVM_REQ_SDEI, vcpu); + +unlock: + spin_unlock(&vsdei->lock); + spin_unlock(&ksdei->lock); + + return ret; +} + static unsigned long unregister_one_event(struct kvm *kvm, struct kvm_vcpu *vcpu, struct kvm_sdei_registered_event *registered_event) @@ -864,8 +936,10 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu) ret = hypercall_context(vcpu); break; case SDEI_1_0_FN_SDEI_EVENT_COMPLETE: + ret = hypercall_complete(vcpu, false); + break; case SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME: - ret = SDEI_NOT_SUPPORTED; + ret = hypercall_complete(vcpu, true); break; case SDEI_1_0_FN_SDEI_EVENT_UNREGISTER: ret = 
hypercall_unregister(vcpu); -- 2.23.0
WARNING: multiple messages have this Message-ID (diff)
From: Gavin Shan <gshan@redhat.com> To: kvmarm@lists.cs.columbia.edu Cc: maz@kernel.org, linux-kernel@vger.kernel.org, eauger@redhat.com, shan.gavin@gmail.com, Jonathan.Cameron@huawei.com, pbonzini@redhat.com, vkuznets@redhat.com, will@kernel.org Subject: [PATCH v5 16/22] KVM: arm64: Support SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercall Date: Tue, 22 Mar 2022 16:07:04 +0800 [thread overview] Message-ID: <20220322080710.51727-17-gshan@redhat.com> (raw) In-Reply-To: <20220322080710.51727-1-gshan@redhat.com> This supports SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercall. They are used by the guest to notify the completion of the SDEI event in the handler. The executing context or registers are modified according to the SDEI specification like below: * x0 - x17, PC and PState are restored to what values we had in the interrupted or preempted context. * If it's SDEI_EVENT_COMPLETE_AND_RESUME hypercall, IRQ exception is injected. Signed-off-by: Gavin Shan <gshan@redhat.com> --- arch/arm64/include/asm/kvm_emulate.h | 1 + arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/inject_fault.c | 29 +++++++++++ arch/arm64/kvm/sdei.c | 76 +++++++++++++++++++++++++++- 4 files changed, 106 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index d62405ce3e6d..ca9de9f24923 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -37,6 +37,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); void kvm_skip_instr32(struct kvm_vcpu *vcpu); void kvm_inject_undefined(struct kvm_vcpu *vcpu); +void kvm_inject_irq(struct kvm_vcpu *vcpu); void kvm_inject_vabt(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e2762d08ab1c..282913e1afb0 100644 --- 
a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -428,6 +428,7 @@ struct kvm_vcpu_arch { #define KVM_ARM64_EXCEPT_AA32_UND (0 << 9) #define KVM_ARM64_EXCEPT_AA32_IABT (1 << 9) #define KVM_ARM64_EXCEPT_AA32_DABT (2 << 9) +#define KVM_ARM64_EXCEPT_AA32_IRQ (3 << 9) /* For AArch64: */ #define KVM_ARM64_EXCEPT_AA64_ELx_SYNC (0 << 9) #define KVM_ARM64_EXCEPT_AA64_ELx_IRQ (1 << 9) diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index b47df73e98d7..c8a8791bdf28 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -66,6 +66,13 @@ static void inject_undef64(struct kvm_vcpu *vcpu) vcpu_write_sys_reg(vcpu, esr, ESR_EL1); } +static void inject_irq64(struct kvm_vcpu *vcpu) +{ + vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | + KVM_ARM64_EXCEPT_AA64_ELx_IRQ | + KVM_ARM64_PENDING_EXCEPTION); +} + #define DFSR_FSC_EXTABT_LPAE 0x10 #define DFSR_FSC_EXTABT_nLPAE 0x08 #define DFSR_LPAE BIT(9) @@ -77,6 +84,12 @@ static void inject_undef32(struct kvm_vcpu *vcpu) KVM_ARM64_PENDING_EXCEPTION); } +static void inject_irq32(struct kvm_vcpu *vcpu) +{ + vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IRQ | + KVM_ARM64_PENDING_EXCEPTION); +} + /* * Modelled after TakeDataAbortException() and TakePrefetchAbortException * pseudocode. @@ -160,6 +173,22 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) inject_undef64(vcpu); } +/** + * kvm_inject_irq - inject an IRQ into the guest + * @vcpu: The vCPU in which to inject IRQ + * + * Inject IRQs to the target vCPU. It is assumed that this code is + * called from the VCPU thread and that the VCPU therefore is not + * currently executing guest code. 
+ */ +void kvm_inject_irq(struct kvm_vcpu *vcpu) +{ + if (vcpu_el1_is_32bit(vcpu)) + inject_irq32(vcpu); + else + inject_irq64(vcpu); +} + void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr) { vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK); diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c index ba2ca65c871b..3019ac196e76 100644 --- a/arch/arm64/kvm/sdei.c +++ b/arch/arm64/kvm/sdei.c @@ -344,6 +344,78 @@ static unsigned long hypercall_context(struct kvm_vcpu *vcpu) return ret; } +static unsigned long hypercall_complete(struct kvm_vcpu *vcpu, bool resume) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_sdei_kvm *ksdei = kvm->arch.sdei; + struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei; + struct kvm_sdei_exposed_event *exposed_event; + struct kvm_sdei_registered_event *registered_event; + struct kvm_sdei_vcpu_event *vcpu_event; + struct kvm_sdei_vcpu_regs_state *regs; + unsigned long ret = SDEI_SUCCESS; + int index; + + spin_lock(&ksdei->lock); + spin_lock(&vsdei->lock); + + if (vsdei->critical_event) { + vcpu_event = vsdei->critical_event; + regs = &vsdei->state.critical_regs; + vsdei->critical_event = NULL; + vsdei->state.critical_num = KVM_SDEI_INVALID_EVENT; + } else if (vsdei->normal_event) { + vcpu_event = vsdei->normal_event; + regs = &vsdei->state.normal_regs; + vsdei->normal_event = NULL; + vsdei->state.normal_num = KVM_SDEI_INVALID_EVENT; + } else { + ret = SDEI_DENIED; + goto unlock; + } + + /* Restore registers: x0 -> x17, PC, PState */ + for (index = 0; index < ARRAY_SIZE(regs->regs); index++) + vcpu_set_reg(vcpu, index, regs->regs[index]); + + *vcpu_cpsr(vcpu) = regs->pstate; + *vcpu_pc(vcpu) = regs->pc; + + /* Inject interrupt if needed */ + if (resume) + kvm_inject_irq(vcpu); + + /* Dereference the vcpu event and destroy it if needed */ + vcpu_event->state.event_count--; + if (!vcpu_event->state.event_count) + remove_one_vcpu_event(vcpu, vcpu_event); + + /* + * We need to check if the registered event is pending for + * unregistration. 
In that case, the registered event should + * be unregistered and destroyed if needed. + */ + registered_event = vcpu_event->registered_event; + exposed_event = registered_event->exposed_event; + index = kvm_sdei_vcpu_index(vcpu, exposed_event); + if (kvm_sdei_is_unregister_pending(registered_event, index)) { + kvm_sdei_clear_enabled(registered_event, index); + kvm_sdei_clear_registered(registered_event, index); + if (kvm_sdei_none_registered(registered_event)) + remove_one_registered_event(kvm, registered_event); + } + + /* Make another request if we have any pending events */ + if ((vsdei->critical_event_count + vsdei->normal_event_count) > 0) + kvm_make_request(KVM_REQ_SDEI, vcpu); + +unlock: + spin_unlock(&vsdei->lock); + spin_unlock(&ksdei->lock); + + return ret; +} + static unsigned long unregister_one_event(struct kvm *kvm, struct kvm_vcpu *vcpu, struct kvm_sdei_registered_event *registered_event) @@ -864,8 +936,10 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu) ret = hypercall_context(vcpu); break; case SDEI_1_0_FN_SDEI_EVENT_COMPLETE: + ret = hypercall_complete(vcpu, false); + break; case SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME: - ret = SDEI_NOT_SUPPORTED; + ret = hypercall_complete(vcpu, true); break; case SDEI_1_0_FN_SDEI_EVENT_UNREGISTER: ret = hypercall_unregister(vcpu); -- 2.23.0 _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
next prev parent reply other threads:[~2022-03-22 8:10 UTC|newest] Thread overview: 98+ messages / expand[flat|nested] mbox.gz Atom feed top 2022-03-22 8:06 [PATCH v5 00/22] Support SDEI Virtualization Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 01/22] KVM: arm64: Introduce template for inline functions Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 19:42 ` Oliver Upton 2022-03-22 19:42 ` Oliver Upton 2022-03-23 12:16 ` Gavin Shan 2022-03-23 12:16 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 02/22] KVM: arm64: Add SDEI virtualization infrastructure Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 22:43 ` Oliver Upton 2022-03-22 22:43 ` Oliver Upton 2022-03-23 12:40 ` Gavin Shan 2022-03-23 12:40 ` Gavin Shan 2022-03-23 17:11 ` Oliver Upton 2022-03-23 17:11 ` Oliver Upton 2022-03-24 6:54 ` Gavin Shan 2022-03-24 6:54 ` Gavin Shan 2022-03-24 9:04 ` Oliver Upton 2022-03-24 9:04 ` Oliver Upton 2022-03-25 6:07 ` Gavin Shan 2022-03-25 6:07 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 03/22] KVM: arm64: Support SDEI_VERSION hypercall Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 18:04 ` Oliver Upton 2022-03-22 18:04 ` Oliver Upton 2022-03-23 12:46 ` Gavin Shan 2022-03-23 12:46 ` Gavin Shan 2022-03-23 16:31 ` Oliver Upton 2022-03-23 16:31 ` Oliver Upton 2022-03-24 4:07 ` Gavin Shan 2022-03-24 4:07 ` Gavin Shan 2022-03-24 7:48 ` Oliver Upton 2022-03-24 7:48 ` Oliver Upton 2022-03-25 6:11 ` Gavin Shan 2022-03-25 6:11 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 04/22] KVM: arm64: Support SDEI_EVENT_REGISTER hypercall Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 05/22] KVM: arm64: Support SDEI_EVENT_{ENABLE, DISABLE} hypercall Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 06/22] KVM: arm64: Support SDEI_EVENT_CONTEXT hypercall Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 07/22] KVM: arm64: Support SDEI_EVENT_UNREGISTER hypercall Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 8:06 ` 
[PATCH v5 08/22] KVM: arm64: Support SDEI_EVENT_STATUS hypercall Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 09/22] KVM: arm64: Support SDEI_EVENT_GET_INFO hypercall Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 10/22] KVM: arm64: Support SDEI_EVENT_ROUTING_SET hypercall Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 8:06 ` [PATCH v5 11/22] KVM: arm64: Support SDEI_PE_{MASK, UNMASK} hypercall Gavin Shan 2022-03-22 8:06 ` Gavin Shan 2022-03-22 8:07 ` [PATCH v5 12/22] KVM: arm64: Support SDEI_{PRIVATE, SHARED}_RESET Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-22 8:07 ` [PATCH v5 13/22] KVM: arm64: Support SDEI_FEATURES hypercall Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-22 8:07 ` [PATCH v5 14/22] KVM: arm64: Support SDEI event injection, delivery and cancellation Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-22 8:07 ` [PATCH v5 15/22] KVM: arm64: Support SDEI_EVENT_SIGNAL hypercall Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-22 23:06 ` Oliver Upton 2022-03-22 23:06 ` Oliver Upton 2022-03-23 12:52 ` Gavin Shan 2022-03-23 12:52 ` Gavin Shan 2022-03-22 8:07 ` Gavin Shan [this message] 2022-03-22 8:07 ` [PATCH v5 16/22] KVM: arm64: Support SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercall Gavin Shan 2022-03-22 8:07 ` [PATCH v5 17/22] KVM: arm64: Support SDEI event notifier Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-22 8:07 ` [PATCH v5 18/22] KVM: arm64: Support SDEI ioctl commands on VM Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-23 17:28 ` Oliver Upton 2022-03-23 17:28 ` Oliver Upton 2022-03-25 6:59 ` Gavin Shan 2022-03-25 6:59 ` Gavin Shan 2022-03-25 7:35 ` Oliver Upton 2022-03-25 7:35 ` Oliver Upton 2022-03-25 10:14 ` Gavin Shan 2022-03-25 10:14 ` Gavin Shan 2022-03-22 8:07 ` [PATCH v5 19/22] KVM: arm64: Support SDEI ioctl commands on vCPU Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-23 17:55 ` Oliver Upton 2022-03-23 17:55 ` Oliver Upton 2022-03-25 7:59 ` Gavin Shan 2022-03-25 7:59 ` 
Gavin Shan 2022-03-25 8:37 ` Oliver Upton 2022-03-25 8:37 ` Oliver Upton 2022-03-25 10:23 ` Gavin Shan 2022-03-25 10:23 ` Gavin Shan 2022-03-22 8:07 ` [PATCH v5 20/22] KVM: arm64: Export SDEI capability Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-22 8:07 ` [PATCH v5 21/22] KVM: arm64: Add SDEI document Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-22 8:07 ` [PATCH v5 22/22] KVM: selftests: Add SDEI test case Gavin Shan 2022-03-22 8:07 ` Gavin Shan 2022-03-22 18:13 ` [PATCH v5 00/22] Support SDEI Virtualization Oliver Upton 2022-03-22 18:13 ` Oliver Upton 2022-03-23 12:57 ` Gavin Shan 2022-03-23 12:57 ` Gavin Shan
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20220322080710.51727-17-gshan@redhat.com \ --to=gshan@redhat.com \ --cc=Jonathan.Cameron@huawei.com \ --cc=drjones@redhat.com \ --cc=eauger@redhat.com \ --cc=james.morse@arm.com \ --cc=kvmarm@lists.cs.columbia.edu \ --cc=linux-kernel@vger.kernel.org \ --cc=mark.rutland@arm.com \ --cc=maz@kernel.org \ --cc=pbonzini@redhat.com \ --cc=shan.gavin@gmail.com \ --cc=shannon.zhaosl@gmail.com \ --cc=vkuznets@redhat.com \ --cc=will@kernel.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.