From: Eric Auger <eauger@redhat.com>
To: Gavin Shan <gshan@redhat.com>, kvmarm@lists.cs.columbia.edu
Cc: maz@kernel.org, linux-kernel@vger.kernel.org,
	Jonathan.Cameron@huawei.com, pbonzini@redhat.com,
	will@kernel.org
Subject: Re: [PATCH v4 14/21] KVM: arm64: Support SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercall
Date: Thu, 27 Jan 2022 15:47:03 +0100	[thread overview]
Message-ID: <adbeb3e7-0edd-502e-4d6b-a51350d596bf@redhat.com> (raw)
In-Reply-To: <fdb013e7-10a1-58de-92aa-3ab9d25346fa@redhat.com>

Hi Gavin,

On 1/12/22 7:43 AM, Gavin Shan wrote:
> Hi Eric,
> 
> On 11/10/21 6:58 PM, Eric Auger wrote:
>> On 8/15/21 2:13 AM, Gavin Shan wrote:
>>> This supports the SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercalls.
>>> They are used by the guest to notify the completion of the SDEI
>>> event in the handler. The registers are changed according to the
>>> SDEI specification as below:
>>>
>>>     * x0 - x17, PC and PState are restored to the values they had in
>>>       the interrupted context.
>>>
>>>     * If it's the SDEI_EVENT_COMPLETE_AND_RESUME hypercall, an IRQ exception
>>>       is injected.
>>>
>>> Signed-off-by: Gavin Shan <gshan@redhat.com>
>>> ---
>>>   arch/arm64/include/asm/kvm_emulate.h |  1 +
>>>   arch/arm64/include/asm/kvm_host.h    |  1 +
>>>   arch/arm64/kvm/hyp/exception.c       |  7 +++
>>>   arch/arm64/kvm/inject_fault.c        | 27 ++++++++++
>>>   arch/arm64/kvm/sdei.c                | 75 ++++++++++++++++++++++++++++
>>>   5 files changed, 111 insertions(+)
>>>
>>> diff --git a/arch/arm64/include/asm/kvm_emulate.h
>>> b/arch/arm64/include/asm/kvm_emulate.h
>>> index fd418955e31e..923b4d08ea9a 100644
>>> --- a/arch/arm64/include/asm/kvm_emulate.h
>>> +++ b/arch/arm64/include/asm/kvm_emulate.h
>>> @@ -37,6 +37,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu
>>> *vcpu);
>>>   void kvm_skip_instr32(struct kvm_vcpu *vcpu);
>>>     void kvm_inject_undefined(struct kvm_vcpu *vcpu);
>>> +void kvm_inject_irq(struct kvm_vcpu *vcpu);
>>>   void kvm_inject_vabt(struct kvm_vcpu *vcpu);
>>>   void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
>>>   void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
>>> diff --git a/arch/arm64/include/asm/kvm_host.h
>>> b/arch/arm64/include/asm/kvm_host.h
>>> index 46f363aa6524..1824f7e1f9ab 100644
>>> --- a/arch/arm64/include/asm/kvm_host.h
>>> +++ b/arch/arm64/include/asm/kvm_host.h
>>> @@ -437,6 +437,7 @@ struct kvm_vcpu_arch {
>>>   #define KVM_ARM64_EXCEPT_AA32_UND    (0 << 9)
>>>   #define KVM_ARM64_EXCEPT_AA32_IABT    (1 << 9)
>>>   #define KVM_ARM64_EXCEPT_AA32_DABT    (2 << 9)
>>> +#define KVM_ARM64_EXCEPT_AA32_IRQ    (3 << 9)
>>>   /* For AArch64: */
>>>   #define KVM_ARM64_EXCEPT_AA64_ELx_SYNC    (0 << 9)
>>>   #define KVM_ARM64_EXCEPT_AA64_ELx_IRQ    (1 << 9)
>>> diff --git a/arch/arm64/kvm/hyp/exception.c
>>> b/arch/arm64/kvm/hyp/exception.c
>>> index 0418399e0a20..ef458207d152 100644
>>> --- a/arch/arm64/kvm/hyp/exception.c
>>> +++ b/arch/arm64/kvm/hyp/exception.c
>>> @@ -310,6 +310,9 @@ static void kvm_inject_exception(struct kvm_vcpu
>>> *vcpu)
>>>           case KVM_ARM64_EXCEPT_AA32_DABT:
>>>               enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
>>>               break;
>>> +        case KVM_ARM64_EXCEPT_AA32_IRQ:
>>> +            enter_exception32(vcpu, PSR_AA32_MODE_IRQ, 4);
>>> +            break;
>>>           default:
>>>               /* Err... */
>>>               break;
>>> @@ -320,6 +323,10 @@ static void kvm_inject_exception(struct kvm_vcpu
>>> *vcpu)
>>>                 KVM_ARM64_EXCEPT_AA64_EL1):
>>>               enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
>>>               break;
>>> +        case (KVM_ARM64_EXCEPT_AA64_ELx_IRQ |
>>> +              KVM_ARM64_EXCEPT_AA64_EL1):
>>> +            enter_exception64(vcpu, PSR_MODE_EL1h, except_type_irq);
>>> +            break;
>>>           default:
>>>               /*
>>>                * Only EL1_SYNC makes sense so far, EL2_{SYNC,IRQ}
>>> diff --git a/arch/arm64/kvm/inject_fault.c
>>> b/arch/arm64/kvm/inject_fault.c
>>> index b47df73e98d7..3a8c55867d2f 100644
>>> --- a/arch/arm64/kvm/inject_fault.c
>>> +++ b/arch/arm64/kvm/inject_fault.c
>>> @@ -66,6 +66,13 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
>>>       vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
>>>   }
>>>   +static void inject_irq64(struct kvm_vcpu *vcpu)
>>> +{
>>> +    vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1     |
>>> +                 KVM_ARM64_EXCEPT_AA64_ELx_IRQ |
>>> +                 KVM_ARM64_PENDING_EXCEPTION);
>>> +}
>>> +
>>>   #define DFSR_FSC_EXTABT_LPAE    0x10
>>>   #define DFSR_FSC_EXTABT_nLPAE    0x08
>>>   #define DFSR_LPAE        BIT(9)
>>> @@ -77,6 +84,12 @@ static void inject_undef32(struct kvm_vcpu *vcpu)
>>>                    KVM_ARM64_PENDING_EXCEPTION);
>>>   }
>>>   +static void inject_irq32(struct kvm_vcpu *vcpu)
>>> +{
>>> +    vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IRQ |
>>> +                 KVM_ARM64_PENDING_EXCEPTION);
>>> +}
>>> +
>>>   /*
>>>    * Modelled after TakeDataAbortException() and
>>> TakePrefetchAbortException
>>>    * pseudocode.
>>> @@ -160,6 +173,20 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
>>>           inject_undef64(vcpu);
>>>   }
>>>   +/**
>>> + * kvm_inject_irq - inject an IRQ into the guest
>>> + *
>>> + * It is assumed that this code is called from the VCPU thread and
>>> that the
>>> + * VCPU therefore is not currently executing guest code.
>>> + */
>>> +void kvm_inject_irq(struct kvm_vcpu *vcpu)
>>> +{
>>> +    if (vcpu_el1_is_32bit(vcpu))
>>> +        inject_irq32(vcpu);
>>> +    else
>>> +        inject_irq64(vcpu);
>>> +}
>>> +
>>>   void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
>>>   {
>>>       vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
>>> diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
>>> index b5d6d1ed3858..1e8e213c9d70 100644
>>> --- a/arch/arm64/kvm/sdei.c
>>> +++ b/arch/arm64/kvm/sdei.c
>>> @@ -308,6 +308,75 @@ static unsigned long
>>> kvm_sdei_hypercall_context(struct kvm_vcpu *vcpu)
>>>       return ret;
>>>   }
>>>   +static unsigned long kvm_sdei_hypercall_complete(struct kvm_vcpu
>>> *vcpu,
>>> +                         bool resume)
>>> +{
>>> +    struct kvm *kvm = vcpu->kvm;
>>> +    struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
>>> +    struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
>>> +    struct kvm_sdei_kvm_event *kske = NULL;
>>> +    struct kvm_sdei_vcpu_event *ksve = NULL;
>>> +    struct kvm_sdei_vcpu_regs *regs;
>>> +    unsigned long ret = SDEI_SUCCESS;
>> For the RESUME case you never seem to read the resume_addr arg? How does it work?
>> I don't get the IRQ injection path. Could you please explain?
> 
> The guest kernel uses the COMPLETE and COMPLETE_AND_RESUME hypercalls to
> notify that the SDEI event has been acknowledged. The difference between
> them is that COMPLETE_AND_RESUME fires the pending interrupts, while
> COMPLETE doesn't.
so resume_addr is never used, right?
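
My reading of the SDEI v1.0 spec is that COMPLETE_AND_RESUME takes the
resume address in x1 and resumes the client there as if an exception had
been taken, with the interrupted PC/PState made available through
ELR_EL1/SPSR_EL1. So I would have expected something along these lines
(untested sketch only, reusing the existing smccc_get_arg1() helper and
possibly the wrong accessors; this is not what the patch does):

	if (resume) {
		unsigned long resume_addr = smccc_get_arg1(vcpu);

		/* expose the interrupted context to the client ... */
		vcpu_write_sys_reg(vcpu, regs->pc, ELR_EL1);
		vcpu_write_sys_reg(vcpu, regs->pstate, SPSR_EL1);
		/* ... and resume at the address passed in x1 */
		*vcpu_pc(vcpu) = resume_addr;
	}

If injecting an IRQ instead is intentional, a comment explaining how it
relates to the resume_addr semantics would help.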
> 
>>> +    int index;
>>> +
>>> +    /* Sanity check */
>>> +    if (!(ksdei && vsdei)) {
>>> +        ret = SDEI_NOT_SUPPORTED;
>>> +        goto out;
>>> +    }
>>> +
>>> +    spin_lock(&vsdei->lock);
>>> +    if (vsdei->critical_event) {
>>> +        ksve = vsdei->critical_event;
>>> +        regs = &vsdei->state.critical_regs;
>>> +        vsdei->critical_event = NULL;
>>> +        vsdei->state.critical_num = KVM_SDEI_INVALID_NUM;
>>> +    } else if (vsdei->normal_event) {
>>> +        ksve = vsdei->normal_event;
>>> +        regs = &vsdei->state.normal_regs;
>>> +        vsdei->normal_event = NULL;
>>> +        vsdei->state.normal_num = KVM_SDEI_INVALID_NUM;
>>> +    } else {
>>> +        ret = SDEI_DENIED;
>>> +        goto unlock;
>>> +    }
>>> +
>>> +    /* Restore registers: x0 -> x17, PC, PState */
>>> +    for (index = 0; index < ARRAY_SIZE(regs->regs); index++)
>>> +        vcpu_set_reg(vcpu, index, regs->regs[index]);
>>> +
>>> +    *vcpu_cpsr(vcpu) = regs->pstate;
>>> +    *vcpu_pc(vcpu) = regs->pc;
>>> +
>>> +    /* Inject interrupt if needed */
>>> +    if (resume)
>>> +        kvm_inject_irq(vcpu);
>>> +
>>> +    /*
>>> +     * Update state. We needn't take lock in order to update the KVM
>>> +     * event state as it's not destroyed because of the reference
>>> +     * count.
>>> +     */
>>> +    kske = ksve->kske;
>>> +    ksve->state.refcount--;
>>> +    kske->state.refcount--;
>> why double --?
> 
> Each time an SDEI event is queued for delivery, both reference counts
> are increased. I guess it's a bit confusing. I will change it in the
> next revision:
> 
> ksve->state.refcount: increased each time an SDEI event is queued for delivery
> kske->state.refcount: increased when the corresponding @ksve is created
> 
> 
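OK, that is clearer, thanks. So in this version the injection path bumps
both counters for every queued event, roughly (sketch, names taken from
this series as I understand it):

	ksve->state.refcount++;	/* one more pending delivery on this vcpu */
	kske->state.refcount++;	/* keep the KVM event alive meanwhile */
	kvm_make_request(KVM_REQ_SDEI, vcpu);

which is why both get decremented here. The split you propose (kske
counting live @ksve structures, ksve counting queued deliveries) indeed
sounds less confusing.
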
>>> +    if (!ksve->state.refcount) {
>> why not use a struct kref directly?
> 
> The reason is that kref isn't friendly to userspace. This field
> (@refcount) needs to be migrated :)
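
Understood. So the point is that @refcount lives in a state structure
which is exposed to userspace for save/restore, something like (field
names guessed here, just to illustrate):

	struct kvm_sdei_vcpu_event_state {
		__u64	num;
		__u64	refcount;	/* plain counter, trivially migrated */
	};

whereas struct kref wraps a refcount_t/atomic_t, which you would not
want in a UAPI structure.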

I will check this against the migration documentation in the next version.

Thanks

Eric
> 
>>> +        list_del(&ksve->link);
>>> +        kfree(ksve);
>>> +    }
>>> +
>>> +    /* Make another request if there is pending event */
>>> +    if (!(list_empty(&vsdei->critical_events) &&
>>> +          list_empty(&vsdei->normal_events)))
>>> +        kvm_make_request(KVM_REQ_SDEI, vcpu);
>>> +
>>> +unlock:
>>> +    spin_unlock(&vsdei->lock);
>>> +out:
>>> +    return ret;
>>> +}
>>> +
>>>   static unsigned long kvm_sdei_hypercall_unregister(struct kvm_vcpu
>>> *vcpu)
>>>   {
>>>       struct kvm *kvm = vcpu->kvm;
>>> @@ -628,7 +697,13 @@ int kvm_sdei_hypercall(struct kvm_vcpu *vcpu)
>>>           ret = kvm_sdei_hypercall_context(vcpu);
>>>           break;
>>>       case SDEI_1_0_FN_SDEI_EVENT_COMPLETE:
>>> +        has_result = false;
>>> +        ret = kvm_sdei_hypercall_complete(vcpu, false);
>>> +        break;
>>>       case SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME:
>>> +        has_result = false;
>>> +        ret = kvm_sdei_hypercall_complete(vcpu, true);
>>> +        break;
>>>       case SDEI_1_0_FN_SDEI_EVENT_UNREGISTER:
>>>           ret = kvm_sdei_hypercall_unregister(vcpu);
>>>           break;
>>>
> 
> Thanks,
> Gavin
> 

