Date: Thu, 30 Sep 2021 14:35:57 +0100
From: Will Deacon
To: Fuad Tabba
Cc: kvmarm@lists.cs.columbia.edu, maz@kernel.org, james.morse@arm.com,
	alexandru.elisei@arm.com, suzuki.poulose@arm.com, mark.rutland@arm.com,
	christoffer.dall@arm.com, pbonzini@redhat.com, drjones@redhat.com,
	oupton@google.com, qperret@google.com, kvm@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, kernel-team@android.com
Subject: Re: [PATCH v6 03/12] KVM: arm64: Move early handlers to per-EC handlers
Message-ID: <20210930133444.GC23809@willie-the-truck>
References: <20210922124704.600087-1-tabba@google.com>
 <20210922124704.600087-4-tabba@google.com>
In-Reply-To: <20210922124704.600087-4-tabba@google.com>

On Wed, Sep 22, 2021 at 01:46:55PM +0100, Fuad Tabba wrote:
> From: Marc Zyngier
>
> Simplify the early exception handling by slicing the gigantic decoding
> tree into a more manageable set of functions, similar to what we have
> in handle_exit.c.
>
> This will also make the structure reusable for pKVM's own early exit
> handling.
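
Just to check I'm reading the new structure correctly, since the
dispatch table itself isn't in the hunks quoted here: the shape is
roughly the below (my paraphrase, not the patch's exact code):

	typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

	/* Sketch: at most one handler per ESR_ELx exception class. */
	static const exit_handler_fn hyp_exit_handlers[] = {
		[0 ... ESR_ELx_EC_MAX]	= NULL,
		[ESR_ELx_EC_CP15_32]	= kvm_hyp_handle_cp15,
		[ESR_ELx_EC_SYS64]	= kvm_hyp_handle_sysreg,
		[ESR_ELx_EC_SVE]	= kvm_hyp_handle_fpsimd,
		[ESR_ELx_EC_FP_ASIMD]	= kvm_hyp_handle_fpsimd,
		[ESR_ELx_EC_PAC]	= kvm_hyp_handle_ptrauth,
	};

	static bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
	{
		exit_handler_fn fn;

		fn = hyp_exit_handlers[kvm_vcpu_trap_get_class(vcpu)];

		/* Unhandled classes fall back to the generic exit path. */
		return fn ? fn(vcpu, exit_code) : false;
	}
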
>
> Signed-off-by: Marc Zyngier
> Signed-off-by: Fuad Tabba
> ---
>  arch/arm64/kvm/hyp/include/hyp/switch.h | 160 ++++++++++++++----------
>  arch/arm64/kvm/hyp/nvhe/switch.c        |  17 +++
>  arch/arm64/kvm/hyp/vhe/switch.c         |  17 +++
>  3 files changed, 126 insertions(+), 68 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 54abc8298ec3..0397606c0951 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -136,16 +136,7 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
>
>  static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
>  {
> -	u8 ec;
> -	u64 esr;
> -
> -	esr = vcpu->arch.fault.esr_el2;
> -	ec = ESR_ELx_EC(esr);
> -
> -	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
> -		return true;
> -
> -	return __get_fault_info(esr, &vcpu->arch.fault);
> +	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
>  }
>
>  static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
> @@ -166,8 +157,13 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
>  	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
>  }
>
> -/* Check for an FPSIMD/SVE trap and handle as appropriate */
> -static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
> +/*
> + * We trap the first access to the FP/SIMD to save the host context and
> + * restore the guest context lazily.
> + * If FP/SIMD is not implemented, handle the trap and inject an undefined
> + * instruction exception to the guest. Similarly for trapped SVE accesses.
> + */
> +static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	bool sve_guest, sve_host;
>  	u8 esr_ec;
> @@ -185,9 +181,6 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
>  	}
>
>  	esr_ec = kvm_vcpu_trap_get_class(vcpu);
> -	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
> -	    esr_ec != ESR_ELx_EC_SVE)
> -		return false;
>
>  	/* Don't handle SVE traps for non-SVE vcpus here: */
>  	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
> @@ -325,7 +318,7 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
>
>  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
>
> -static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
> +static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	struct kvm_cpu_context *ctxt;
>  	u64 val;
> @@ -350,6 +343,87 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
>  	return true;
>  }
>
> +static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
> +{
> +	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
> +	    handle_tx2_tvm(vcpu))
> +		return true;
> +
> +	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
> +	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
> +		return true;
> +
> +	return false;
> +}
> +
> +static bool kvm_hyp_handle_cp15(struct kvm_vcpu *vcpu, u64 *exit_code)
> +{
> +	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
> +	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
> +		return true;

I think you're now calling this for the 64-bit CP15 access path, which I
don't think is correct. Maybe have separate handlers for 32-bit vs
64-bit accesses?
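
Something like the below, perhaps (untested sketch on my part; the name
is just illustrative, and it assumes the vgic cpuif trap only needs
handling on the 32-bit path, as in the code before this patch):

	static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
	{
		/*
		 * Only hook ESR_ELx_EC_CP15_32 up to this; the 64-bit
		 * EC would then get no early handler at all.
		 */
		if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
		    __vgic_v3_perform_cpuif_access(vcpu) == 1)
			return true;

		return false;
	}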
Will