From mboxrd@z Thu Jan 1 00:00:00 1970
From: Marc Zyngier <marc.zyngier@arm.com>
Subject: [PATCH 05/29] arm64: KVM: Basic ESR_EL2 helpers and vcpu register access
Date: Tue, 5 Mar 2013 03:47:21 +0000
Message-ID: <1362455265-24165-6-git-send-email-marc.zyngier@arm.com>
References: <1362455265-24165-1-git-send-email-marc.zyngier@arm.com>
Cc: catalin.marinas@arm.com
To: linux-arm-kernel@lists.infradead.org, kvm@vger.kernel.org, kvmarm@lists.cs.columbia.edu
Return-path:
Received: from inca-roads.misterjones.org ([213.251.177.50]:60529
	"EHLO inca-roads.misterjones.org" rhost-flags-OK-OK-OK-OK)
	by vger.kernel.org with ESMTP id S932843Ab3CEDsJ (ORCPT );
	Mon, 4 Mar 2013 22:48:09 -0500
In-Reply-To: <1362455265-24165-1-git-send-email-marc.zyngier@arm.com>
Sender: kvm-owner@vger.kernel.org
List-ID:

Implements helpers for dealing with the EL2 syndrome register as well
as accessing the vcpu registers.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/include/asm/kvm_emulate.h | 159 +++++++++++++++++++++++++++++++++++
 1 file changed, 159 insertions(+)
 create mode 100644 arch/arm64/include/asm/kvm_emulate.h

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
new file mode 100644
index 0000000..16a343b
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/kvm_emulate.h
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_EMULATE_H__
+#define __ARM64_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/ptrace.h>
+
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
+{
+	return (unsigned long *)&vcpu->arch.regs.regs.pc;
+}
+
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+{
+	return (unsigned long *)&vcpu->arch.regs.regs.pstate;
+}
+
+static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+{
+	return false;	/* 32bit? Bahhh... */
+}
+
+static inline bool kvm_condition_valid(struct kvm_vcpu *vcpu)
+{
+	return true;	/* No conditionals on arm64 */
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+	*vcpu_pc(vcpu) += 4;
+}
+
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+{
+	return (unsigned long *)&vcpu->arch.regs.regs.regs[reg_num];
+
+}
+
+/* Get vcpu SPSR for current mode */
+static inline unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
+{
+	return &vcpu->arch.regs.spsr[KVM_SPSR_EL1];
+}
+
+static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
+{
+	return false;
+}
+
+static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
+{
+	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+
+	return mode != PSR_MODE_EL0t;
+}
+
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.esr_el2;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.far_el2;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+	return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+}
+
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+}
+
+#endif /* __ARM64_KVM_EMULATE_H__ */
-- 
1.7.12.4
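
[Editor's note, not part of the patch] The helpers above are plain bit-field extractions on the saved ESR_EL2 value. The standalone sketch below shows the same data-abort decode logic in isolation; the field positions follow the ARMv8 ESR ISS encoding, and the ESR_* names mirror the kernel's ESR_EL2_* macros but are redefined locally as assumptions so the example compiles on its own.

/* Standalone sketch of the data-abort syndrome decode performed by the
 * kvm_vcpu_dabt_*() helpers. Field positions come from the ARMv8 ESR
 * ISS encoding; names mimic the kernel macros but are local to this
 * example.
 */
#include <stdint.h>
#include <stdio.h>

#define ESR_EC_SHIFT	26			/* exception class */
#define ESR_ISV		(1u << 24)		/* instruction syndrome valid */
#define ESR_SAS_SHIFT	22			/* access size: 2^SAS bytes */
#define ESR_SAS		(3u << ESR_SAS_SHIFT)
#define ESR_SSE		(1u << 21)		/* sign-extend the loaded value */
#define ESR_SRT_SHIFT	16			/* register used for the transfer */
#define ESR_SRT_MASK	(0x1fu << ESR_SRT_SHIFT)
#define ESR_WNR		(1u << 6)		/* write, not read */

int main(void)
{
	/* Hypothetical syndrome: a 32-bit write of x3 (EC 0x24 is a data
	 * abort from a lower exception level). */
	uint32_t esr = (0x24u << ESR_EC_SHIFT) | ESR_ISV |
		       (2u << ESR_SAS_SHIFT) | (3u << ESR_SRT_SHIFT) | ESR_WNR;

	printf("class    : %#x\n", esr >> ESR_EC_SHIFT);
	printf("isv      : %d\n", !!(esr & ESR_ISV));
	printf("is write : %d\n", !!(esr & ESR_WNR));
	printf("size     : %u byte(s)\n", 1u << ((esr & ESR_SAS) >> ESR_SAS_SHIFT));
	printf("register : x%u\n", (esr & ESR_SRT_MASK) >> ESR_SRT_SHIFT);
	printf("sign-ext : %d\n", !!(esr & ESR_SSE));
	return 0;
}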