From: Fuad Tabba <tabba@google.com>
To: kvmarm@lists.cs.columbia.edu
Cc: maz@kernel.org, will@kernel.org, james.morse@arm.com,
	alexandru.elisei@arm.com, suzuki.poulose@arm.com,
	mark.rutland@arm.com, christoffer.dall@arm.com,
	drjones@redhat.com, qperret@google.com, kvm@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, kernel-team@android.com,
	tabba@google.com
Subject: [RFC PATCH v1 10/30] KVM: arm64: Add accessors for hypervisor state in kvm_vcpu_arch
Date: Fri, 24 Sep 2021 13:53:39 +0100	[thread overview]
Message-ID: <20210924125359.2587041-11-tabba@google.com> (raw)
In-Reply-To: <20210924125359.2587041-1-tabba@google.com>

Some of the members of vcpu_arch represent state that belongs to
the hypervisor. Future patches will factor these members out into
their own structure. To simplify that refactoring and make the
resulting diffs easier to read, add accessors for the members of
kvm_vcpu_arch that represent hypervisor state.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
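Note (not part of the patch): a minimal sketch of where this series is
heading, to make the intent of the new accessors easier to see. The
vcpu_hyp_state struct is only introduced by a later patch, so the layout
below and the example_* helpers are illustrative assumptions rather than
final code; only hyp_state() and the kvm_hyp_state_*() accessors come
from this patch.

/* Illustrative layout; the real struct arrives in a later patch. */
struct vcpu_hyp_state {
	u64 hcr_el2;
	u32 mdcr_el2;
	u64 vsesr_el2;
	struct kvm_vcpu_fault_info fault;
	u64 flags;
};

/*
 * Hypothetical hyp-only helper, loosely modelled on kvm_is_write_fault():
 * once callers go through the new accessors, code that only needs
 * hypervisor state can take a vcpu_hyp_state pointer instead of the
 * whole kvm_vcpu.
 */
static bool example_is_write_fault(const struct vcpu_hyp_state *hyps)
{
	if (kvm_hyp_state_abt_iss1tw(hyps))
		return true;

	return kvm_hyp_state_dabt_iswrite(hyps);
}

/* Existing vcpu-based callers keep working through the thin wrappers. */
static bool example_vcpu_is_write_fault(struct kvm_vcpu *vcpu)
{
	return example_is_write_fault(&hyp_state(vcpu));
}
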
 arch/arm64/include/asm/kvm_emulate.h | 182 ++++++++++++++++++++++-----
 arch/arm64/include/asm/kvm_host.h    |  38 ++++--
 2 files changed, 181 insertions(+), 39 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 7d09a9356d89..e095afeecd10 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,9 +41,14 @@ void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static __always_inline bool hyp_state_el1_is_32bit(struct vcpu_hyp_state *vcpu_hyps)
+{
+	return !(hyp_state_hcr_el2(vcpu_hyps) & HCR_RW);
+}
+
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
-	return !(vcpu_hcr_el2(vcpu) & HCR_RW);
+	return hyp_state_el1_is_32bit(&hyp_state(vcpu));
 }
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
@@ -252,14 +257,19 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
+static __always_inline u32 kvm_hyp_state_get_esr(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return hyp_state_fault(vcpu_hyps).esr_el2;
+}
+
 static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
-	return vcpu_fault(vcpu).esr_el2;
+	return kvm_hyp_state_get_esr(&hyp_state(vcpu));
 }
 
-static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_hyp_state_get_condition(const struct vcpu_hyp_state *vcpu_hyps)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u32 esr = kvm_hyp_state_get_esr(vcpu_hyps);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -267,111 +277,216 @@ static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 	return -1;
 }
 
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+	return kvm_hyp_state_get_condition(&hyp_state(vcpu));
+}
+
+static __always_inline unsigned long kvm_hyp_state_get_hfar(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return hyp_state_fault(vcpu_hyps).far_el2;
+}
+
 static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
 {
-	return vcpu_fault(vcpu).far_el2;
+	return kvm_hyp_state_get_hfar(&hyp_state(vcpu));
+}
+
+static __always_inline phys_addr_t kvm_hyp_state_get_fault_ipa(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return ((phys_addr_t) hyp_state_fault(vcpu_hyps).hpfar_el2 & HPFAR_MASK) << 8;
 }
 
 static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
-	return ((phys_addr_t) vcpu_fault(vcpu).hpfar_el2 & HPFAR_MASK) << 8;
+	return kvm_hyp_state_get_fault_ipa(&hyp_state(vcpu));
+}
+
+static __always_inline u64 kvm_hyp_state_get_disr(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return hyp_state_fault(vcpu_hyps).disr_el1;
 }
 
 static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 {
-	return vcpu_fault(vcpu).disr_el1;
+	return kvm_hyp_state_get_disr(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_get_imm(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_hyp_state_get_imm(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_dabt_isvalid(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return !!(kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_ISV);
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
+	return kvm_hyp_state_dabt_isvalid(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_iss_nisv_sanitized(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return kvm_hyp_state_get_esr(vcpu_hyps) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_hyp_state_iss_nisv_sanitized(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_issext(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return !!(kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
+	return kvm_hyp_state_issext(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_issf(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return !!(kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_SF);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
+	return kvm_hyp_state_issf(&hyp_state(vcpu));
+}
+
+static __always_inline int kvm_hyp_state_dabt_get_rd(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return (kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return kvm_hyp_state_dabt_get_rd(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_abt_iss1tw(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return !!(kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
+	return kvm_hyp_state_abt_iss1tw(&hyp_state(vcpu));
 }
 
 /* Always check for S1PTW *before* using this. */
+static __always_inline u32 kvm_hyp_state_dabt_iswrite(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_WNR;
+}
+
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
+	return kvm_hyp_state_dabt_iswrite(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_dabt_is_cm(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return !!(kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_CM);
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
+	return kvm_hyp_state_dabt_is_cm(&hyp_state(vcpu));
+}
+
+static __always_inline unsigned int kvm_hyp_state_dabt_get_as(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return 1 << ((kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return kvm_hyp_state_dabt_get_as(&hyp_state(vcpu));
 }
 
 /* This one is not specific to Data Abort */
+static __always_inline u32 kvm_hyp_state_trap_il_is32bit(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return !!(kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_IL);
+}
+
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
+	return kvm_hyp_state_trap_il_is32bit(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_trap_get_class(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return ESR_ELx_EC(kvm_hyp_state_get_esr(vcpu_hyps));
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
+	return kvm_hyp_state_trap_get_class(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_trap_is_iabt(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return kvm_hyp_state_trap_get_class(vcpu_hyps) == ESR_ELx_EC_IABT_LOW;
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+	return kvm_hyp_state_trap_is_iabt(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_trap_is_exec_fault(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return kvm_hyp_state_trap_is_iabt(vcpu_hyps) && !kvm_hyp_state_abt_iss1tw(vcpu_hyps);
 }
 
 static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+	return kvm_hyp_state_trap_is_exec_fault(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_trap_get_fault(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
+	return kvm_hyp_state_trap_get_fault(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_trap_get_fault_type(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_FSC_TYPE;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_hyp_state_trap_get_fault_type(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_trap_get_fault_level(const struct vcpu_hyp_state *vcpu_hyps)
+{
+	return kvm_hyp_state_get_esr(vcpu_hyps) & ESR_ELx_FSC_LEVEL;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
+	return kvm_hyp_state_trap_get_fault_level(&hyp_state(vcpu));
 }
 
-static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_hyp_state_abt_issea(const struct vcpu_hyp_state *vcpu_hyps)
 {
-	switch (kvm_vcpu_trap_get_fault(vcpu)) {
+	switch (kvm_hyp_state_trap_get_fault(vcpu_hyps)) {
 	case FSC_SEA:
 	case FSC_SEA_TTW0:
 	case FSC_SEA_TTW1:
@@ -388,12 +503,23 @@ static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 	}
 }
 
-static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
+{
+	return kvm_hyp_state_abt_issea(&hyp_state(vcpu));
+}
+
+static __always_inline u32 kvm_hyp_state_sys_get_rt(const struct vcpu_hyp_state *vcpu_hyps)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u32 esr = kvm_hyp_state_get_esr(vcpu_hyps);
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
+
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+{
+	return kvm_hyp_state_sys_get_rt(&hyp_state(vcpu));
+}
+
 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 {
 	if (kvm_vcpu_abt_iss1tw(vcpu))
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 280ee23dfc5a..3e5c173d2360 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -373,12 +373,21 @@ struct kvm_vcpu_arch {
 	} steal;
 };
 
+#define hyp_state(vcpu) ((vcpu)->arch)
+
+/* Accessors for hyp_state parameters related to the hypervisor state. */
+#define hyp_state_hcr_el2(hyps) (hyps)->hcr_el2
+#define hyp_state_mdcr_el2(hyps) (hyps)->mdcr_el2
+#define hyp_state_vsesr_el2(hyps) (hyps)->vsesr_el2
+#define hyp_state_fault(hyps) (hyps)->fault
+#define hyp_state_flags(hyps) (hyps)->flags
+
 /* Accessors for vcpu parameters related to the hypervisor state. */
-#define vcpu_hcr_el2(vcpu) (vcpu)->arch.hcr_el2
-#define vcpu_mdcr_el2(vcpu) (vcpu)->arch.mdcr_el2
-#define vcpu_vsesr_el2(vcpu) (vcpu)->arch.vsesr_el2
-#define vcpu_fault(vcpu) (vcpu)->arch.fault
-#define vcpu_flags(vcpu) (vcpu)->arch.flags
+#define vcpu_hcr_el2(vcpu) hyp_state_hcr_el2(&hyp_state(vcpu))
+#define vcpu_mdcr_el2(vcpu) hyp_state_mdcr_el2(&hyp_state(vcpu))
+#define vcpu_vsesr_el2(vcpu) hyp_state_vsesr_el2(&hyp_state(vcpu))
+#define vcpu_fault(vcpu) hyp_state_fault(&hyp_state(vcpu))
+#define vcpu_flags(vcpu) hyp_state_flags(&hyp_state(vcpu))
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
@@ -441,18 +450,22 @@ struct kvm_vcpu_arch {
  */
 #define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
 
-#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
-			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+#define hyp_state_has_sve(hyps) (system_supports_sve() &&		\
+			    (hyp_state_flags((hyps)) & KVM_ARM64_GUEST_HAS_SVE))
+
+#define vcpu_has_sve(vcpu) hyp_state_has_sve(&hyp_state(vcpu))
 
 #ifdef CONFIG_ARM64_PTR_AUTH
-#define vcpu_has_ptrauth(vcpu)						\
+#define hyp_state_has_ptrauth(hyps)					\
 	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
 	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
-	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+	 hyp_state_flags(hyps) & KVM_ARM64_GUEST_HAS_PTRAUTH)
 #else
-#define vcpu_has_ptrauth(vcpu)		false
+#define hyp_state_has_ptrauth(hyps)		false
 #endif
 
+#define vcpu_has_ptrauth(vcpu)	hyp_state_has_ptrauth(&hyp_state(vcpu))
+
 #define vcpu_ctxt(vcpu) ((vcpu)->arch.ctxt)
 
 /* VCPU Context accessors (direct) */
@@ -794,8 +807,11 @@ static inline bool kvm_vm_is_protected(struct kvm *kvm)
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 
+#define kvm_arm_hyp_state_sve_finalized(hyps) \
+	(hyp_state_flags((hyps)) & KVM_ARM64_VCPU_SVE_FINALIZED)
+
 #define kvm_arm_vcpu_sve_finalized(vcpu) \
-	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+	kvm_arm_hyp_state_sve_finalized(&hyp_state(vcpu))
 
 #define kvm_vcpu_has_pmu(vcpu)					\
 	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
-- 
2.33.0.685.g46640cef36-goog



Thread overview: 36+ messages
2021-09-24 12:53 [RFC PATCH v1 00/30] Reduce scope of vcpu state at hyp by refactoring out state hyp needs Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 01/30] KVM: arm64: placeholder to check if VM is protected Fuad Tabba
2021-09-27 15:50   ` Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 02/30] [DONOTMERGE] Temporarily disable unused variable warning Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 03/30] [DONOTMERGE] Coccinelle scripts for refactoring Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 04/30] KVM: arm64: remove unused parameters and asm offsets Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 05/30] KVM: arm64: add accessors for kvm_cpu_context Fuad Tabba
2021-09-27 15:57   ` Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 06/30] KVM: arm64: COCCI: use_ctxt_access.cocci: use kvm_cpu_context accessors Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 07/30] KVM: arm64: COCCI: add_ctxt.cocci use_ctxt.cocci: reduce scope of functions to kvm_cpu_ctxt Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 08/30] KVM: arm64: add hypervisor state accessors Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 09/30] KVM: arm64: COCCI: vcpu_hyp_accessors.cocci: use accessors for hypervisor state vcpu variables Fuad Tabba
2021-09-24 12:53 ` Fuad Tabba [this message]
2021-09-27 16:10   ` [RFC PATCH v1 10/30] KVM: arm64: Add accessors for hypervisor state in kvm_vcpu_arch Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 11/30] KVM: arm64: create and use a new vcpu_hyp_state struct Fuad Tabba
2021-09-27 16:32   ` Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 12/30] KVM: arm64: COCCI: add_hypstate.cocci use_hypstate.cocci: Reduce scope of functions to hyp_state Fuad Tabba
2021-09-27 16:40   ` Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 13/30] KVM: arm64: change function parameters to use kvm_cpu_ctxt and hyp_state Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 14/30] KVM: arm64: reduce scope of vgic v2 Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 15/30] KVM: arm64: COCCI: vgic3_cpu.cocci: reduce scope of vgic v3 Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 16/30] KVM: arm64: reduce scope of vgic_v3 access parameters Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 17/30] KVM: arm64: access __hyp_running_vcpu via accessors only Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 18/30] KVM: arm64: reduce scope of __guest_exit to only depend on kvm_cpu_context Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 19/30] KVM: arm64: change calls of get_loaded_vcpu to get_loaded_vcpu_ctxt Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 20/30] KVM: arm64: add __hyp_running_ctxt and __hyp_running_hyps Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 21/30] KVM: arm64: transition code to __hyp_running_ctxt and __hyp_running_hyps Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 22/30] KVM: arm64: reduce scope of __guest_enter to depend only on kvm_cpu_ctxt Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 23/30] KVM: arm64: COCCI: remove_unused.cocci: remove unused ctxt and hypstate variables Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 24/30] KVM: arm64: remove unused functions Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 25/30] KVM: arm64: separate kvm_run() for protected VMs Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 26/30] KVM: arm64: pVM activate_traps to use vcpu_ctxt and vcpu_hyp_state Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 27/30] KVM: arm64: remove unsupported pVM features Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 28/30] KVM: arm64: reduce scope of pVM fixup_guest_exit to hyp_state and kvm_cpu_ctxt Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 29/30] [DONOTMERGE] Remove Coccinelle scripts added for refactoring Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 30/30] [DONOTMERGE] Re-enable warnings Fuad Tabba
