kvmarm.lists.cs.columbia.edu archive mirror
* [PATCH 0/2] Refactor ESR related functions
@ 2020-06-29  9:18 Gavin Shan
  2020-06-29  9:18 ` [PATCH 1/2] kvm/arm64: Rename HSR to ESR Gavin Shan
  2020-06-29  9:18 ` [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct Gavin Shan
  0 siblings, 2 replies; 13+ messages in thread
From: Gavin Shan @ 2020-06-29  9:18 UTC (permalink / raw)
  To: kvmarm; +Cc: catalin.marinas, will, linux-arm-kernel

This series is based on the kvm/arm64 ("next") tree as below. It's
preparatory work for async page fault support, where the ESR is passed
in explicitly instead of being fetched from the vCPU struct (a rough
sketch of one such caller is included below). Besides, the HSR naming is
no longer meaningful since kvm/arm32 support has been dropped. This
series refactors the ESR related functions to prepare for that work.

   git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
   ("next" branch)

Gavin Shan (2):
  kvm/arm64: Rename HSR to ESR
  kvm/arm64: Detach ESR operator from vCPU struct

 arch/arm64/include/asm/esr.h         | 32 ++++++++++++++++++++
 arch/arm64/include/asm/kvm_emulate.h | 45 +++++++++++++---------------
 arch/arm64/include/uapi/asm/kvm.h    |  2 +-
 arch/arm64/kvm/handle_exit.c         | 32 ++++++++++----------
 arch/arm64/kvm/hyp/aarch32.c         |  2 +-
 arch/arm64/kvm/hyp/switch.c          | 14 ++++-----
 arch/arm64/kvm/hyp/vgic-v3-sr.c      |  4 +--
 arch/arm64/kvm/mmu.c                 |  6 ++--
 arch/arm64/kvm/sys_regs.c            | 28 ++++++++---------
 arch/arm64/kvm/trace_arm.h           | 14 ++++-----
 arch/arm64/kvm/trace_handle_exit.h   | 10 +++----
 11 files changed, 108 insertions(+), 81 deletions(-)

-- 
2.23.0


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 1/2] kvm/arm64: Rename HSR to ESR
  2020-06-29  9:18 [PATCH 0/2] Refactor ESR related functions Gavin Shan
@ 2020-06-29  9:18 ` Gavin Shan
  2020-06-29  9:44   ` Andrew Scull
  2020-06-29 10:32   ` Mark Rutland
  2020-06-29  9:18 ` [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct Gavin Shan
  1 sibling, 2 replies; 13+ messages in thread
From: Gavin Shan @ 2020-06-29  9:18 UTC (permalink / raw)
  To: kvmarm; +Cc: catalin.marinas, will, linux-arm-kernel

kvm/arm32 isn't supported since commit 541ad0150ca4 ("arm: Remove
32bit KVM host support"). So HSR isn't meaningful since then. This
renames HSR to ESR accordingly. This shouldn't cause any functional
changes:

   * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
     function names self-explanatory.
   * Rename variables from @hsr to @esr to make them self-explanatory.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 arch/arm64/include/asm/kvm_emulate.h | 34 ++++++++++++++--------------
 arch/arm64/include/uapi/asm/kvm.h    |  2 +-
 arch/arm64/kvm/handle_exit.c         | 32 +++++++++++++-------------
 arch/arm64/kvm/hyp/aarch32.c         |  2 +-
 arch/arm64/kvm/hyp/switch.c          | 14 ++++++------
 arch/arm64/kvm/hyp/vgic-v3-sr.c      |  4 ++--
 arch/arm64/kvm/mmu.c                 |  6 ++---
 arch/arm64/kvm/sys_regs.c            | 28 +++++++++++------------
 arch/arm64/kvm/trace_arm.h           | 14 ++++++------
 arch/arm64/kvm/trace_handle_exit.h   | 10 ++++----
 10 files changed, 73 insertions(+), 73 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 4d0f8ea600ba..c9ba0df47f7d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -259,14 +259,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -291,64 +291,64 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -358,12 +358,12 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
@@ -387,7 +387,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index ba85bb23f060..d54345573a88 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -140,7 +140,7 @@ struct kvm_guest_debug_arch {
 };
 
 struct kvm_debug_exit_arch {
-	__u32 hsr;
+	__u32 esr;
 	__u64 far;	/* used for watchpoints */
 };
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 5a02d4c90559..9baca85c5aa8 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -89,7 +89,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
@@ -119,13 +119,13 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int ret = 0;
 
 	run->exit_reason = KVM_EXIT_DEBUG;
-	run->debug.arch.hsr = hsr;
+	run->debug.arch.esr = esr;
 
-	switch (ESR_ELx_EC(hsr)) {
+	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
 		/* fall through */
@@ -135,8 +135,8 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	case ESR_ELx_EC_BRK64:
 		break;
 	default:
-		kvm_err("%s: un-handled case hsr: %#08x\n",
-			__func__, (unsigned int) hsr);
+		kvm_err("%s: un-handled case esr: %#08x\n",
+			__func__, (unsigned int) esr);
 		ret = -1;
 		break;
 	}
@@ -146,10 +146,10 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
-	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
-		      hsr, esr_get_class_string(hsr));
+	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+		      esr, esr_get_class_string(esr));
 
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -200,10 +200,10 @@ static exit_handle_fn arm_exit_handlers[] = {
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	u8 hsr_ec = ESR_ELx_EC(hsr);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u8 esr_ec = ESR_ELx_EC(esr);
 
-	return arm_exit_handlers[hsr_ec];
+	return arm_exit_handlers[esr_ec];
 }
 
 /*
@@ -241,15 +241,15 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
 	if (ARM_SERROR_PENDING(exception_index)) {
-		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 
 		/*
 		 * HVC/SMC already have an adjusted PC, which we need
 		 * to correct in order to return to after having
 		 * injected the SError.
 		 */
-		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
-		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64 ||
+		    esr_ec == ESR_ELx_EC_SMC32 || esr_ec == ESR_ELx_EC_SMC64) {
 			u32 adj =  kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
 			*vcpu_pc(vcpu) -= adj;
 		}
@@ -307,5 +307,5 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
-		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
index 25c0e47d57cb..1e948704d60f 100644
--- a/arch/arm64/kvm/hyp/aarch32.c
+++ b/arch/arm64/kvm/hyp/aarch32.c
@@ -51,7 +51,7 @@ bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
 	int cond;
 
 	/* Top two bits non-zero?  Unconditional. */
-	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+	if (kvm_vcpu_get_esr(vcpu) >> 30)
 		return true;
 
 	/* Is condition field valid? */
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index db1c4487d95d..5164074c1ae1 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -356,7 +356,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
 	bool vhe, sve_guest, sve_host;
-	u8 hsr_ec;
+	u8 esr_ec;
 
 	if (!system_supports_fpsimd())
 		return false;
@@ -371,14 +371,14 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		vhe = has_vhe();
 	}
 
-	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
-	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
-	    hsr_ec != ESR_ELx_EC_SVE)
+	esr_ec = kvm_vcpu_trap_get_class(vcpu);
+	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
+	    esr_ec != ESR_ELx_EC_SVE)
 		return false;
 
 	/* Don't handle SVE traps for non-SVE vcpus here: */
 	if (!sve_guest)
-		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
 			return false;
 
 	/* Valid trap.  Switch the context: */
@@ -437,7 +437,7 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
 {
-	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
 	int rt = kvm_vcpu_sys_get_rt(vcpu);
 	u64 val = vcpu_get_reg(vcpu, rt);
 
@@ -529,7 +529,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	u64 val;
 
 	if (!vcpu_has_ptrauth(vcpu) ||
-	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
 		return false;
 
 	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 10ed539835c1..bee0a74671ca 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -426,7 +426,7 @@ static int __hyp_text __vgic_v3_bpr_min(void)
 
 static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 
 	return crm != 8;
@@ -992,7 +992,7 @@ int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
 	bool is_read;
 	u32 sysreg;
 
-	esr = kvm_vcpu_get_hsr(vcpu);
+	esr = kvm_vcpu_get_esr(vcpu);
 	if (vcpu_mode_is_32bit(vcpu)) {
 		if (!kvm_condition_valid(vcpu)) {
 			__kvm_skip_instr(vcpu);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8c0035cab6b6..36506112480e 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2079,7 +2079,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * For RAS the host kernel may handle this abort.
 		 * There is no need to pass the error into the guest.
 		 */
-		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
+		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
 			return 1;
 
 		if (unlikely(!is_iabt)) {
@@ -2088,7 +2088,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		}
 	}
 
-	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
 	/* Check the stage-2 fault is trans. fault or write fault */
@@ -2097,7 +2097,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
-			(unsigned long)kvm_vcpu_get_hsr(vcpu));
+			(unsigned long)kvm_vcpu_get_esr(vcpu));
 		return -EFAULT;
 	}
 
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index baf5ce9225ce..a96dd62a90ce 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2220,10 +2220,10 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 				struct sys_reg_params *params)
 {
-	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
 	int cp = -1;
 
-	switch(hsr_ec) {
+	switch (esr_ec) {
 	case ESR_ELx_EC_CP15_32:
 	case ESR_ELx_EC_CP15_64:
 		cp = 15;
@@ -2254,17 +2254,17 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 			    size_t nr_specific)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
-	int Rt2 = (hsr >> 10) & 0x1f;
+	int Rt2 = (esr >> 10) & 0x1f;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
-	params.CRm = (hsr >> 1) & 0xf;
-	params.is_write = ((hsr & 1) == 0);
+	params.CRm = (esr >> 1) & 0xf;
+	params.is_write = ((esr & 1) == 0);
 
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 16) & 0xf;
+	params.Op1 = (esr >> 16) & 0xf;
 	params.Op2 = 0;
 	params.CRn = 0;
 
@@ -2311,18 +2311,18 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 			    size_t nr_specific)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
-	params.CRm = (hsr >> 1) & 0xf;
+	params.CRm = (esr >> 1) & 0xf;
 	params.regval = vcpu_get_reg(vcpu, Rt);
-	params.is_write = ((hsr & 1) == 0);
-	params.CRn = (hsr >> 10) & 0xf;
+	params.is_write = ((esr & 1) == 0);
+	params.CRn = (esr >> 10) & 0xf;
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 14) & 0x7;
-	params.Op2 = (hsr >> 17) & 0x7;
+	params.Op1 = (esr >> 14) & 0x7;
+	params.Op2 = (esr >> 17) & 0x7;
 
 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
 	    !emulate_cp(vcpu, &params, global, nr_global)) {
@@ -2421,7 +2421,7 @@ static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
-	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	unsigned long esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 	int ret;
 
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 4c71270cc097..ee4f691b16ff 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
 		__entry->vcpu_pc		= vcpu_pc;
 	),
 
-	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+	TP_printk("%s: ESR_EC: 0x%04x (%s), PC: 0x%08lx",
 		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
 		  __entry->esr_ec,
 		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
@@ -50,27 +50,27 @@ TRACE_EVENT(kvm_exit,
 );
 
 TRACE_EVENT(kvm_guest_fault,
-	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
+	TP_PROTO(unsigned long vcpu_pc, unsigned long esr,
 		 unsigned long hxfar,
 		 unsigned long long ipa),
-	TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
+	TP_ARGS(vcpu_pc, esr, hxfar, ipa),
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	vcpu_pc		)
-		__field(	unsigned long,	hsr		)
+		__field(	unsigned long,	esr		)
 		__field(	unsigned long,	hxfar		)
 		__field(   unsigned long long,	ipa		)
 	),
 
 	TP_fast_assign(
 		__entry->vcpu_pc		= vcpu_pc;
-		__entry->hsr			= hsr;
+		__entry->esr			= esr;
 		__entry->hxfar			= hxfar;
 		__entry->ipa			= ipa;
 	),
 
-	TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
-		  __entry->ipa, __entry->hsr,
+	TP_printk("ipa %#llx, esr %#08lx, hxfar %#08lx, pc %#08lx",
+		  __entry->ipa, __entry->esr,
 		  __entry->hxfar, __entry->vcpu_pc)
 );
 
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 2c56d1e0f5bd..94ef1a98e609 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -139,18 +139,18 @@ TRACE_EVENT(trap_reg,
 );
 
 TRACE_EVENT(kvm_handle_sys_reg,
-	TP_PROTO(unsigned long hsr),
-	TP_ARGS(hsr),
+	TP_PROTO(unsigned long esr),
+	TP_ARGS(esr),
 
 	TP_STRUCT__entry(
-		__field(unsigned long,	hsr)
+		__field(unsigned long,	esr)
 	),
 
 	TP_fast_assign(
-		__entry->hsr = hsr;
+		__entry->esr = esr;
 	),
 
-	TP_printk("HSR 0x%08lx", __entry->hsr)
+	TP_printk("ESR 0x%08lx", __entry->esr)
 );
 
 TRACE_EVENT(kvm_sys_access,
-- 
2.23.0


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct
  2020-06-29  9:18 [PATCH 0/2] Refactor ESR related functions Gavin Shan
  2020-06-29  9:18 ` [PATCH 1/2] kvm/arm64: Rename HSR to ESR Gavin Shan
@ 2020-06-29  9:18 ` Gavin Shan
  2020-06-29  9:59   ` Andrew Scull
  2020-06-29 11:00   ` Mark Rutland
  1 sibling, 2 replies; 13+ messages in thread
From: Gavin Shan @ 2020-06-29  9:18 UTC (permalink / raw)
  To: kvmarm; +Cc: catalin.marinas, will, linux-arm-kernel

There is a set of inline functions defined in kvm_emulate.h. Those
functions read the ESR from the vCPU fault information struct and then
operate on it, so they are tied to the vCPU fault information and the
vCPU struct, which limits their usage scope.

This detaches these functions from the vCPU struct by introducing
another set of inline functions in esr.h that manipulate a specified
ESR value. With that in place, the inline functions defined in
kvm_emulate.h can call the esr.h helpers instead. This shouldn't
cause any functional changes.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
 arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
 2 files changed, 51 insertions(+), 24 deletions(-)

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 035003acfa87..950204c5fbe1 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
 	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
 }
 
+#define ESR_DECLARE_CHECK_FUNC(name, field)	\
+static inline bool esr_is_##name(u32 esr)	\
+{						\
+	return !!(esr & (field));		\
+}
+#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
+static inline u32 esr_get_##name(u32 esr)	\
+{						\
+	return ((esr & (mask)) >> (shift));	\
+}
+
+ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
+ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
+ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
+ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
+ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
+ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
+ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
+ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
+
+ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
+ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
+ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
+ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
+ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
+ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
+		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
+ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
+ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
+ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
+				   ESR_ELx_SYS64_ISS_RT_SHIFT);
+
 const char *esr_get_class_string(u32 esr);
 #endif /* __ASSEMBLY */
 
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index c9ba0df47f7d..9337d90c517f 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
-
-	if (esr & ESR_ELx_CV)
-		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
-
-	return -1;
+	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
+	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
 }
 
 static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
@@ -291,79 +287,79 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
+	return esr_is_dabt_valid(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return esr_get_dabt_iss_nisv_sanitized(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
+	return esr_is_dabt_sse(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
+	return esr_is_dabt_sf(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return esr_get_dabt_rd(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
+	return esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
-		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+	return esr_is_dabt_write(kvm_vcpu_get_esr(vcpu)) ||
+	       esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu)); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
+	return esr_is_dabt_cm(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << esr_get_dabt_as(kvm_vcpu_get_esr(vcpu));
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
+	return esr_is_il_32bit(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
+	return esr_get_class(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+	return esr_get_class(kvm_vcpu_get_esr(vcpu)) == ESR_ELx_EC_IABT_LOW;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
+	return esr_get_fault(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
+	return esr_get_fault_type(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
@@ -387,8 +383,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
-	return ESR_ELx_SYS64_ISS_RT(esr);
+	return esr_get_sys_rt(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-- 
2.23.0


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2] kvm/arm64: Rename HSR to ESR
  2020-06-29  9:18 ` [PATCH 1/2] kvm/arm64: Rename HSR to ESR Gavin Shan
@ 2020-06-29  9:44   ` Andrew Scull
  2020-06-29 10:32   ` Mark Rutland
  1 sibling, 0 replies; 13+ messages in thread
From: Andrew Scull @ 2020-06-29  9:44 UTC (permalink / raw)
  To: Gavin Shan; +Cc: catalin.marinas, will, kvmarm, linux-arm-kernel

On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
> kvm/arm32 isn't supported since commit 541ad0150ca4 ("arm: Remove
> 32bit KVM host support"). So HSR isn't meaningful since then. This
> renames HSR to ESR accordingly. This shouldn't cause any functional
> changes:
> 
>    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
>      function names self-explanatory.
>    * Rename variables from @hsr to @esr to make them self-explanatory.

I like this; it has confused me more than once recently!

Acked-by: Andrew Scull <ascull@google.com>

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct
  2020-06-29  9:18 ` [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct Gavin Shan
@ 2020-06-29  9:59   ` Andrew Scull
  2020-06-30  0:28     ` Gavin Shan
  2020-06-29 11:00   ` Mark Rutland
  1 sibling, 1 reply; 13+ messages in thread
From: Andrew Scull @ 2020-06-29  9:59 UTC (permalink / raw)
  To: Gavin Shan; +Cc: catalin.marinas, will, kvmarm, linux-arm-kernel

On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
> There is a set of inline functions defined in kvm_emulate.h. Those
> functions read the ESR from the vCPU fault information struct and then
> operate on it, so they are tied to the vCPU fault information and the
> vCPU struct, which limits their usage scope.
> 
> This detaches these functions from the vCPU struct by introducing
> another set of inline functions in esr.h that manipulate a specified
> ESR value. With that in place, the inline functions defined in
> kvm_emulate.h can call the esr.h helpers instead. This shouldn't
> cause any functional changes.
> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>
> ---
>  arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
>  arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
>  2 files changed, 51 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 035003acfa87..950204c5fbe1 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
>  	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
>  }
>  
> +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
> +static inline bool esr_is_##name(u32 esr)	\
> +{						\
> +	return !!(esr & (field));		\
> +}
> +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
> +static inline u32 esr_get_##name(u32 esr)	\
> +{						\
> +	return ((esr & (mask)) >> (shift));	\
> +}

Should these be named DEFINE rather than DECLARE given it also includes
the function definition?
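
That is, something like (sketch of the suggested rename only):

#define ESR_DEFINE_CHECK_FUNC(name, field)	\
static inline bool esr_is_##name(u32 esr)	\
{						\
	return !!(esr & (field));		\
}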

> +
> +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
> +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
> +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
> +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
> +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
> +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
> +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
> +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
> +
> +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
> +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
> +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
> +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
> +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
> +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
> +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
> +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
> +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
> +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
> +				   ESR_ELx_SYS64_ISS_RT_SHIFT);
> +
>  const char *esr_get_class_string(u32 esr);
>  #endif /* __ASSEMBLY */
>  
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index c9ba0df47f7d..9337d90c517f 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
>  
>  static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -
> -	if (esr & ESR_ELx_CV)
> -		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
> -
> -	return -1;
> +	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
> +	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
>  }
>  
>  static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
> @@ -291,79 +287,79 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
>  
>  static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
> +	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
>  }

It feels a little strange that in the raw esr case it uses macro magic
but in the vcpu cases here it writes everything out in full. Was there a
reason that I'm missing or is there a chance to apply a consistent
approach?

I'm not sure of the style preferences, but if it goes the macro path,
the esr field definitions could be reused with something x-macro-like to
get the kvm_emulate.h and esr.h functions generated from a single list of
the esr fields.
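
A rough sketch of what I mean (all names here are invented purely for
illustration):

/* One list of the single-bit ESR fields, shared by both headers */
#define ESR_CHECK_FIELDS(FUNC)			\
	FUNC(il_32bit,   ESR_ELx_IL)		\
	FUNC(condition,  ESR_ELx_CV)		\
	FUNC(dabt_valid, ESR_ELx_ISV)

/* esr.h: generate esr_is_<name>(u32 esr) helpers */
#define ESR_DEFINE_CHECK_FUNC(name, field)	\
static inline bool esr_is_##name(u32 esr)	\
{						\
	return !!(esr & (field));		\
}
ESR_CHECK_FIELDS(ESR_DEFINE_CHECK_FUNC)

/* kvm_emulate.h: generate the vcpu wrappers from the same list */
#define KVM_DEFINE_VCPU_CHECK_FUNC(name, field)				\
static inline bool kvm_vcpu_esr_is_##name(const struct kvm_vcpu *vcpu)	\
{									\
	return esr_is_##name(kvm_vcpu_get_esr(vcpu));			\
}
ESR_CHECK_FIELDS(KVM_DEFINE_VCPU_CHECK_FUNC)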

>  static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
> +	return esr_is_dabt_valid(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
> +	return esr_get_dabt_iss_nisv_sanitized(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
> +	return esr_is_dabt_sse(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
> +	return esr_is_dabt_sf(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
>  {
> -	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
> +	return esr_get_dabt_rd(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
> +	return esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
> -		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
> +	return esr_is_dabt_write(kvm_vcpu_get_esr(vcpu)) ||
> +	       esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu)); /* AF/DBM update */
>  }
>  
>  static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
> +	return esr_is_dabt_cm(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
>  {
> -	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
> +	return 1 << esr_get_dabt_as(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  /* This one is not specific to Data Abort */
>  static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
> +	return esr_is_il_32bit(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
>  {
> -	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu)) == ESR_ELx_EC_IABT_LOW;
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
> +	return esr_get_fault(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
> +	return esr_get_fault_type(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
> @@ -387,8 +383,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
>  
>  static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -	return ESR_ELx_SYS64_ISS_RT(esr);
> +	return esr_get_sys_rt(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
> -- 
> 2.23.0
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2] kvm/arm64: Rename HSR to ESR
  2020-06-29  9:18 ` [PATCH 1/2] kvm/arm64: Rename HSR to ESR Gavin Shan
  2020-06-29  9:44   ` Andrew Scull
@ 2020-06-29 10:32   ` Mark Rutland
  2020-06-29 11:05     ` Mark Rutland
  2020-06-29 17:00     ` Marc Zyngier
  1 sibling, 2 replies; 13+ messages in thread
From: Mark Rutland @ 2020-06-29 10:32 UTC (permalink / raw)
  To: Gavin Shan; +Cc: catalin.marinas, will, kvmarm, linux-arm-kernel

On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
> kvm/arm32 isn't supported since commit 541ad0150ca4 ("arm: Remove
> 32bit KVM host support"). So HSR isn't meaningful since then. This
> renames HSR to ESR accordingly. This shouldn't cause any functional
> changes:
> 
>    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
>      function names self-explanatory.
>    * Rename variables from @hsr to @esr to make them self-explanatory.
> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>

At a high-level, I agree that we should move to the `esr` naming to
match the architecture and minimize surprise. However, I think there are
some ABI changes here, which *are* functional changes, and those need to
be avoided.

[...]

> diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
> index ba85bb23f060..d54345573a88 100644
> --- a/arch/arm64/include/uapi/asm/kvm.h
> +++ b/arch/arm64/include/uapi/asm/kvm.h
> @@ -140,7 +140,7 @@ struct kvm_guest_debug_arch {
>  };
>  
>  struct kvm_debug_exit_arch {
> -	__u32 hsr;
> +	__u32 esr;
>  	__u64 far;	/* used for watchpoints */
>  };

This is userspace ABI, and changing this *will* break userspace. This
*is* a functional change.

NAK to this specifically. At best there should be a comment here noting
that this naming is legacy but must stay for ABI reasons.
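
Something like this, for instance (sketch only):

struct kvm_debug_exit_arch {
	__u32 hsr;	/* actually the ESR value; name kept for ABI reasons */
	__u64 far;	/* used for watchpoints */
};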

[...]

> diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
> index 4c71270cc097..ee4f691b16ff 100644
> --- a/arch/arm64/kvm/trace_arm.h
> +++ b/arch/arm64/kvm/trace_arm.h
> @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
>  		__entry->vcpu_pc		= vcpu_pc;
>  	),
>  
> -	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
> +	TP_printk("%s: ESR_EC: 0x%04x (%s), PC: 0x%08lx",
>  		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
>  		  __entry->esr_ec,
>  		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),

Likewise, isn't all the tracepoint format stuff ABI? I'm not comfortable
that we can change this.

Thanks,
Mark.

> @@ -50,27 +50,27 @@ TRACE_EVENT(kvm_exit,
>  );
>  
>  TRACE_EVENT(kvm_guest_fault,
> -	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
> +	TP_PROTO(unsigned long vcpu_pc, unsigned long esr,
>  		 unsigned long hxfar,
>  		 unsigned long long ipa),
> -	TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
> +	TP_ARGS(vcpu_pc, esr, hxfar, ipa),
>  
>  	TP_STRUCT__entry(
>  		__field(	unsigned long,	vcpu_pc		)
> -		__field(	unsigned long,	hsr		)
> +		__field(	unsigned long,	esr		)
>  		__field(	unsigned long,	hxfar		)
>  		__field(   unsigned long long,	ipa		)
>  	),
>  
>  	TP_fast_assign(
>  		__entry->vcpu_pc		= vcpu_pc;
> -		__entry->hsr			= hsr;
> +		__entry->esr			= esr;
>  		__entry->hxfar			= hxfar;
>  		__entry->ipa			= ipa;
>  	),
>  
> -	TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
> -		  __entry->ipa, __entry->hsr,
> +	TP_printk("ipa %#llx, esr %#08lx, hxfar %#08lx, pc %#08lx",
> +		  __entry->ipa, __entry->esr,
>  		  __entry->hxfar, __entry->vcpu_pc)
>  );
>  
> diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
> index 2c56d1e0f5bd..94ef1a98e609 100644
> --- a/arch/arm64/kvm/trace_handle_exit.h
> +++ b/arch/arm64/kvm/trace_handle_exit.h
> @@ -139,18 +139,18 @@ TRACE_EVENT(trap_reg,
>  );
>  
>  TRACE_EVENT(kvm_handle_sys_reg,
> -	TP_PROTO(unsigned long hsr),
> -	TP_ARGS(hsr),
> +	TP_PROTO(unsigned long esr),
> +	TP_ARGS(esr),
>  
>  	TP_STRUCT__entry(
> -		__field(unsigned long,	hsr)
> +		__field(unsigned long,	esr)
>  	),
>  
>  	TP_fast_assign(
> -		__entry->hsr = hsr;
> +		__entry->esr = esr;
>  	),
>  
> -	TP_printk("HSR 0x%08lx", __entry->hsr)
> +	TP_printk("ESR 0x%08lx", __entry->esr)
>  );
>  
>  TRACE_EVENT(kvm_sys_access,
> -- 
> 2.23.0
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct
  2020-06-29  9:18 ` [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct Gavin Shan
  2020-06-29  9:59   ` Andrew Scull
@ 2020-06-29 11:00   ` Mark Rutland
  2020-06-30  0:16     ` Gavin Shan
  1 sibling, 1 reply; 13+ messages in thread
From: Mark Rutland @ 2020-06-29 11:00 UTC (permalink / raw)
  To: Gavin Shan; +Cc: catalin.marinas, will, kvmarm, linux-arm-kernel

On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
> There is a set of inline functions defined in kvm_emulate.h. Those
> functions read the ESR from the vCPU fault information struct and then
> operate on it, so they are tied to the vCPU fault information and the
> vCPU struct, which limits their usage scope.
> 
> This detaches these functions from the vCPU struct by introducing
> another set of inline functions in esr.h that manipulate a specified
> ESR value. With that in place, the inline functions defined in
> kvm_emulate.h can call the esr.h helpers instead. This shouldn't
> cause any functional changes.
> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>

TBH, I'm not sure that this patch makes much sense on its own.

We already use vcpu_get_esr(), which is the bit that'd have to change if
we didn't pass the vcpu around, and the new helpers are just consuming
the value in a different way rather than a necessarily simpler way.

Further comments on that front below.

> ---
>  arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
>  arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
>  2 files changed, 51 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 035003acfa87..950204c5fbe1 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
>  	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
>  }
>  
> +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
> +static inline bool esr_is_##name(u32 esr)	\
> +{						\
> +	return !!(esr & (field));		\
> +}
> +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
> +static inline u32 esr_get_##name(u32 esr)	\
> +{						\
> +	return ((esr & (mask)) >> (shift));	\
> +}
> +
> +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
> +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
> +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
> +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
> +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
> +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
> +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
> +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
> +
> +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
> +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
> +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
> +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
> +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
> +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
> +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
> +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
> +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
> +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
> +				   ESR_ELx_SYS64_ISS_RT_SHIFT);

I'm really not keen on this, as I think it's abstracting the problem at
the wrong level, hiding information and making things harder to reason
about rather than abstracting it away.

I strongly suspect the right thing to do is use FIELD_GET() in-place in
the functions below, e.g.

   !!FIELD_GET(ESR_ELx_IL, esr);

... rather than:

   esr_get_il_32bit(esr);

... as that avoids the wrapper entirely, minimizing indirection and
making the codebase simpler to navigate.

For the cases where we *really* want a helper, I'd rather write those
out explicitly, e.g.

#define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)

... but I'm not sure if we really need those given these are mostly used
*once* below.
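
For instance (a sketch, assuming FIELD_GET() from <linux/bitfield.h> is
usable here):

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	/* FIELD_GET() masks and shifts in one go */
	return FIELD_GET(ESR_ELx_SRT_MASK, kvm_vcpu_get_esr(vcpu));
}
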

> +
>  const char *esr_get_class_string(u32 esr);
>  #endif /* __ASSEMBLY */
>  
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index c9ba0df47f7d..9337d90c517f 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
>  
>  static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -
> -	if (esr & ESR_ELx_CV)
> -		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
> -
> -	return -1;
> +	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
> +	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
>  }

Do we really need to change the structure of this code? I thought this
was purely about decoupling helpers from the vcpu struct. This could
have stayed as:

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr_is_condition(esr))
		return esr_get_condition(esr);
	
	return -1;
}

... or:

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (FIELD_GET(ESR_ELx_CV, esr))
		return FIELD_GET(ESR_ELx_COND_MASK, esr);
	
	return -1;
}

Thanks,
Mark.

>  
>  static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
> @@ -291,79 +287,79 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
>  
>  static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
> +	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
> +	return esr_is_dabt_valid(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
> +	return esr_get_dabt_iss_nisv_sanitized(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
> +	return esr_is_dabt_sse(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
> +	return esr_is_dabt_sf(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
>  {
> -	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
> +	return esr_get_dabt_rd(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
> +	return esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
> -		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
> +	return esr_is_dabt_write(kvm_vcpu_get_esr(vcpu)) ||
> +	       esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu)); /* AF/DBM update */
>  }
>  
>  static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
> +	return esr_is_dabt_cm(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
>  {
> -	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
> +	return 1 << esr_get_dabt_as(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  /* This one is not specific to Data Abort */
>  static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
> +	return esr_is_il_32bit(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
>  {
> -	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu)) == ESR_ELx_EC_IABT_LOW;
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
> +	return esr_get_fault(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
> +	return esr_get_fault_type(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
> @@ -387,8 +383,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
>  
>  static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -	return ESR_ELx_SYS64_ISS_RT(esr);
> +	return esr_get_sys_rt(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
> -- 
> 2.23.0
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2] kvm/arm64: Rename HSR to ESR
  2020-06-29 10:32   ` Mark Rutland
@ 2020-06-29 11:05     ` Mark Rutland
  2020-06-29 17:00     ` Marc Zyngier
  1 sibling, 0 replies; 13+ messages in thread
From: Mark Rutland @ 2020-06-29 11:05 UTC (permalink / raw)
  To: Gavin Shan; +Cc: catalin.marinas, will, kvmarm, linux-arm-kernel

On Mon, Jun 29, 2020 at 11:32:08AM +0100, Mark Rutland wrote:
> On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
> > kvm/arm32 isn't supported since commit 541ad0150ca4 ("arm: Remove
> > 32bit KVM host support"). So HSR isn't meaningful since then. This
> > renames HSR to ESR accordingly. This shouldn't cause any functional
> > changes:
> > 
> >    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
> >      function names self-explanatory.
> >    * Rename variables from @hsr to @esr to make them self-explanatory.
> > 
> > Signed-off-by: Gavin Shan <gshan@redhat.com>
> 
> At a high-level, I agree that we should move to the `esr` naming to
> match the architecture and minimize surprise. However, I think there are
> > some ABI changes here, which *are* functional changes, and those need to
> be avoided.
> 
> [...]
> 
> > diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
> > index ba85bb23f060..d54345573a88 100644
> > --- a/arch/arm64/include/uapi/asm/kvm.h
> > +++ b/arch/arm64/include/uapi/asm/kvm.h
> > @@ -140,7 +140,7 @@ struct kvm_guest_debug_arch {
> >  };
> >  
> >  struct kvm_debug_exit_arch {
> > -	__u32 hsr;
> > +	__u32 esr;
> >  	__u64 far;	/* used for watchpoints */
> >  };
> 
> This is userspace ABI, and changing this *will* break userspace. This
> *is* a functional change.

To be slightly clearer: while the structure isn't changed, any userspace
software consuming this header will fail to build after this change,
because there will no longer be a field called `hsr`.

Existing binaries will almost certainly not care, but regardless this is
a regression (when building userspace) that I don't think we can permit.
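
For example, userspace along these lines (purely illustrative) would stop
compiling against the updated header:

#include <asm/kvm.h>

__u32 get_syndrome(const struct kvm_debug_exit_arch *dbg)
{
	return dbg->hsr;	/* field would no longer exist after the rename */
}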

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2] kvm/arm64: Rename HSR to ESR
  2020-06-29 10:32   ` Mark Rutland
  2020-06-29 11:05     ` Mark Rutland
@ 2020-06-29 17:00     ` Marc Zyngier
  2020-06-29 23:14       ` Gavin Shan
  1 sibling, 1 reply; 13+ messages in thread
From: Marc Zyngier @ 2020-06-29 17:00 UTC (permalink / raw)
  To: Mark Rutland; +Cc: linux-arm-kernel, catalin.marinas, kvmarm, will

On 2020-06-29 11:32, Mark Rutland wrote:
> On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
>> kvm/arm32 isn't supported since commit 541ad0150ca4 ("arm: Remove
>> 32bit KVM host support"). So HSR isn't meaningful since then. This
>> renames HSR to ESR accordingly. This shouldn't cause any functional
>> changes:
>> 
>>    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
>>      function names self-explanatory.
>>    * Rename variables from @hsr to @esr to make them self-explanatory.
>> 
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
> 
> At a high-level, I agree that we should move to the `esr` naming to
> match the architecture and minimize surprise. However, I think there are
> some ABI changes here, which *are* functional changes, and those need to
> be avoided.
> 
> [...]
> 
>> diff --git a/arch/arm64/include/uapi/asm/kvm.h 
>> b/arch/arm64/include/uapi/asm/kvm.h
>> index ba85bb23f060..d54345573a88 100644
>> --- a/arch/arm64/include/uapi/asm/kvm.h
>> +++ b/arch/arm64/include/uapi/asm/kvm.h
>> @@ -140,7 +140,7 @@ struct kvm_guest_debug_arch {
>>  };
>> 
>>  struct kvm_debug_exit_arch {
>> -	__u32 hsr;
>> +	__u32 esr;
>>  	__u64 far;	/* used for watchpoints */
>>  };
> 
> This is userspace ABI, and changing this *will* break userspace. This
> *is* a functional change.
> 
> NAK to this specifically. At best there should be a comment here noting
> that this naming is legacy but must stay for ABI reasons.
> 
> [...]
> 
>> diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
>> index 4c71270cc097..ee4f691b16ff 100644
>> --- a/arch/arm64/kvm/trace_arm.h
>> +++ b/arch/arm64/kvm/trace_arm.h
>> @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
>>  		__entry->vcpu_pc		= vcpu_pc;
>>  	),
>> 
>> -	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
>> +	TP_printk("%s: ESR_EC: 0x%04x (%s), PC: 0x%08lx",
>>  		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
>>  		  __entry->esr_ec,
>>  		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
> 
> Likewise, isn't all the tracepoint format stuff ABI? I'm not comfortable
> that we can change this.

Tracepoints are ABI, and they cannot change. As it is, this patch
isn't acceptable (the worse offender being the uapi change though).

         M.
-- 
Who you jivin' with that Cosmik Debris?

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/2] kvm/arm64: Rename HSR to ESR
  2020-06-29 17:00     ` Marc Zyngier
@ 2020-06-29 23:14       ` Gavin Shan
  0 siblings, 0 replies; 13+ messages in thread
From: Gavin Shan @ 2020-06-29 23:14 UTC (permalink / raw)
  To: Marc Zyngier, Mark Rutland
  Cc: catalin.marinas, will, kvmarm, linux-arm-kernel

On 6/30/20 3:00 AM, Marc Zyngier wrote:
> On 2020-06-29 11:32, Mark Rutland wrote:
>> On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
>>> kvm/arm32 isn't supported since commit 541ad0150ca4 ("arm: Remove
>>> 32bit KVM host support"). So HSR isn't meaningful since then. This
>>> renames HSR to ESR accordingly. This shouldn't cause any functional
>>> changes:
>>>
>>>    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
>>>      function names self-explanatory.
>>>    * Rename variables from @hsr to @esr to make them self-explanatory.
>>>
>>> Signed-off-by: Gavin Shan <gshan@redhat.com>
>>
>> At a high-level, I agree that we should move to the `esr` naming to
>> match the architecture and minimize surprise. However, I think there are
>> some ABI changes here, which *are* functional changes, and those need to
>> be avoided.
>>
>> [...]
>>
>>> diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
>>> index ba85bb23f060..d54345573a88 100644
>>> --- a/arch/arm64/include/uapi/asm/kvm.h
>>> +++ b/arch/arm64/include/uapi/asm/kvm.h
>>> @@ -140,7 +140,7 @@ struct kvm_guest_debug_arch {
>>>  };
>>>
>>>  struct kvm_debug_exit_arch {
>>> -    __u32 hsr;
>>> +    __u32 esr;
>>>      __u64 far;    /* used for watchpoints */
>>>  };
>>
>> This is userspace ABI, and changing this *will* break userspace. This
>> *is* a functional change.
>>
>> NAK to this specifically. At best there should be a comment here that
>> this naming is legacy but must stay for ABI reasons.
>>
>> [...]
>>
>>> diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
>>> index 4c71270cc097..ee4f691b16ff 100644
>>> --- a/arch/arm64/kvm/trace_arm.h
>>> +++ b/arch/arm64/kvm/trace_arm.h
>>> @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
>>>          __entry->vcpu_pc        = vcpu_pc;
>>>      ),
>>>
>>> -    TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
>>> +    TP_printk("%s: ESR_EC: 0x%04x (%s), PC: 0x%08lx",
>>>            __print_symbolic(__entry->ret, kvm_arm_exception_type),
>>>            __entry->esr_ec,
>>>            __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
>>
>> Likewise, isn't all the tracepoint format stuff ABI? I'm not comfortable
>> that we can change this.
> 
> Tracepoints are ABI, and they cannot change. As it is, this patch
> isn't acceptable (the worst offender being the uapi change, though).
> 

Yes, I was reluctant to make the uapi and tracepoint changes, since
both are part of the ABI. I will drop those changes in v2.
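
If a clarifying note is still wanted in v2, something like this untested
sketch would keep the ABI intact while documenting the naming:

  struct kvm_debug_exit_arch {
  	__u32 hsr;	/* ESR_EL2 value; name kept as "hsr" for ABI reasons */
  	__u64 far;	/* used for watchpoints */
  };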

Thanks,
Gavin

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct
  2020-06-29 11:00   ` Mark Rutland
@ 2020-06-30  0:16     ` Gavin Shan
  2020-06-30  8:00       ` Mark Rutland
  0 siblings, 1 reply; 13+ messages in thread
From: Gavin Shan @ 2020-06-30  0:16 UTC (permalink / raw)
  To: Mark Rutland; +Cc: catalin.marinas, will, kvmarm, linux-arm-kernel

Hi Mark,

On 6/29/20 9:00 PM, Mark Rutland wrote:
> On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
>> There is a set of inline functions defined in kvm_emulate.h. Those
>> functions read the ESR from the vCPU fault information struct and then
>> operate on it, so they are tied to the vCPU fault information and the
>> vCPU struct. This limits their usage scope.
>>
>> This detaches these functions from the vCPU struct by introducing
>> another set of inline functions in esr.h that manipulate a specified
>> ESR value. With this, the inline functions defined in kvm_emulate.h
>> can call these inline functions (in esr.h) instead. This shouldn't
>> cause any functional changes.
>>
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
> 
> TBH, I'm not sure that this patch makes much sense on its own.
> 
> We already use vcpu_get_esr(), which is the bit that'd have to change if
> we didn't pass the vcpu around, and the new helpers are just consuming
> the value in a different way rather than a necessarily simpler way.
> 
> Further comments on that front below.
> 
>> ---
>>   arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
>>   arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
>>   2 files changed, 51 insertions(+), 24 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
>> index 035003acfa87..950204c5fbe1 100644
>> --- a/arch/arm64/include/asm/esr.h
>> +++ b/arch/arm64/include/asm/esr.h
>> @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
>>   	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
>>   }
>>   
>> +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
>> +static inline bool esr_is_##name(u32 esr)	\
>> +{						\
>> +	return !!(esr & (field));		\
>> +}
>> +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
>> +static inline u32 esr_get_##name(u32 esr)	\
>> +{						\
>> +	return ((esr & (mask)) >> (shift));	\
>> +}
>> +
>> +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
>> +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
>> +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
>> +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
>> +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
>> +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
>> +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
>> +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
>> +
>> +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
>> +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
>> +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
>> +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
>> +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
>> +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
>> +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
>> +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
>> +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
>> +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
>> +				   ESR_ELx_SYS64_ISS_RT_SHIFT);
> 
> I'm really not keen on this, as I think it's abstracting the problem at
> the wrong level, hiding information and making things harder to reason
> about rather than abstracting that away.
> 
> I strongly suspect the right thing to do is use FIELD_GET() in-place in
> the functions below, e.g.
> 
>     !!FIELD_GET(ESR_ELx_IL, esr);
> 
> ... rather than:
> 
>     esr_get_il_32bit(esr);
> 
> ... as that avoids the wrapper entirely, minimizing indirection and
> making the codebase simpler to navigate.
> 
> For the cases where we *really* want a helper, I'd rather write those
> out explicitly, e.g.
> 

There will be no difference except using FIELD_GET() to make the code
more explicit. Maybe I didn't fully understand your comments here.
Please let me know if something like the below is what you expect?

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index c9ba0df47f7d..e8294edcd8f4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -343,7 +343,7 @@ static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *
  /* This one is not specific to Data Abort */
  static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
  {
-       return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
+       return !!FIELD_GET(ESR_ELx_IL, kvm_vcpu_get_esr(vcpu));
  }

If my understanding is correct, I think we needn't change the code
and this patch can be dropped.

> #define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)
> 
> ... but I'm not sure if we really need those given these are mostly used
> *once* below.
> 

We don't need these for now, but they will be needed when the next
revision of the async page fault series is posted. Let's ignore this
requirement for now; I can revisit it when that patchset is posted. At
that point, we can have accessors defined in esr.h and have the helpers
in kvm_emulate.h use those accessors, similar to what you're suggesting.

#define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
}


By the way, it's a long way to that point because I'm still in the
middle of working on virtualizing SDEI.

>> +
>>   const char *esr_get_class_string(u32 esr);
>>   #endif /* __ASSEMBLY */
>>   
>> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
>> index c9ba0df47f7d..9337d90c517f 100644
>> --- a/arch/arm64/include/asm/kvm_emulate.h
>> +++ b/arch/arm64/include/asm/kvm_emulate.h
>> @@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
>>   
>>   static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
>>   {
>> -	u32 esr = kvm_vcpu_get_esr(vcpu);
>> -
>> -	if (esr & ESR_ELx_CV)
>> -		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
>> -
>> -	return -1;
>> +	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
>> +	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
>>   }
> 
> Do we really need to change the structure of this code? I thought this
> was purely about decoupling helpers from the vcpu struct. This could
> have stayed as:
> 
> static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
> {
> 	u32 esr = kvm_vcpu_get_esr(vcpu);
> 
> 	if (esr_is_condition(esr))
> 		return esr_get_condition(esr);
> 	
> 	return -1;
> }
> 
> ... or:
> 
> static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
> {
> 	u32 esr = kvm_vcpu_get_esr(vcpu);
> 
> 	if (FIELD_GET(ESR_ELx_CV, esr))
> 		return FIELD_GET(ESR_ELx_COND_MASK, esr);
> 	
> 	return -1;
> }
> 

The structure of the code doesn't need to change, but the new form does
reduce the line count. It's kind of my personal taste :)

[...]

Thanks,
Gavin

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct
  2020-06-29  9:59   ` Andrew Scull
@ 2020-06-30  0:28     ` Gavin Shan
  0 siblings, 0 replies; 13+ messages in thread
From: Gavin Shan @ 2020-06-30  0:28 UTC (permalink / raw)
  To: Andrew Scull; +Cc: catalin.marinas, will, kvmarm, linux-arm-kernel

Hi Andrew,

On 6/29/20 7:59 PM, Andrew Scull wrote:
> On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
>> There is a set of inline functions defined in kvm_emulate.h. Those
>> functions read the ESR from the vCPU fault information struct and then
>> operate on it, so they are tied to the vCPU fault information and the
>> vCPU struct. This limits their usage scope.
>>
>> This detaches these functions from the vCPU struct by introducing
>> another set of inline functions in esr.h that manipulate a specified
>> ESR value. With this, the inline functions defined in kvm_emulate.h
>> can call these inline functions (in esr.h) instead. This shouldn't
>> cause any functional changes.
>>
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
>> ---
>>   arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
>>   arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
>>   2 files changed, 51 insertions(+), 24 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
>> index 035003acfa87..950204c5fbe1 100644
>> --- a/arch/arm64/include/asm/esr.h
>> +++ b/arch/arm64/include/asm/esr.h
>> @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
>>   	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
>>   }
>>   
>> +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
>> +static inline bool esr_is_##name(u32 esr)	\
>> +{						\
>> +	return !!(esr & (field));		\
>> +}
>> +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
>> +static inline u32 esr_get_##name(u32 esr)	\
>> +{						\
>> +	return ((esr & (mask)) >> (shift));	\
>> +}
> 
> Should these be named DEFINE rather than DECLARE, given they also
> include the function definitions?
> 

Thanks for your comments. Indeed, I think DEFINE is better than
DECLARE. That said, these newly introduced helpers are unlikely to be
needed, based on the comments (and follow-up) from Mark Rutland.

>> +
>> +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
>> +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
>> +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
>> +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
>> +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
>> +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
>> +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
>> +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
>> +
>> +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
>> +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
>> +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
>> +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
>> +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
>> +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
>> +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
>> +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
>> +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
>> +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
>> +				   ESR_ELx_SYS64_ISS_RT_SHIFT);
>> +
>>   const char *esr_get_class_string(u32 esr);
>>   #endif /* __ASSEMBLY */
>>   
>> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
>> index c9ba0df47f7d..9337d90c517f 100644
>> --- a/arch/arm64/include/asm/kvm_emulate.h
>> +++ b/arch/arm64/include/asm/kvm_emulate.h
>> @@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
>>   
>>   static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
>>   {
>> -	u32 esr = kvm_vcpu_get_esr(vcpu);
>> -
>> -	if (esr & ESR_ELx_CV)
>> -		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
>> -
>> -	return -1;
>> +	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
>> +	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
>>   }
>>   
>>   static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
>> @@ -291,79 +287,79 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
>>   
>>   static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
>>   {
>> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
>> +	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
>>   }
> 
> It feels a little strange that in the raw esr case it uses macro magic
> but in the vcpu cases here it writes everything out in full. Was there a
> reason that I'm missing or is there a chance to apply a consistent
> approach?
> 

The request was raised when the RFCv2 async page fault patchset was
posted. When an async page fault is handled, the ESR is cached in
advance rather than fetched from the vCPU struct, so we want to detach
the helpers defined in kvm_emulate.h from the vCPU struct. I hope the
discussion in the following link helps explain a bit more:

https://lore.kernel.org/kvmarm/20200508032919.52147-5-gshan@redhat.com/
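
To make the intent concrete, here is a rough sketch only (the helper
names below are placeholders, not the final code):

  /* esr.h: helper operates on a plain ESR value, no vCPU needed */
  static inline bool esr_dabt_is_write(u32 esr)
  {
  	return !!(esr & ESR_ELx_WNR);
  }

  /* kvm_emulate.h: the existing vCPU helper becomes a thin wrapper */
  static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
  {
  	return esr_dabt_is_write(kvm_vcpu_get_esr(vcpu));
  }

  /* async page fault path (sketch): the same helper on a cached ESR */
  static bool apf_fault_is_write(u32 cached_esr)
  {
  	return esr_dabt_is_write(cached_esr);
  }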

> I'm not sure of the style preferences, but if it goes the macro path,
> the esr field definitions could be reused with something x-macro like to
> get the kvm_emulate.h and esr.h functions generated from a single list of
> the esr fields.
> 

Yeah, it's the same thing Mark Rutland suggested. As I replied to his
comments, it can be postponed until the next revision of the async page
fault patchset is posted.
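
For reference, the x-macro shape could look roughly like the below.
This is illustrative only, not what I plan to post:

  /* single list of single-bit ESR checks, shared by both headers */
  #define ESR_CHECKS(F)				\
  	F(dabt_iswrite, ESR_ELx_WNR)		\
  	F(dabt_is_cm,   ESR_ELx_CM)

  /* esr.h: accessors taking a raw ESR value */
  #define ESR_CHECK_FUNC(name, mask)		\
  static inline bool esr_##name(u32 esr)		\
  {						\
  	return !!(esr & (mask));		\
  }
  ESR_CHECKS(ESR_CHECK_FUNC)

  /* kvm_emulate.h: vCPU wrappers generated from the same list */
  #define VCPU_CHECK_FUNC(name, mask)				\
  static inline bool kvm_vcpu_##name(const struct kvm_vcpu *vcpu) \
  {								\
  	return esr_##name(kvm_vcpu_get_esr(vcpu));		\
  }
  ESR_CHECKS(VCPU_CHECK_FUNC)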

[...]

Thanks,
Gavin

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct
  2020-06-30  0:16     ` Gavin Shan
@ 2020-06-30  8:00       ` Mark Rutland
  0 siblings, 0 replies; 13+ messages in thread
From: Mark Rutland @ 2020-06-30  8:00 UTC (permalink / raw)
  To: Gavin Shan; +Cc: catalin.marinas, will, kvmarm, linux-arm-kernel

On Tue, Jun 30, 2020 at 10:16:07AM +1000, Gavin Shan wrote:
> Hi Mark,
> 
> On 6/29/20 9:00 PM, Mark Rutland wrote:
> > On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
> > > There is a set of inline functions defined in kvm_emulate.h. Those
> > > functions read the ESR from the vCPU fault information struct and then
> > > operate on it, so they are tied to the vCPU fault information and the
> > > vCPU struct. This limits their usage scope.
> > > 
> > > This detaches these functions from the vCPU struct by introducing
> > > another set of inline functions in esr.h that manipulate a specified
> > > ESR value. With this, the inline functions defined in kvm_emulate.h
> > > can call these inline functions (in esr.h) instead. This shouldn't
> > > cause any functional changes.
> > > 
> > > Signed-off-by: Gavin Shan <gshan@redhat.com>
> > 
> > TBH, I'm not sure that this patch makes much sense on its own.
> > 
> > We already use vcpu_get_esr(), which is the bit that'd have to change if
> > we didn't pass the vcpu around, and the new helpers are just consuming
> > the value in a different way rather than a necessarily simpler way.
> > 
> > Further comments on that front below.
> > 
> > > ---
> > >   arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
> > >   arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
> > >   2 files changed, 51 insertions(+), 24 deletions(-)
> > > 
> > > diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> > > index 035003acfa87..950204c5fbe1 100644
> > > --- a/arch/arm64/include/asm/esr.h
> > > +++ b/arch/arm64/include/asm/esr.h
> > > @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
> > >   	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
> > >   }
> > > +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
> > > +static inline bool esr_is_##name(u32 esr)	\
> > > +{						\
> > > +	return !!(esr & (field));		\
> > > +}
> > > +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
> > > +static inline u32 esr_get_##name(u32 esr)	\
> > > +{						\
> > > +	return ((esr & (mask)) >> (shift));	\
> > > +}
> > > +
> > > +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
> > > +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
> > > +
> > > +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
> > > +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
> > > +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
> > > +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
> > > +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
> > > +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
> > > +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
> > > +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
> > > +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
> > > +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
> > > +				   ESR_ELx_SYS64_ISS_RT_SHIFT);
> > 
> > I'm really not keen on this, as I think it's abstracting the problem at
> > the wrong level, hiding information and making things harder to reason
> > about rather than abstracting that away.
> > 
> > I strongly suspect the right thing to do is use FIELD_GET() in-place in
> > the functions below, e.g.
> > 
> >     !!FIELD_GET(ESR_ELx_IL, esr);
> > 
> > ... rather than:
> > 
> >     esr_get_il_32bit(esr);
> > 
> > ... as that avoids the wrapper entirely, minimizing indirection and
> > making the codebase simpler to navigate.
> > 
> > For the cases where we *really* want a helper, I'd rather write those
> > out explicitly, e.g.
> 
> There will be no difference except using FIELD_GET() to make the code
> more explicit. Maybe I didn't fully understand your comments here.
> Please let me know if something like the below is what you expect?

Sorry; my point here was just that using FIELD_GET() explicitly was
preferable to generating an entire function with
ESR_DECLARE_CHECK_FUNC() if the goal was just to remove the explicit
mask-and-shift at each callsite.

I agree they'd have the same functional behaviour, but I think the
explicit FIELD_GET() approach is easier to read (and possible to search
for), which makes code maintenance much easier.

> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index c9ba0df47f7d..e8294edcd8f4 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -343,7 +343,7 @@ static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *
>  /* This one is not specific to Data Abort */
>  static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
>  {
> -       return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
> +       return !!FIELD_GET(ESR_ELx_IL, kvm_vcpu_get_esr(vcpu));
>  }
> 
> If my understanding is correct, I think we needn't change the code
> and this patch can be dropped.

If you don't see a need for a change, I'm also happy for this to be
dropped.

[...]

> > #define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)
> > 
> > ... but I'm not sure if we really need those given these are mostly used
> > *once* below.
> > 
> 
> We don't need these for now, but they will be needed when the next
> revision of the async page fault series is posted. Let's ignore this
> requirement for now; I can revisit it when that patchset is posted. At
> that point, we can have accessors defined in esr.h and have the helpers
> in kvm_emulate.h use those accessors, similar to what you're suggesting.
> 
> #define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)
> 
> static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
> {
> 	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
> }

That'd be fine by me.

Thanks,
Mark.
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2020-06-30  8:00 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-29  9:18 [PATCH 0/2] Refactor ESR related functions Gavin Shan
2020-06-29  9:18 ` [PATCH 1/2] kvm/arm64: Rename HSR to ESR Gavin Shan
2020-06-29  9:44   ` Andrew Scull
2020-06-29 10:32   ` Mark Rutland
2020-06-29 11:05     ` Mark Rutland
2020-06-29 17:00     ` Marc Zyngier
2020-06-29 23:14       ` Gavin Shan
2020-06-29  9:18 ` [PATCH 2/2] kvm/arm64: Detach ESR operator from vCPU struct Gavin Shan
2020-06-29  9:59   ` Andrew Scull
2020-06-30  0:28     ` Gavin Shan
2020-06-29 11:00   ` Mark Rutland
2020-06-30  0:16     ` Gavin Shan
2020-06-30  8:00       ` Mark Rutland

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).