* [PATCH RFC] arm64/sysregs: Align field names in ID_AA64DFR0_EL1 with architecture
From: Mark Brown @ 2022-05-18 14:57 UTC
  To: Catalin Marinas, Will Deacon, Marc Zyngier
  Cc: linux-arm-kernel, Mark Rutland, Mark Brown

The naming scheme the architecture uses for the fields in ID_AA64DFR0_EL1
does not align well with kernel conventions, using as it does a lot of
MixedCase in various arrangements. In preparation for automatically
generating the defines for this register, rename the existing defines to
match the names used in the architecture.

Signed-off-by: Mark Brown <broonie@kernel.org>
---

I am not entirely sure whether the best approach here is to deviate from
the kernel naming convention as this does, or to follow the architecture
as we've decided in other cases. I don't really mind, but would like some
feedback before going ahead and sorting out the remaining issues with
this register.
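
For quick reference, the renames in the sysreg.h hunk below are:

	ID_AA64DFR0_TRBE_SHIFT		-> ID_AA64DFR0_TraceBuffer_SHIFT
	ID_AA64DFR0_TRACE_FILT_SHIFT	-> ID_AA64DFR0_TraceFilt_SHIFT
	ID_AA64DFR0_DOUBLELOCK_SHIFT	-> ID_AA64DFR0_DoubleLock_SHIFT
	ID_AA64DFR0_PMSVER_SHIFT	-> ID_AA64DFR0_PMSVer_SHIFT
	ID_AA64DFR0_CTX_CMPS_SHIFT	-> ID_AA64DFR0_CTX_CMPs_SHIFT
	ID_AA64DFR0_WRPS_SHIFT		-> ID_AA64DFR0_WRPs_SHIFT
	ID_AA64DFR0_BRPS_SHIFT		-> ID_AA64DFR0_BRPs_SHIFT
	ID_AA64DFR0_PMUVER_SHIFT	-> ID_AA64DFR0_PMUVer_SHIFT
	ID_AA64DFR0_TRACEVER_SHIFT	-> ID_AA64DFR0_TraceVer_SHIFT
	ID_AA64DFR0_DEBUGVER_SHIFT	-> ID_AA64DFR0_DebugVer_SHIFT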

 arch/arm64/include/asm/assembler.h     |  2 +-
 arch/arm64/include/asm/el2_setup.h     |  8 ++++----
 arch/arm64/include/asm/hw_breakpoint.h |  4 ++--
 arch/arm64/include/asm/sysreg.h        | 20 ++++++++++----------
 arch/arm64/kernel/cpufeature.c         | 14 +++++++-------
 arch/arm64/kernel/debug-monitors.c     |  2 +-
 arch/arm64/kernel/perf_event.c         |  2 +-
 arch/arm64/kvm/debug.c                 |  4 ++--
 arch/arm64/kvm/hyp/nvhe/pkvm.c         | 12 ++++++------
 arch/arm64/kvm/sys_regs.c              | 14 +++++++-------
 10 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8c5a61aeaf8e..a97190938ff7 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -489,7 +489,7 @@ alternative_endif
  */
 	.macro	reset_pmuserenr_el0, tmpreg
 	mrs	\tmpreg, id_aa64dfr0_el1
-	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
+	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVer_SHIFT, #4
 	cmp	\tmpreg, #1			// Skip if no PMU present
 	b.lt	9000f
 	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 34ceff08cac4..5584c810bdc7 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -40,7 +40,7 @@
 
 .macro __init_el2_debug
 	mrs	x1, id_aa64dfr0_el1
-	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
+	sbfx	x0, x1, #ID_AA64DFR0_PMUVer_SHIFT, #4
 	cmp	x0, #1
 	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
@@ -49,7 +49,7 @@
 	csel	x2, xzr, x0, lt			// all PMU counters from EL1
 
 	/* Statistical profiling */
-	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+	ubfx	x0, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4
 	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present
 
 	mrs_s	x0, SYS_PMBIDR_EL1              // If SPE available at EL2,
@@ -65,7 +65,7 @@
 
 .Lskip_spe_\@:
 	/* Trace buffer */
-	ubfx	x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
+	ubfx	x0, x1, #ID_AA64DFR0_TraceBuffer_SHIFT, #4
 	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present
 
 	mrs_s	x0, SYS_TRBIDR_EL1
@@ -195,7 +195,7 @@
 
 	mov	x0, xzr
 	mrs	x1, id_aa64dfr0_el1
-	ubfx	x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4
 	cmp	x1, #3
 	b.lt	.Lset_debug_fgt_\@
 	/* Disable PMSNEVFR_EL1 read and write traps */
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index bc7aaed4b34e..d667c03d5f5e 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -142,7 +142,7 @@ static inline int get_num_brps(void)
 	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
 		cpuid_feature_extract_unsigned_field(dfr0,
-						ID_AA64DFR0_BRPS_SHIFT);
+						ID_AA64DFR0_BRPs_SHIFT);
 }
 
 /* Determine number of WRP registers available. */
@@ -151,7 +151,7 @@ static inline int get_num_wrps(void)
 	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
 		cpuid_feature_extract_unsigned_field(dfr0,
-						ID_AA64DFR0_WRPS_SHIFT);
+						ID_AA64DFR0_WRPs_SHIFT);
 }
 
 #endif	/* __ASM_BREAKPOINT_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 9a44e04701d3..67691adce459 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -932,16 +932,16 @@
 
 /* id_aa64dfr0 */
 #define ID_AA64DFR0_MTPMU_SHIFT		48
-#define ID_AA64DFR0_TRBE_SHIFT		44
-#define ID_AA64DFR0_TRACE_FILT_SHIFT	40
-#define ID_AA64DFR0_DOUBLELOCK_SHIFT	36
-#define ID_AA64DFR0_PMSVER_SHIFT	32
-#define ID_AA64DFR0_CTX_CMPS_SHIFT	28
-#define ID_AA64DFR0_WRPS_SHIFT		20
-#define ID_AA64DFR0_BRPS_SHIFT		12
-#define ID_AA64DFR0_PMUVER_SHIFT	8
-#define ID_AA64DFR0_TRACEVER_SHIFT	4
-#define ID_AA64DFR0_DEBUGVER_SHIFT	0
+#define ID_AA64DFR0_TraceBuffer_SHIFT	44
+#define ID_AA64DFR0_TraceFilt_SHIFT	40
+#define ID_AA64DFR0_DoubleLock_SHIFT	36
+#define ID_AA64DFR0_PMSVer_SHIFT	32
+#define ID_AA64DFR0_CTX_CMPs_SHIFT	28
+#define ID_AA64DFR0_WRPs_SHIFT		20
+#define ID_AA64DFR0_BRPs_SHIFT		12
+#define ID_AA64DFR0_PMUVer_SHIFT	8
+#define ID_AA64DFR0_TraceVer_SHIFT	4
+#define ID_AA64DFR0_DebugVer_SHIFT	0
 
 #define ID_AA64DFR0_PMUVER_8_0		0x1
 #define ID_AA64DFR0_PMUVER_8_1		0x4
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 089221eb5ae8..f60ba05214ab 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -432,17 +432,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DoubleLock_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVer_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPs_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPs_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPs_SHIFT, 4, 0),
 	/*
 	 * We can instantiate multiple PMU instances with different levels
 	 * of support.
 	 */
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVer_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DebugVer_SHIFT, 4, 0x6),
 	ARM64_FTR_END,
 };
 
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index bf9fe71589bc..572c8ac2873c 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -28,7 +28,7 @@
 u8 debug_monitors_arch(void)
 {
 	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
-						ID_AA64DFR0_DEBUGVER_SHIFT);
+						ID_AA64DFR0_DebugVer_SHIFT);
 }
 
 /*
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cb69ff1e6138..0f363546cc55 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1145,7 +1145,7 @@ static void __armv8pmu_probe_pmu(void *info)
 
 	dfr0 = read_sysreg(id_aa64dfr0_el1);
 	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
-			ID_AA64DFR0_PMUVER_SHIFT);
+			ID_AA64DFR0_PMUVer_SHIFT);
 	if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0)
 		return;
 
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 4fd5c216c4bb..19a5bab1ea06 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -296,12 +296,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
 	 * If SPE is present on this CPU and is available at current EL,
 	 * we may need to check if the host state needs to be saved.
 	 */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVer_SHIFT) &&
 	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
 		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;
 
 	/* Check if we have TRBE implemented and available at the host */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TraceBuffer_SHIFT) &&
 	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
 		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 99c8d8b73e70..6e4e5cb4fb55 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
 	u64 cptr_set = 0;
 
 	/* Trap/constrain PMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVer), feature_ids)) {
 		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
 		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
 			      MDCR_EL2_HPMN_MASK;
 	}
 
 	/* Trap Debug */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), feature_ids))
 		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
 
 	/* Trap OS Double Lock */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DoubleLock), feature_ids))
 		mdcr_set |= MDCR_EL2_TDOSA;
 
 	/* Trap SPE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer), feature_ids)) {
 		mdcr_set |= MDCR_EL2_TPMS;
 		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 	}
 
 	/* Trap Trace Filter */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceFilt), feature_ids))
 		mdcr_set |= MDCR_EL2_TTRF;
 
 	/* Trap Trace */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceVer), feature_ids))
 		cptr_set |= CPTR_EL2_TTA;
 
 	vcpu->arch.mdcr_el2 |= mdcr_set;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c06c0477fab5..dc3e42df6cde 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1150,14 +1150,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		break;
 	case SYS_ID_AA64DFR0_EL1:
 		/* Limit debug to ARMv8.0 */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), 6);
 		/* Limit guests to PMUv3 for ARMv8.4 */
 		val = cpuid_feature_cap_perfmon_field(val,
-						      ID_AA64DFR0_PMUVER_SHIFT,
+						      ID_AA64DFR0_PMUVer_SHIFT,
 						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
 		/* Hide SPE from guests */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer);
 		break;
 	case SYS_ID_DFR0_EL1:
 		/* Limit guests to PMUv3 for ARMv8.4 */
@@ -1894,9 +1894,9 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
 
-		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
-			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+		p->regval = ((((dfr >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) << 28) |
+			     (((dfr >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) << 24) |
+			     (((dfr >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) << 20)
 			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
 		return true;
 	}
-- 
2.30.2


* Re: [PATCH RFC] arm64/sysregs: Align field names in ID_AA64DFR0_EL1 with architecture
From: Mark Rutland @ 2022-05-25  8:46 UTC
  To: Mark Brown; +Cc: Catalin Marinas, Will Deacon, Marc Zyngier, linux-arm-kernel

Hi Mark,

On Wed, May 18, 2022 at 03:57:50PM +0100, Mark Brown wrote:
> The naming scheme the architecture uses for the fields in ID_AA64DFR0_EL1
> does not align well with kernel conventions, using as it does a lot of
> MixedCase in various arrangements. In preparation for automatically
> generating the defines for this register, rename the existing defines to
> match the names used in the architecture.
> 
> Signed-off-by: Mark Brown <broonie@kernel.org>
> ---
> 
> I am not entirely sure whether the best approach here is to deviate from
> the kernel naming convention as this does, or to follow the architecture
> as we've decided in other cases. I don't really mind, but would like some
> feedback before going ahead and sorting out the remaining issues with
> this register.

It's unfortunate the architecture itself doesn't follow a consistent pattern.
:/

I don't personally have strong feelings here. I'm happy with either:

(a) Matching the case to the architectural names, even if that means some
    fields are MixedCase, as with this patch

(b) Always using ALLCAPS for ID reg field definitions.
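
For instance, taking the PMU version field from this patch, the two options
would read:

	#define ID_AA64DFR0_PMUVer_SHIFT	8	/* (a) architectural MixedCase */
	#define ID_AA64DFR0_PMUVER_SHIFT	8	/* (b) kernel-style ALLCAPS */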

Catalin/Marc/Will, any preference?

Mark.

* Re: [PATCH RFC] arm64/sysregs: Align field names in ID_AA64DFR0_EL1 with architecture
From: Catalin Marinas @ 2022-05-25  9:07 UTC
  To: Mark Rutland; +Cc: Mark Brown, Will Deacon, Marc Zyngier, linux-arm-kernel

On Wed, May 25, 2022 at 09:46:13AM +0100, Mark Rutland wrote:
> On Wed, May 18, 2022 at 03:57:50PM +0100, Mark Brown wrote:
> > The naming scheme the architecture uses for the fields in ID_AA64DFR0_EL1
> > does not align well with kernel conventions, using as it does a lot of
> > MixedCase in various arrangements. In preparation for automatically
> > generating the defines for this register, rename the existing defines to
> > match the names used in the architecture.
> > 
> > Signed-off-by: Mark Brown <broonie@kernel.org>
> > ---
> > 
> > I am not entirely sure whether the best approach here is to deviate from
> > the kernel naming convention as this does, or to follow the architecture
> > as we've decided in other cases. I don't really mind, but would like some
> > feedback before going ahead and sorting out the remaining issues with
> > this register.
> 
> It's unfortunate the architecture itself doesn't follow a consistent pattern.
> :/
> 
> I don't personally have strong feelings here. I'm happy with either:
> 
> (a) Matching the case to the architectural names, even if that means some
>     fields are MixedCase, as with this patch
> 
> (b) Always using ALLCAPS for ID reg field definitions.
> 
> Catalin/Marc/Will, any preference?

I don't have a preference either. We already have mixed case in a few
limited places, though mostly just with a single 'n'. Since we are moving
to automatic generation of these macros, it makes some sense to keep them
aligned with the architecture. My only worry with (a) is the diffstat and
the potential for conflicts.
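
As a rough illustration of where the automatic generation is heading, the
definitions for this register could be driven by a per-register description
along these lines. This is only a sketch: the exact format is whatever the
generation scripting ends up consuming, and the layout below is simply
transcribed from the *_SHIFT defines in the patch, with the unallocated
bits guessed as Res0.

	# Hypothetical description of ID_AA64DFR0_EL1 (encoded as
	# op0=3, op1=0, CRn=0, CRm=5, op2=0).
	Sysreg	ID_AA64DFR0_EL1	3	0	0	5	0
	Res0	63:52
	Field	51:48	MTPMU
	Field	47:44	TraceBuffer
	Field	43:40	TraceFilt
	Field	39:36	DoubleLock
	Field	35:32	PMSVer
	Field	31:28	CTX_CMPs
	Res0	27:24
	Field	23:20	WRPs
	Res0	19:16
	Field	15:12	BRPs
	Field	11:8	PMUVer
	Field	7:4	TraceVer
	Field	3:0	DebugVer
	EndSysreg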

-- 
Catalin

* Re: [PATCH RFC] arm64/sysregs: Align field names in ID_AA64DFR0_EL1 with architecture
From: Will Deacon @ 2022-05-25  9:27 UTC
  To: Mark Rutland; +Cc: Mark Brown, Catalin Marinas, Marc Zyngier, linux-arm-kernel

On Wed, May 25, 2022 at 09:46:13AM +0100, Mark Rutland wrote:
> Hi Mark,
> 
> On Wed, May 18, 2022 at 03:57:50PM +0100, Mark Brown wrote:
> > The naming scheme the architecture uses for the fields in ID_AA64DFR0_EL1
> > does not align well with kernel conventions, using as it does a lot of
> > MixedCase in various arrangements. In preparation for automatically
> > generating the defines for this register, rename the existing defines to
> > match the names used in the architecture.
> > 
> > Signed-off-by: Mark Brown <broonie@kernel.org>
> > ---
> > 
> > I am not entirely sure whether the best approach here is to deviate from
> > the kernel naming convention as this does, or to follow the architecture
> > as we've decided in other cases. I don't really mind, but would like some
> > feedback before going ahead and sorting out the remaining issues with
> > this register.
> 
> It's unfortunate the architecture itself doesn't follow a consistent pattern.
> :/
> 
> I don't personally have strong feelings here. I'm happy with either:
> 
> (a) Matching the case to the architectural names, even if that means some
>     fields are MixedCase, as with this patch
> 
> (b) Always using ALLCAPS for ID reg field definitions.
> 
> Catalin/Marc/Will, any preference?

I don't really see what we gain by changing this stuff, other than making
it harder to apply stable backports automatically.

Will

* Re: [PATCH RFC] arm64/sysregs: Align field names in ID_AA64DFR0_EL1 with architecture
From: Mark Brown @ 2022-05-25 12:06 UTC
  To: Will Deacon; +Cc: Mark Rutland, Catalin Marinas, Marc Zyngier, linux-arm-kernel


On Wed, May 25, 2022 at 10:27:52AM +0100, Will Deacon wrote:
> On Wed, May 25, 2022 at 09:46:13AM +0100, Mark Rutland wrote:

> > Catalin/Marc/Will, any preference?

> I don't really see what we gain by changing this stuff, other than making
> it harder to apply stable backports automatically.

OK, I'll just leave these with the existing naming scheme.
