* [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-13 13:58 ` Marc Zyngier
0 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-13 13:58 UTC (permalink / raw)
To: linux-arm-kernel, kvm, kvmarm
Cc: James Morse, Suzuki K Poulose, Alexandru Elisei,
Alexandre Chartre, Robin Murphy, kernel-team
A number of the PMU sysregs expose reset values that are not
compliant with the architecture (set bits in the RES0 ranges,
for example).
This in turn has the effect that we need to pointlessly mask
some registers when using them.
Let's start by making sure we don't have illegal values in the
shadow registers at reset time. This affects all the registers
that dedicate one bit per counter, the counters themselves,
PMEVTYPERn_EL0 and PMSELR_EL0.
Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
1 file changed, 43 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f6f126eb6ac1..95ccb8f45409 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
+static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 n, mask;
+
+ /* No PMU available, any PMU reg may UNDEF... */
+ if (!kvm_arm_support_pmu_v3())
+ return;
+
+ n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
+ n &= ARMV8_PMU_PMCR_N_MASK;
+
+ reset_unknown(vcpu, r);
+
+ mask = BIT(ARMV8_PMU_CYCLE_IDX);
+ if (n)
+ mask |= GENMASK(n - 1, 0);
+
+ __vcpu_sys_reg(vcpu, r->reg) &= mask;
+}
+
+static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ reset_unknown(vcpu, r);
+ __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
+}
+
+static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ reset_unknown(vcpu, r);
+ __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
+}
+
+static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ reset_unknown(vcpu, r);
+ __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
+}
+
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
@@ -944,16 +982,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
#define PMU_SYS_REG(r) \
- SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+ SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
+ .reset = reset_pmevcntr, \
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n) \
{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
+ .reset = reset_pmevtyper, \
.access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -1595,13 +1635,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ PMU_SYS_REG(SYS_PMSWINC_EL0),
.access = access_pmswinc, .reg = PMSWINC_EL0 },
{ PMU_SYS_REG(SYS_PMSELR_EL0),
- .access = access_pmselr, .reg = PMSELR_EL0 },
+ .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
{ PMU_SYS_REG(SYS_PMCEID0_EL0),
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(SYS_PMCEID1_EL0),
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
- .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+ .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
.access = access_pmu_evtyper, .reset = NULL },
{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
--
2.30.2
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-13 13:58 ` Marc Zyngier
0 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-13 13:58 UTC (permalink / raw)
To: linux-arm-kernel, kvm, kvmarm; +Cc: kernel-team, Robin Murphy
A number of the PMU sysregs expose reset values that are not
compliant with the architecture (set bits in the RES0 ranges,
for example).
This in turn has the effect that we need to pointlessly mask
some registers when using them.
Let's start by making sure we don't have illegal values in the
shadow registers at reset time. This affects all the registers
that dedicate one bit per counter, the counters themselves,
PMEVTYPERn_EL0 and PMSELR_EL0.
Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
1 file changed, 43 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f6f126eb6ac1..95ccb8f45409 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
+static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 n, mask;
+
+ /* No PMU available, any PMU reg may UNDEF... */
+ if (!kvm_arm_support_pmu_v3())
+ return;
+
+ n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
+ n &= ARMV8_PMU_PMCR_N_MASK;
+
+ reset_unknown(vcpu, r);
+
+ mask = BIT(ARMV8_PMU_CYCLE_IDX);
+ if (n)
+ mask |= GENMASK(n - 1, 0);
+
+ __vcpu_sys_reg(vcpu, r->reg) &= mask;
+}
+
+static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ reset_unknown(vcpu, r);
+ __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
+}
+
+static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ reset_unknown(vcpu, r);
+ __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
+}
+
+static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ reset_unknown(vcpu, r);
+ __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
+}
+
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
@@ -944,16 +982,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
#define PMU_SYS_REG(r) \
- SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+ SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
+ .reset = reset_pmevcntr, \
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n) \
{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
+ .reset = reset_pmevtyper, \
.access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -1595,13 +1635,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ PMU_SYS_REG(SYS_PMSWINC_EL0),
.access = access_pmswinc, .reg = PMSWINC_EL0 },
{ PMU_SYS_REG(SYS_PMSELR_EL0),
- .access = access_pmselr, .reg = PMSELR_EL0 },
+ .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
{ PMU_SYS_REG(SYS_PMCEID0_EL0),
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(SYS_PMCEID1_EL0),
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
- .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+ .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
.access = access_pmu_evtyper, .reset = NULL },
{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
--
2.30.2
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply related [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
2021-07-13 13:58 ` Marc Zyngier
(?)
@ 2021-07-13 14:39 ` Russell King (Oracle)
-1 siblings, 0 replies; 42+ messages in thread
From: Russell King (Oracle) @ 2021-07-13 14:39 UTC (permalink / raw)
To: Marc Zyngier
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandru Elisei, Alexandre Chartre, Robin Murphy, kernel-team
On Tue, Jul 13, 2021 at 02:58:58PM +0100, Marc Zyngier wrote:
> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + u64 n, mask;
> +
> + /* No PMU available, any PMU reg may UNDEF... */
> + if (!kvm_arm_support_pmu_v3())
> + return;
> +
> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> + n &= ARMV8_PMU_PMCR_N_MASK;
> +
> + reset_unknown(vcpu, r);
> +
> + mask = BIT(ARMV8_PMU_CYCLE_IDX);
> + if (n)
> + mask |= GENMASK(n - 1, 0);
> +
> + __vcpu_sys_reg(vcpu, r->reg) &= mask;
Would this read more logically to structure it as:
mask = BIT(ARMV8_PMU_CYCLE_IDX);
n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
n &= ARMV8_PMU_PMCR_N_MASK;
if (n)
mask |= GENMASK(n - 1, 0);
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= mask;
?
Thanks.
--
RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
FTTP is here! 40Mbps down 10Mbps up. Decent connectivity at last!
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-13 14:39 ` Russell King (Oracle)
0 siblings, 0 replies; 42+ messages in thread
From: Russell King (Oracle) @ 2021-07-13 14:39 UTC (permalink / raw)
To: Marc Zyngier
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandru Elisei, Alexandre Chartre, Robin Murphy, kernel-team
On Tue, Jul 13, 2021 at 02:58:58PM +0100, Marc Zyngier wrote:
> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + u64 n, mask;
> +
> + /* No PMU available, any PMU reg may UNDEF... */
> + if (!kvm_arm_support_pmu_v3())
> + return;
> +
> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> + n &= ARMV8_PMU_PMCR_N_MASK;
> +
> + reset_unknown(vcpu, r);
> +
> + mask = BIT(ARMV8_PMU_CYCLE_IDX);
> + if (n)
> + mask |= GENMASK(n - 1, 0);
> +
> + __vcpu_sys_reg(vcpu, r->reg) &= mask;
Would this read more logically to structure it as:
mask = BIT(ARMV8_PMU_CYCLE_IDX);
n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
n &= ARMV8_PMU_PMCR_N_MASK;
if (n)
mask |= GENMASK(n - 1, 0);
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= mask;
?
Thanks.
--
RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
FTTP is here! 40Mbps down 10Mbps up. Decent connectivity at last!
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-13 14:39 ` Russell King (Oracle)
0 siblings, 0 replies; 42+ messages in thread
From: Russell King (Oracle) @ 2021-07-13 14:39 UTC (permalink / raw)
To: Marc Zyngier; +Cc: kernel-team, kvm, Robin Murphy, kvmarm, linux-arm-kernel
On Tue, Jul 13, 2021 at 02:58:58PM +0100, Marc Zyngier wrote:
> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + u64 n, mask;
> +
> + /* No PMU available, any PMU reg may UNDEF... */
> + if (!kvm_arm_support_pmu_v3())
> + return;
> +
> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> + n &= ARMV8_PMU_PMCR_N_MASK;
> +
> + reset_unknown(vcpu, r);
> +
> + mask = BIT(ARMV8_PMU_CYCLE_IDX);
> + if (n)
> + mask |= GENMASK(n - 1, 0);
> +
> + __vcpu_sys_reg(vcpu, r->reg) &= mask;
Would this read more logically to structure it as:
mask = BIT(ARMV8_PMU_CYCLE_IDX);
n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
n &= ARMV8_PMU_PMCR_N_MASK;
if (n)
mask |= GENMASK(n - 1, 0);
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= mask;
?
Thanks.
--
RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
FTTP is here! 40Mbps down 10Mbps up. Decent connectivity at last!
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
2021-07-13 14:39 ` Russell King (Oracle)
(?)
@ 2021-07-13 15:59 ` Marc Zyngier
-1 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-13 15:59 UTC (permalink / raw)
To: Russell King (Oracle)
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandru Elisei, Alexandre Chartre, Robin Murphy, kernel-team
On Tue, 13 Jul 2021 15:39:49 +0100,
"Russell King (Oracle)" <linux@armlinux.org.uk> wrote:
>
> On Tue, Jul 13, 2021 at 02:58:58PM +0100, Marc Zyngier wrote:
> > +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > +{
> > + u64 n, mask;
> > +
> > + /* No PMU available, any PMU reg may UNDEF... */
> > + if (!kvm_arm_support_pmu_v3())
> > + return;
> > +
> > + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> > + n &= ARMV8_PMU_PMCR_N_MASK;
> > +
> > + reset_unknown(vcpu, r);
> > +
> > + mask = BIT(ARMV8_PMU_CYCLE_IDX);
> > + if (n)
> > + mask |= GENMASK(n - 1, 0);
> > +
> > + __vcpu_sys_reg(vcpu, r->reg) &= mask;
>
> Would this read more logically to structure it as:
>
> mask = BIT(ARMV8_PMU_CYCLE_IDX);
>
> n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> n &= ARMV8_PMU_PMCR_N_MASK;
> if (n)
> mask |= GENMASK(n - 1, 0);
>
> reset_unknown(vcpu, r);
> __vcpu_sys_reg(vcpu, r->reg) &= mask;
>
> ?
Yup, that's nicer. Amended locally.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-13 15:59 ` Marc Zyngier
0 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-13 15:59 UTC (permalink / raw)
To: Russell King (Oracle)
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandru Elisei, Alexandre Chartre, Robin Murphy, kernel-team
On Tue, 13 Jul 2021 15:39:49 +0100,
"Russell King (Oracle)" <linux@armlinux.org.uk> wrote:
>
> On Tue, Jul 13, 2021 at 02:58:58PM +0100, Marc Zyngier wrote:
> > +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > +{
> > + u64 n, mask;
> > +
> > + /* No PMU available, any PMU reg may UNDEF... */
> > + if (!kvm_arm_support_pmu_v3())
> > + return;
> > +
> > + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> > + n &= ARMV8_PMU_PMCR_N_MASK;
> > +
> > + reset_unknown(vcpu, r);
> > +
> > + mask = BIT(ARMV8_PMU_CYCLE_IDX);
> > + if (n)
> > + mask |= GENMASK(n - 1, 0);
> > +
> > + __vcpu_sys_reg(vcpu, r->reg) &= mask;
>
> Would this read more logically to structure it as:
>
> mask = BIT(ARMV8_PMU_CYCLE_IDX);
>
> n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> n &= ARMV8_PMU_PMCR_N_MASK;
> if (n)
> mask |= GENMASK(n - 1, 0);
>
> reset_unknown(vcpu, r);
> __vcpu_sys_reg(vcpu, r->reg) &= mask;
>
> ?
Yup, that's nicer. Amended locally.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-13 15:59 ` Marc Zyngier
0 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-13 15:59 UTC (permalink / raw)
To: Russell King (Oracle)
Cc: kernel-team, kvm, Robin Murphy, kvmarm, linux-arm-kernel
On Tue, 13 Jul 2021 15:39:49 +0100,
"Russell King (Oracle)" <linux@armlinux.org.uk> wrote:
>
> On Tue, Jul 13, 2021 at 02:58:58PM +0100, Marc Zyngier wrote:
> > +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > +{
> > + u64 n, mask;
> > +
> > + /* No PMU available, any PMU reg may UNDEF... */
> > + if (!kvm_arm_support_pmu_v3())
> > + return;
> > +
> > + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> > + n &= ARMV8_PMU_PMCR_N_MASK;
> > +
> > + reset_unknown(vcpu, r);
> > +
> > + mask = BIT(ARMV8_PMU_CYCLE_IDX);
> > + if (n)
> > + mask |= GENMASK(n - 1, 0);
> > +
> > + __vcpu_sys_reg(vcpu, r->reg) &= mask;
>
> Would this read more logically to structure it as:
>
> mask = BIT(ARMV8_PMU_CYCLE_IDX);
>
> n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> n &= ARMV8_PMU_PMCR_N_MASK;
> if (n)
> mask |= GENMASK(n - 1, 0);
>
> reset_unknown(vcpu, r);
> __vcpu_sys_reg(vcpu, r->reg) &= mask;
>
> ?
Yup, that's nicer. Amended locally.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
2021-07-13 15:59 ` Marc Zyngier
(?)
@ 2021-07-13 16:15 ` Russell King (Oracle)
-1 siblings, 0 replies; 42+ messages in thread
From: Russell King (Oracle) @ 2021-07-13 16:15 UTC (permalink / raw)
To: Marc Zyngier
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandru Elisei, Alexandre Chartre, Robin Murphy, kernel-team
On Tue, Jul 13, 2021 at 04:59:58PM +0100, Marc Zyngier wrote:
> On Tue, 13 Jul 2021 15:39:49 +0100,
> "Russell King (Oracle)" <linux@armlinux.org.uk> wrote:
> >
> > On Tue, Jul 13, 2021 at 02:58:58PM +0100, Marc Zyngier wrote:
> > > +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > > +{
> > > + u64 n, mask;
> > > +
> > > + /* No PMU available, any PMU reg may UNDEF... */
> > > + if (!kvm_arm_support_pmu_v3())
> > > + return;
> > > +
> > > + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> > > + n &= ARMV8_PMU_PMCR_N_MASK;
> > > +
> > > + reset_unknown(vcpu, r);
> > > +
> > > + mask = BIT(ARMV8_PMU_CYCLE_IDX);
> > > + if (n)
> > > + mask |= GENMASK(n - 1, 0);
> > > +
> > > + __vcpu_sys_reg(vcpu, r->reg) &= mask;
> >
> > Would this read more logically to structure it as:
> >
> > mask = BIT(ARMV8_PMU_CYCLE_IDX);
> >
> > n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> > n &= ARMV8_PMU_PMCR_N_MASK;
> > if (n)
> > mask |= GENMASK(n - 1, 0);
> >
> > reset_unknown(vcpu, r);
> > __vcpu_sys_reg(vcpu, r->reg) &= mask;
> >
> > ?
>
> Yup, that's nicer. Amended locally.
Thanks Marc.
For the whole series:
Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
--
RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
FTTP is here! 40Mbps down 10Mbps up. Decent connectivity at last!
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-13 16:15 ` Russell King (Oracle)
0 siblings, 0 replies; 42+ messages in thread
From: Russell King (Oracle) @ 2021-07-13 16:15 UTC (permalink / raw)
To: Marc Zyngier
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandru Elisei, Alexandre Chartre, Robin Murphy, kernel-team
On Tue, Jul 13, 2021 at 04:59:58PM +0100, Marc Zyngier wrote:
> On Tue, 13 Jul 2021 15:39:49 +0100,
> "Russell King (Oracle)" <linux@armlinux.org.uk> wrote:
> >
> > On Tue, Jul 13, 2021 at 02:58:58PM +0100, Marc Zyngier wrote:
> > > +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > > +{
> > > + u64 n, mask;
> > > +
> > > + /* No PMU available, any PMU reg may UNDEF... */
> > > + if (!kvm_arm_support_pmu_v3())
> > > + return;
> > > +
> > > + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> > > + n &= ARMV8_PMU_PMCR_N_MASK;
> > > +
> > > + reset_unknown(vcpu, r);
> > > +
> > > + mask = BIT(ARMV8_PMU_CYCLE_IDX);
> > > + if (n)
> > > + mask |= GENMASK(n - 1, 0);
> > > +
> > > + __vcpu_sys_reg(vcpu, r->reg) &= mask;
> >
> > Would this read more logically to structure it as:
> >
> > mask = BIT(ARMV8_PMU_CYCLE_IDX);
> >
> > n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> > n &= ARMV8_PMU_PMCR_N_MASK;
> > if (n)
> > mask |= GENMASK(n - 1, 0);
> >
> > reset_unknown(vcpu, r);
> > __vcpu_sys_reg(vcpu, r->reg) &= mask;
> >
> > ?
>
> Yup, that's nicer. Amended locally.
Thanks Marc.
For the whole series:
Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
--
RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
FTTP is here! 40Mbps down 10Mbps up. Decent connectivity at last!
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-13 16:15 ` Russell King (Oracle)
0 siblings, 0 replies; 42+ messages in thread
From: Russell King (Oracle) @ 2021-07-13 16:15 UTC (permalink / raw)
To: Marc Zyngier; +Cc: kernel-team, kvm, Robin Murphy, kvmarm, linux-arm-kernel
On Tue, Jul 13, 2021 at 04:59:58PM +0100, Marc Zyngier wrote:
> On Tue, 13 Jul 2021 15:39:49 +0100,
> "Russell King (Oracle)" <linux@armlinux.org.uk> wrote:
> >
> > On Tue, Jul 13, 2021 at 02:58:58PM +0100, Marc Zyngier wrote:
> > > +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > > +{
> > > + u64 n, mask;
> > > +
> > > + /* No PMU available, any PMU reg may UNDEF... */
> > > + if (!kvm_arm_support_pmu_v3())
> > > + return;
> > > +
> > > + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> > > + n &= ARMV8_PMU_PMCR_N_MASK;
> > > +
> > > + reset_unknown(vcpu, r);
> > > +
> > > + mask = BIT(ARMV8_PMU_CYCLE_IDX);
> > > + if (n)
> > > + mask |= GENMASK(n - 1, 0);
> > > +
> > > + __vcpu_sys_reg(vcpu, r->reg) &= mask;
> >
> > Would this read more logically to structure it as:
> >
> > mask = BIT(ARMV8_PMU_CYCLE_IDX);
> >
> > n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> > n &= ARMV8_PMU_PMCR_N_MASK;
> > if (n)
> > mask |= GENMASK(n - 1, 0);
> >
> > reset_unknown(vcpu, r);
> > __vcpu_sys_reg(vcpu, r->reg) &= mask;
> >
> > ?
>
> Yup, that's nicer. Amended locally.
Thanks Marc.
For the whole series:
Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
--
RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
FTTP is here! 40Mbps down 10Mbps up. Decent connectivity at last!
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
2021-07-13 13:58 ` Marc Zyngier
(?)
@ 2021-07-14 15:48 ` Alexandru Elisei
-1 siblings, 0 replies; 42+ messages in thread
From: Alexandru Elisei @ 2021-07-14 15:48 UTC (permalink / raw)
To: Marc Zyngier, linux-arm-kernel, kvm, kvmarm
Cc: James Morse, Suzuki K Poulose, Alexandre Chartre, Robin Murphy,
kernel-team
Hi Marc,
On 7/13/21 2:58 PM, Marc Zyngier wrote:
> A number of the PMU sysregs expose reset values that are not in
> compliant with the architecture (set bits in the RES0 ranges,
> for example).
>
> This in turn has the effect that we need to pointlessly mask
> some register when using them.
>
> Let's start by making sure we don't have illegal values in the
> shadow registers at reset time. This affects all the registers
> that dedicate one bit per counter, the counters themselves,
> PMEVTYPERn_EL0 and PMSELR_EL0.
>
> Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
> arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
> 1 file changed, 43 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index f6f126eb6ac1..95ccb8f45409 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
> return REG_HIDDEN;
> }
>
> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + u64 n, mask;
> +
> + /* No PMU available, any PMU reg may UNDEF... */
> + if (!kvm_arm_support_pmu_v3())
> + return;
> +
> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
Isn't this going to cause a lot of unnecessary traps with NV? Is that going to be
a problem? Because at the moment I can't think of an elegant way to avoid it,
other than special casing PMCR_EL0 in kvm_reset_sys_regs() and using here
__vcpu_sys_reg(vcpu, PMCR_EL0). Or, even better, using
kvm_pmu_valid_counter_mask(vcpu), since this is identical to what that function does.
> + n &= ARMV8_PMU_PMCR_N_MASK;
> +
> + reset_unknown(vcpu, r);
> +
> + mask = BIT(ARMV8_PMU_CYCLE_IDX);
PMSWINC_EL0 has bit 31 RES0. Other than that, looked at all the PMU registers and
everything looks correct to me.
Thanks,
Alex
> + if (n)
> + mask |= GENMASK(n - 1, 0);
> +
> + __vcpu_sys_reg(vcpu, r->reg) &= mask;
> +}
> +
> +static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + reset_unknown(vcpu, r);
> + __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
> +}
> +
> +static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + reset_unknown(vcpu, r);
> + __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
> +}
> +
> +static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + reset_unknown(vcpu, r);
> + __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
> +}
> +
> static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> {
> u64 pmcr, val;
> @@ -944,16 +982,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
>
> #define PMU_SYS_REG(r) \
> - SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
> + SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
>
> /* Macro to expand the PMEVCNTRn_EL0 register */
> #define PMU_PMEVCNTR_EL0(n) \
> { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
> + .reset = reset_pmevcntr, \
> .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
>
> /* Macro to expand the PMEVTYPERn_EL0 register */
> #define PMU_PMEVTYPER_EL0(n) \
> { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
> + .reset = reset_pmevtyper, \
> .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
>
> static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> @@ -1595,13 +1635,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
> { PMU_SYS_REG(SYS_PMSWINC_EL0),
> .access = access_pmswinc, .reg = PMSWINC_EL0 },
> { PMU_SYS_REG(SYS_PMSELR_EL0),
> - .access = access_pmselr, .reg = PMSELR_EL0 },
> + .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
> { PMU_SYS_REG(SYS_PMCEID0_EL0),
> .access = access_pmceid, .reset = NULL },
> { PMU_SYS_REG(SYS_PMCEID1_EL0),
> .access = access_pmceid, .reset = NULL },
> { PMU_SYS_REG(SYS_PMCCNTR_EL0),
> - .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
> + .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
> { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
> .access = access_pmu_evtyper, .reset = NULL },
> { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-14 15:48 ` Alexandru Elisei
0 siblings, 0 replies; 42+ messages in thread
From: Alexandru Elisei @ 2021-07-14 15:48 UTC (permalink / raw)
To: Marc Zyngier, linux-arm-kernel, kvm, kvmarm
Cc: James Morse, Suzuki K Poulose, Alexandre Chartre, Robin Murphy,
kernel-team
Hi Marc,
On 7/13/21 2:58 PM, Marc Zyngier wrote:
> A number of the PMU sysregs expose reset values that are not in
> compliant with the architecture (set bits in the RES0 ranges,
> for example).
>
> This in turn has the effect that we need to pointlessly mask
> some register when using them.
>
> Let's start by making sure we don't have illegal values in the
> shadow registers at reset time. This affects all the registers
> that dedicate one bit per counter, the counters themselves,
> PMEVTYPERn_EL0 and PMSELR_EL0.
>
> Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
> arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
> 1 file changed, 43 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index f6f126eb6ac1..95ccb8f45409 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
> return REG_HIDDEN;
> }
>
> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + u64 n, mask;
> +
> + /* No PMU available, any PMU reg may UNDEF... */
> + if (!kvm_arm_support_pmu_v3())
> + return;
> +
> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
Isn't this going to cause a lot of unnecessary traps with NV? Is that going to be
a problem? Because at the moment I can't think of an elegant way to avoid it,
other than special casing PMCR_EL0 in kvm_reset_sys_regs() and using here
__vcpu_sys_reg(vcpu, PMCR_EL0). Or, even better, using
kvm_pmu_valid_counter_mask(vcpu), since this is identical to what that function does.
> + n &= ARMV8_PMU_PMCR_N_MASK;
> +
> + reset_unknown(vcpu, r);
> +
> + mask = BIT(ARMV8_PMU_CYCLE_IDX);
PMSWINC_EL0 has bit 31 RES0. Other than that, looked at all the PMU registers and
everything looks correct to me.
Thanks,
Alex
> + if (n)
> + mask |= GENMASK(n - 1, 0);
> +
> + __vcpu_sys_reg(vcpu, r->reg) &= mask;
> +}
> +
> +static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + reset_unknown(vcpu, r);
> + __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
> +}
> +
> +static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + reset_unknown(vcpu, r);
> + __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
> +}
> +
> +static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + reset_unknown(vcpu, r);
> + __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
> +}
> +
> static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> {
> u64 pmcr, val;
> @@ -944,16 +982,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
>
> #define PMU_SYS_REG(r) \
> - SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
> + SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
>
> /* Macro to expand the PMEVCNTRn_EL0 register */
> #define PMU_PMEVCNTR_EL0(n) \
> { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
> + .reset = reset_pmevcntr, \
> .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
>
> /* Macro to expand the PMEVTYPERn_EL0 register */
> #define PMU_PMEVTYPER_EL0(n) \
> { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
> + .reset = reset_pmevtyper, \
> .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
>
> static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> @@ -1595,13 +1635,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
> { PMU_SYS_REG(SYS_PMSWINC_EL0),
> .access = access_pmswinc, .reg = PMSWINC_EL0 },
> { PMU_SYS_REG(SYS_PMSELR_EL0),
> - .access = access_pmselr, .reg = PMSELR_EL0 },
> + .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
> { PMU_SYS_REG(SYS_PMCEID0_EL0),
> .access = access_pmceid, .reset = NULL },
> { PMU_SYS_REG(SYS_PMCEID1_EL0),
> .access = access_pmceid, .reset = NULL },
> { PMU_SYS_REG(SYS_PMCCNTR_EL0),
> - .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
> + .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
> { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
> .access = access_pmu_evtyper, .reset = NULL },
> { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-14 15:48 ` Alexandru Elisei
0 siblings, 0 replies; 42+ messages in thread
From: Alexandru Elisei @ 2021-07-14 15:48 UTC (permalink / raw)
To: Marc Zyngier, linux-arm-kernel, kvm, kvmarm; +Cc: kernel-team, Robin Murphy
Hi Marc,
On 7/13/21 2:58 PM, Marc Zyngier wrote:
> A number of the PMU sysregs expose reset values that are not in
> compliant with the architecture (set bits in the RES0 ranges,
> for example).
>
> This in turn has the effect that we need to pointlessly mask
> some register when using them.
>
> Let's start by making sure we don't have illegal values in the
> shadow registers at reset time. This affects all the registers
> that dedicate one bit per counter, the counters themselves,
> PMEVTYPERn_EL0 and PMSELR_EL0.
>
> Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
> arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
> 1 file changed, 43 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index f6f126eb6ac1..95ccb8f45409 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
> return REG_HIDDEN;
> }
>
> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + u64 n, mask;
> +
> + /* No PMU available, any PMU reg may UNDEF... */
> + if (!kvm_arm_support_pmu_v3())
> + return;
> +
> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
Isn't this going to cause a lot of unnecessary traps with NV? Is that going to be
a problem? Because at the moment I can't think of an elegant way to avoid it,
other than special casing PMCR_EL0 in kvm_reset_sys_regs() and using here
__vcpu_sys_reg(vcpu, PMCR_EL0). Or, even better, using
kvm_pmu_valid_counter_mask(vcpu), since this is identical to what that function does.
> + n &= ARMV8_PMU_PMCR_N_MASK;
> +
> + reset_unknown(vcpu, r);
> +
> + mask = BIT(ARMV8_PMU_CYCLE_IDX);
PMSWINC_EL0 has bit 31 RES0. Other than that, looked at all the PMU registers and
everything looks correct to me.
Thanks,
Alex
> + if (n)
> + mask |= GENMASK(n - 1, 0);
> +
> + __vcpu_sys_reg(vcpu, r->reg) &= mask;
> +}
> +
> +static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + reset_unknown(vcpu, r);
> + __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
> +}
> +
> +static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + reset_unknown(vcpu, r);
> + __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
> +}
> +
> +static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + reset_unknown(vcpu, r);
> + __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
> +}
> +
> static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> {
> u64 pmcr, val;
> @@ -944,16 +982,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
>
> #define PMU_SYS_REG(r) \
> - SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
> + SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
>
> /* Macro to expand the PMEVCNTRn_EL0 register */
> #define PMU_PMEVCNTR_EL0(n) \
> { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
> + .reset = reset_pmevcntr, \
> .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
>
> /* Macro to expand the PMEVTYPERn_EL0 register */
> #define PMU_PMEVTYPER_EL0(n) \
> { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
> + .reset = reset_pmevtyper, \
> .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
>
> static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> @@ -1595,13 +1635,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
> { PMU_SYS_REG(SYS_PMSWINC_EL0),
> .access = access_pmswinc, .reg = PMSWINC_EL0 },
> { PMU_SYS_REG(SYS_PMSELR_EL0),
> - .access = access_pmselr, .reg = PMSELR_EL0 },
> + .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
> { PMU_SYS_REG(SYS_PMCEID0_EL0),
> .access = access_pmceid, .reset = NULL },
> { PMU_SYS_REG(SYS_PMCEID1_EL0),
> .access = access_pmceid, .reset = NULL },
> { PMU_SYS_REG(SYS_PMCCNTR_EL0),
> - .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
> + .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
> { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
> .access = access_pmu_evtyper, .reset = NULL },
> { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
2021-07-14 15:48 ` Alexandru Elisei
(?)
@ 2021-07-15 11:11 ` Marc Zyngier
-1 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-15 11:11 UTC (permalink / raw)
To: Alexandru Elisei
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandre Chartre, Robin Murphy, kernel-team
Hi Alex,
On Wed, 14 Jul 2021 16:48:07 +0100,
Alexandru Elisei <alexandru.elisei@arm.com> wrote:
>
> Hi Marc,
>
> On 7/13/21 2:58 PM, Marc Zyngier wrote:
> > A number of the PMU sysregs expose reset values that are not in
> > compliant with the architecture (set bits in the RES0 ranges,
> > for example).
> >
> > This in turn has the effect that we need to pointlessly mask
> > some register when using them.
> >
> > Let's start by making sure we don't have illegal values in the
> > shadow registers at reset time. This affects all the registers
> > that dedicate one bit per counter, the counters themselves,
> > PMEVTYPERn_EL0 and PMSELR_EL0.
> >
> > Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> > arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
> > 1 file changed, 43 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> > index f6f126eb6ac1..95ccb8f45409 100644
> > --- a/arch/arm64/kvm/sys_regs.c
> > +++ b/arch/arm64/kvm/sys_regs.c
> > @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
> > return REG_HIDDEN;
> > }
> >
> > +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > +{
> > + u64 n, mask;
> > +
> > + /* No PMU available, any PMU reg may UNDEF... */
> > + if (!kvm_arm_support_pmu_v3())
> > + return;
> > +
> > + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
>
> Isn't this going to cause a lot of unnecessary traps with NV? Is
> that going to be a problem?
We'll get new traps at L2 VM creation if we expose a PMU to the L1
guest, and if L2 gets one too. I don't think that's a real problem, as
the performance of an L2 PMU is bound to be hilarious, and if we are
really worried about that, we can always cache it locally. Which is
likely the best thing to do if you think of big-little.
Let's not think of big-little.
Another thing is that we could perfectly ignore the number of counters
on the host and always expose the architectural maximum, given that
the PMU is completely emulated. With that, no trap.
> Because at the moment I can't think of an elegant way to avoid it,
> other than special casing PMCR_EL0 in kvm_reset_sys_regs() and using
> here __vcpu_sys_reg(vcpu, PMCR_EL0). Or, even better, using
> kvm_pmu_valid_counter_mask(vcpu), since this is identical to what
> that function does.
I looked into that and bailed out, as it creates interesting ordering
problems...
>
> > + n &= ARMV8_PMU_PMCR_N_MASK;
> > +
> > + reset_unknown(vcpu, r);
> > +
> > + mask = BIT(ARMV8_PMU_CYCLE_IDX);
>
> PMSWINC_EL0 has bit 31 RES0. Other than that, looked at all the PMU
> registers and everything looks correct to me.
PMSWINC_EL0 is a RAZ/WO register, which really shouldn't have a shadow
counterpart (the storage is completely unused). Let me get rid of this
sucker in v2.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-15 11:11 ` Marc Zyngier
0 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-15 11:11 UTC (permalink / raw)
To: Alexandru Elisei
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandre Chartre, Robin Murphy, kernel-team
Hi Alex,
On Wed, 14 Jul 2021 16:48:07 +0100,
Alexandru Elisei <alexandru.elisei@arm.com> wrote:
>
> Hi Marc,
>
> On 7/13/21 2:58 PM, Marc Zyngier wrote:
> > A number of the PMU sysregs expose reset values that are not in
> > compliant with the architecture (set bits in the RES0 ranges,
> > for example).
> >
> > This in turn has the effect that we need to pointlessly mask
> > some register when using them.
> >
> > Let's start by making sure we don't have illegal values in the
> > shadow registers at reset time. This affects all the registers
> > that dedicate one bit per counter, the counters themselves,
> > PMEVTYPERn_EL0 and PMSELR_EL0.
> >
> > Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> > arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
> > 1 file changed, 43 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> > index f6f126eb6ac1..95ccb8f45409 100644
> > --- a/arch/arm64/kvm/sys_regs.c
> > +++ b/arch/arm64/kvm/sys_regs.c
> > @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
> > return REG_HIDDEN;
> > }
> >
> > +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > +{
> > + u64 n, mask;
> > +
> > + /* No PMU available, any PMU reg may UNDEF... */
> > + if (!kvm_arm_support_pmu_v3())
> > + return;
> > +
> > + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
>
> Isn't this going to cause a lot of unnecessary traps with NV? Is
> that going to be a problem?
We'll get new traps at L2 VM creation if we expose a PMU to the L1
guest, and if L2 gets one too. I don't think that's a real problem, as
the performance of an L2 PMU is bound to be hilarious, and if we are
really worried about that, we can always cache it locally. Which is
likely the best thing to do if you think of big-little.
Let's not think of big-little.
Another thing is that we could perfectly ignore the number of counters
on the host and always expose the architectural maximum, given that
the PMU is completely emulated. With that, no trap.
> Because at the moment I can't think of an elegant way to avoid it,
> other than special casing PMCR_EL0 in kvm_reset_sys_regs() and using
> here __vcpu_sys_reg(vcpu, PMCR_EL0). Or, even better, using
> kvm_pmu_valid_counter_mask(vcpu), since this is identical to what
> that function does.
I looked into that and bailed out, as it creates interesting ordering
problems...
>
> > + n &= ARMV8_PMU_PMCR_N_MASK;
> > +
> > + reset_unknown(vcpu, r);
> > +
> > + mask = BIT(ARMV8_PMU_CYCLE_IDX);
>
> PMSWINC_EL0 has bit 31 RES0. Other than that, looked at all the PMU
> registers and everything looks correct to me.
PMSWINC_EL0 is a RAZ/WO register, which really shouldn't have a shadow
counterpart (the storage is completely unused). Let me get rid of this
sucker in v2.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-15 11:11 ` Marc Zyngier
0 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-15 11:11 UTC (permalink / raw)
To: Alexandru Elisei; +Cc: kvm, kernel-team, Robin Murphy, kvmarm, linux-arm-kernel
Hi Alex,
On Wed, 14 Jul 2021 16:48:07 +0100,
Alexandru Elisei <alexandru.elisei@arm.com> wrote:
>
> Hi Marc,
>
> On 7/13/21 2:58 PM, Marc Zyngier wrote:
> > A number of the PMU sysregs expose reset values that are not in
> > compliant with the architecture (set bits in the RES0 ranges,
> > for example).
> >
> > This in turn has the effect that we need to pointlessly mask
> > some register when using them.
> >
> > Let's start by making sure we don't have illegal values in the
> > shadow registers at reset time. This affects all the registers
> > that dedicate one bit per counter, the counters themselves,
> > PMEVTYPERn_EL0 and PMSELR_EL0.
> >
> > Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> > arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
> > 1 file changed, 43 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> > index f6f126eb6ac1..95ccb8f45409 100644
> > --- a/arch/arm64/kvm/sys_regs.c
> > +++ b/arch/arm64/kvm/sys_regs.c
> > @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
> > return REG_HIDDEN;
> > }
> >
> > +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > +{
> > + u64 n, mask;
> > +
> > + /* No PMU available, any PMU reg may UNDEF... */
> > + if (!kvm_arm_support_pmu_v3())
> > + return;
> > +
> > + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
>
> Isn't this going to cause a lot of unnecessary traps with NV? Is
> that going to be a problem?
We'll get new traps at L2 VM creation if we expose a PMU to the L1
guest, and if L2 gets one too. I don't think that's a real problem, as
the performance of an L2 PMU is bound to be hilarious, and if we are
really worried about that, we can always cache it locally. Which is
likely the best thing to do if you think of big-little.
Let's not think of big-little.
Another thing is that we could perfectly ignore the number of counters
on the host and always expose the architectural maximum, given that
the PMU is completely emulated. With that, no trap.
> Because at the moment I can't think of an elegant way to avoid it,
> other than special casing PMCR_EL0 in kvm_reset_sys_regs() and using
> here __vcpu_sys_reg(vcpu, PMCR_EL0). Or, even better, using
> kvm_pmu_valid_counter_mask(vcpu), since this is identical to what
> that function does.
I looked into that and bailed out, as it creates interesting ordering
problems...
>
> > + n &= ARMV8_PMU_PMCR_N_MASK;
> > +
> > + reset_unknown(vcpu, r);
> > +
> > + mask = BIT(ARMV8_PMU_CYCLE_IDX);
>
> PMSWINC_EL0 has bit 31 RES0. Other than that, looked at all the PMU
> registers and everything looks correct to me.
PMSWINC_EL0 is a RAZ/WO register, which really shouldn't have a shadow
counterpart (the storage is completely unused). Let me get rid of this
sucker in v2.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
2021-07-15 11:11 ` Marc Zyngier
(?)
@ 2021-07-15 11:51 ` Robin Murphy
-1 siblings, 0 replies; 42+ messages in thread
From: Robin Murphy @ 2021-07-15 11:51 UTC (permalink / raw)
To: Marc Zyngier, Alexandru Elisei
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandre Chartre, kernel-team
On 2021-07-15 12:11, Marc Zyngier wrote:
> Hi Alex,
>
> On Wed, 14 Jul 2021 16:48:07 +0100,
> Alexandru Elisei <alexandru.elisei@arm.com> wrote:
>>
>> Hi Marc,
>>
>> On 7/13/21 2:58 PM, Marc Zyngier wrote:
>>> A number of the PMU sysregs expose reset values that are not in
>>> compliant with the architecture (set bits in the RES0 ranges,
>>> for example).
>>>
>>> This in turn has the effect that we need to pointlessly mask
>>> some register when using them.
>>>
>>> Let's start by making sure we don't have illegal values in the
>>> shadow registers at reset time. This affects all the registers
>>> that dedicate one bit per counter, the counters themselves,
>>> PMEVTYPERn_EL0 and PMSELR_EL0.
>>>
>>> Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
>>> Signed-off-by: Marc Zyngier <maz@kernel.org>
>>> ---
>>> arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
>>> 1 file changed, 43 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
>>> index f6f126eb6ac1..95ccb8f45409 100644
>>> --- a/arch/arm64/kvm/sys_regs.c
>>> +++ b/arch/arm64/kvm/sys_regs.c
>>> @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
>>> return REG_HIDDEN;
>>> }
>>>
>>> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
>>> +{
>>> + u64 n, mask;
>>> +
>>> + /* No PMU available, any PMU reg may UNDEF... */
>>> + if (!kvm_arm_support_pmu_v3())
>>> + return;
>>> +
>>> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
>>
>> Isn't this going to cause a lot of unnecessary traps with NV? Is
>> that going to be a problem?
>
> We'll get a new traps at L2 VM creation if we expose a PMU to the L1
> guest, and if L2 gets one too. I don't think that's a real problem, as
> the performance of an L2 PMU is bound to be hilarious, and if we are
> really worried about that, we can always cache it locally. Which is
> likely the best thing to do if you think of big-little.
>
> Let's not think of big-little.
>
> Another thing is that we could perfectly ignore the number of counter
> on the host and always expose the architectural maximum, given that
> the PMU is completely emulated. With that, no trap.
Although that would deliberately exacerbate the existing problem of
guest counters mysteriously under-reporting due to the host event
getting multiplexed, thus arguably make the L2 PMU even less useful.
But then trying to analyse application performance under NV at all seems
to stand a high chance of being akin to shovelling fog, so...
Robin.
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-15 11:51 ` Robin Murphy
0 siblings, 0 replies; 42+ messages in thread
From: Robin Murphy @ 2021-07-15 11:51 UTC (permalink / raw)
To: Marc Zyngier, Alexandru Elisei
Cc: linux-arm-kernel, kvm, kvmarm, James Morse, Suzuki K Poulose,
Alexandre Chartre, kernel-team
On 2021-07-15 12:11, Marc Zyngier wrote:
> Hi Alex,
>
> On Wed, 14 Jul 2021 16:48:07 +0100,
> Alexandru Elisei <alexandru.elisei@arm.com> wrote:
>>
>> Hi Marc,
>>
>> On 7/13/21 2:58 PM, Marc Zyngier wrote:
>>> A number of the PMU sysregs expose reset values that are not in
>>> compliant with the architecture (set bits in the RES0 ranges,
>>> for example).
>>>
>>> This in turn has the effect that we need to pointlessly mask
>>> some register when using them.
>>>
>>> Let's start by making sure we don't have illegal values in the
>>> shadow registers at reset time. This affects all the registers
>>> that dedicate one bit per counter, the counters themselves,
>>> PMEVTYPERn_EL0 and PMSELR_EL0.
>>>
>>> Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
>>> Signed-off-by: Marc Zyngier <maz@kernel.org>
>>> ---
>>> arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
>>> 1 file changed, 43 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
>>> index f6f126eb6ac1..95ccb8f45409 100644
>>> --- a/arch/arm64/kvm/sys_regs.c
>>> +++ b/arch/arm64/kvm/sys_regs.c
>>> @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
>>> return REG_HIDDEN;
>>> }
>>>
>>> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
>>> +{
>>> + u64 n, mask;
>>> +
>>> + /* No PMU available, any PMU reg may UNDEF... */
>>> + if (!kvm_arm_support_pmu_v3())
>>> + return;
>>> +
>>> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
>>
>> Isn't this going to cause a lot of unnecessary traps with NV? Is
>> that going to be a problem?
>
> We'll get a new traps at L2 VM creation if we expose a PMU to the L1
> guest, and if L2 gets one too. I don't think that's a real problem, as
> the performance of an L2 PMU is bound to be hilarious, and if we are
> really worried about that, we can always cache it locally. Which is
> likely the best thing to do if you think of big-little.
>
> Let's not think of big-little.
>
> Another thing is that we could perfectly ignore the number of counter
> on the host and always expose the architectural maximum, given that
> the PMU is completely emulated. With that, no trap.
Although that would deliberately exacerbate the existing problem of
guest counters mysteriously under-reporting due to the host event
getting multiplexed, thus arguably make the L2 PMU even less useful.
But then trying to analyse application performance under NV at all seems
to stand a high chance of being akin to shovelling fog, so...
Robin.
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-15 11:51 ` Robin Murphy
0 siblings, 0 replies; 42+ messages in thread
From: Robin Murphy @ 2021-07-15 11:51 UTC (permalink / raw)
To: Marc Zyngier, Alexandru Elisei; +Cc: kvm, kernel-team, kvmarm, linux-arm-kernel
On 2021-07-15 12:11, Marc Zyngier wrote:
> Hi Alex,
>
> On Wed, 14 Jul 2021 16:48:07 +0100,
> Alexandru Elisei <alexandru.elisei@arm.com> wrote:
>>
>> Hi Marc,
>>
>> On 7/13/21 2:58 PM, Marc Zyngier wrote:
>>> A number of the PMU sysregs expose reset values that are not in
>>> compliant with the architecture (set bits in the RES0 ranges,
>>> for example).
>>>
>>> This in turn has the effect that we need to pointlessly mask
>>> some register when using them.
>>>
>>> Let's start by making sure we don't have illegal values in the
>>> shadow registers at reset time. This affects all the registers
>>> that dedicate one bit per counter, the counters themselves,
>>> PMEVTYPERn_EL0 and PMSELR_EL0.
>>>
>>> Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
>>> Signed-off-by: Marc Zyngier <maz@kernel.org>
>>> ---
>>> arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
>>> 1 file changed, 43 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
>>> index f6f126eb6ac1..95ccb8f45409 100644
>>> --- a/arch/arm64/kvm/sys_regs.c
>>> +++ b/arch/arm64/kvm/sys_regs.c
>>> @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
>>> return REG_HIDDEN;
>>> }
>>>
>>> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
>>> +{
>>> + u64 n, mask;
>>> +
>>> + /* No PMU available, any PMU reg may UNDEF... */
>>> + if (!kvm_arm_support_pmu_v3())
>>> + return;
>>> +
>>> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
>>
>> Isn't this going to cause a lot of unnecessary traps with NV? Is
>> that going to be a problem?
>
> We'll get a new traps at L2 VM creation if we expose a PMU to the L1
> guest, and if L2 gets one too. I don't think that's a real problem, as
> the performance of an L2 PMU is bound to be hilarious, and if we are
> really worried about that, we can always cache it locally. Which is
> likely the best thing to do if you think of big-little.
>
> Let's not think of big-little.
>
> Another thing is that we could perfectly ignore the number of counter
> on the host and always expose the architectural maximum, given that
> the PMU is completely emulated. With that, no trap.
Although that would deliberately exacerbate the existing problem of
guest counters mysteriously under-reporting due to the host event
getting multiplexed, thus arguably make the L2 PMU even less useful.
But then trying to analyse application performance under NV at all seems
to stand a high chance of being akin to shovelling fog, so...
Robin.
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
2021-07-15 11:51 ` Robin Murphy
(?)
@ 2021-07-15 12:25 ` Marc Zyngier
-1 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-15 12:25 UTC (permalink / raw)
To: Robin Murphy
Cc: Alexandru Elisei, linux-arm-kernel, kvm, kvmarm, James Morse,
Suzuki K Poulose, Alexandre Chartre, kernel-team
On Thu, 15 Jul 2021 12:51:49 +0100,
Robin Murphy <robin.murphy@arm.com> wrote:
>
> On 2021-07-15 12:11, Marc Zyngier wrote:
> > Hi Alex,
> >
> > On Wed, 14 Jul 2021 16:48:07 +0100,
> > Alexandru Elisei <alexandru.elisei@arm.com> wrote:
> >>
> >> Hi Marc,
> >>
> >> On 7/13/21 2:58 PM, Marc Zyngier wrote:
> >>> A number of the PMU sysregs expose reset values that are not in
> >>> compliant with the architecture (set bits in the RES0 ranges,
> >>> for example).
> >>>
> >>> This in turn has the effect that we need to pointlessly mask
> >>> some register when using them.
> >>>
> >>> Let's start by making sure we don't have illegal values in the
> >>> shadow registers at reset time. This affects all the registers
> >>> that dedicate one bit per counter, the counters themselves,
> >>> PMEVTYPERn_EL0 and PMSELR_EL0.
> >>>
> >>> Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
> >>> Signed-off-by: Marc Zyngier <maz@kernel.org>
> >>> ---
> >>> arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
> >>> 1 file changed, 43 insertions(+), 3 deletions(-)
> >>>
> >>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> >>> index f6f126eb6ac1..95ccb8f45409 100644
> >>> --- a/arch/arm64/kvm/sys_regs.c
> >>> +++ b/arch/arm64/kvm/sys_regs.c
> >>> @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
> >>> return REG_HIDDEN;
> >>> }
> >>> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct
> >>> sys_reg_desc *r)
> >>> +{
> >>> + u64 n, mask;
> >>> +
> >>> + /* No PMU available, any PMU reg may UNDEF... */
> >>> + if (!kvm_arm_support_pmu_v3())
> >>> + return;
> >>> +
> >>> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> >>
> >> Isn't this going to cause a lot of unnecessary traps with NV? Is
> >> that going to be a problem?
> >
> > We'll get a new traps at L2 VM creation if we expose a PMU to the L1
> > guest, and if L2 gets one too. I don't think that's a real problem, as
> > the performance of an L2 PMU is bound to be hilarious, and if we are
> > really worried about that, we can always cache it locally. Which is
> > likely the best thing to do if you think of big-little.
> >
> > Let's not think of big-little.
> >
> > Another thing is that we could perfectly ignore the number of counter
> > on the host and always expose the architectural maximum, given that
> > the PMU is completely emulated. With that, no trap.
>
> Although that would deliberately exacerbate the existing problem of
> guest counters mysteriously under-reporting due to the host event
> getting multiplexed, thus arguably make the L2 PMU even less useful.
Oh, absolutely. But the current implementation of the PMU emulation
would be pretty terrible on NV anyway.
> But then trying to analyse application performance under NV at all
> seems to stand a high chance of being akin to shovelling fog, so...
Indeed. Not to mention that there is no (publicly available) HW to
measure performance on anyway...
M.
--
Without deviation from the norm, progress is not possible.
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-15 12:25 ` Marc Zyngier
0 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-15 12:25 UTC (permalink / raw)
To: Robin Murphy
Cc: Alexandru Elisei, linux-arm-kernel, kvm, kvmarm, James Morse,
Suzuki K Poulose, Alexandre Chartre, kernel-team
On Thu, 15 Jul 2021 12:51:49 +0100,
Robin Murphy <robin.murphy@arm.com> wrote:
>
> On 2021-07-15 12:11, Marc Zyngier wrote:
> > Hi Alex,
> >
> > On Wed, 14 Jul 2021 16:48:07 +0100,
> > Alexandru Elisei <alexandru.elisei@arm.com> wrote:
> >>
> >> Hi Marc,
> >>
> >> On 7/13/21 2:58 PM, Marc Zyngier wrote:
> >>> A number of the PMU sysregs expose reset values that are not in
> >>> compliant with the architecture (set bits in the RES0 ranges,
> >>> for example).
> >>>
> >>> This in turn has the effect that we need to pointlessly mask
> >>> some register when using them.
> >>>
> >>> Let's start by making sure we don't have illegal values in the
> >>> shadow registers at reset time. This affects all the registers
> >>> that dedicate one bit per counter, the counters themselves,
> >>> PMEVTYPERn_EL0 and PMSELR_EL0.
> >>>
> >>> Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
> >>> Signed-off-by: Marc Zyngier <maz@kernel.org>
> >>> ---
> >>> arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
> >>> 1 file changed, 43 insertions(+), 3 deletions(-)
> >>>
> >>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> >>> index f6f126eb6ac1..95ccb8f45409 100644
> >>> --- a/arch/arm64/kvm/sys_regs.c
> >>> +++ b/arch/arm64/kvm/sys_regs.c
> >>> @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
> >>> return REG_HIDDEN;
> >>> }
> >>> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct
> >>> sys_reg_desc *r)
> >>> +{
> >>> + u64 n, mask;
> >>> +
> >>> + /* No PMU available, any PMU reg may UNDEF... */
> >>> + if (!kvm_arm_support_pmu_v3())
> >>> + return;
> >>> +
> >>> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> >>
> >> Isn't this going to cause a lot of unnecessary traps with NV? Is
> >> that going to be a problem?
> >
> > We'll get a new traps at L2 VM creation if we expose a PMU to the L1
> > guest, and if L2 gets one too. I don't think that's a real problem, as
> > the performance of an L2 PMU is bound to be hilarious, and if we are
> > really worried about that, we can always cache it locally. Which is
> > likely the best thing to do if you think of big-little.
> >
> > Let's not think of big-little.
> >
> > Another thing is that we could perfectly ignore the number of counter
> > on the host and always expose the architectural maximum, given that
> > the PMU is completely emulated. With that, no trap.
>
> Although that would deliberately exacerbate the existing problem of
> guest counters mysteriously under-reporting due to the host event
> getting multiplexed, thus arguably make the L2 PMU even less useful.
Oh, absolutely. But the current implementation of the PMU emulation
would be pretty terrible on NV anyway.
> But then trying to analyse application performance under NV at all
> seems to stand a high chance of being akin to shovelling fog, so...
Indeed. Not to mention that there is no (publicly available) HW to
measure performance on anyway...
M.
--
Without deviation from the norm, progress is not possible.
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH 1/3] KVM: arm64: Narrow PMU sysreg reset values to architectural requirements
@ 2021-07-15 12:25 ` Marc Zyngier
0 siblings, 0 replies; 42+ messages in thread
From: Marc Zyngier @ 2021-07-15 12:25 UTC (permalink / raw)
To: Robin Murphy; +Cc: kvm, kernel-team, kvmarm, linux-arm-kernel
On Thu, 15 Jul 2021 12:51:49 +0100,
Robin Murphy <robin.murphy@arm.com> wrote:
>
> On 2021-07-15 12:11, Marc Zyngier wrote:
> > Hi Alex,
> >
> > On Wed, 14 Jul 2021 16:48:07 +0100,
> > Alexandru Elisei <alexandru.elisei@arm.com> wrote:
> >>
> >> Hi Marc,
> >>
> >> On 7/13/21 2:58 PM, Marc Zyngier wrote:
> >>> A number of the PMU sysregs expose reset values that are not
> >>> compliant with the architecture (set bits in the RES0 ranges,
> >>> for example).
> >>>
> >>> This in turn has the effect that we need to pointlessly mask
> >>> some registers when using them.
> >>>
> >>> Let's start by making sure we don't have illegal values in the
> >>> shadow registers at reset time. This affects all the registers
> >>> that dedicate one bit per counter, the counters themselves,
> >>> PMEVTYPERn_EL0 and PMSELR_EL0.
> >>>
> >>> Reported-by: Alexandre Chartre <alexandre.chartre@oracle.com>
> >>> Signed-off-by: Marc Zyngier <maz@kernel.org>
> >>> ---
> >>> arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++---
> >>> 1 file changed, 43 insertions(+), 3 deletions(-)
> >>>
> >>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> >>> index f6f126eb6ac1..95ccb8f45409 100644
> >>> --- a/arch/arm64/kvm/sys_regs.c
> >>> +++ b/arch/arm64/kvm/sys_regs.c
> >>> @@ -603,6 +603,44 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
> >>> return REG_HIDDEN;
> >>> }
> >>> +static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct
> >>> sys_reg_desc *r)
> >>> +{
> >>> + u64 n, mask;
> >>> +
> >>> + /* No PMU available, any PMU reg may UNDEF... */
> >>> + if (!kvm_arm_support_pmu_v3())
> >>> + return;
> >>> +
> >>> + n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
> >>
> >> Isn't this going to cause a lot of unnecessary traps with NV? Is
> >> that going to be a problem?
> >
> > We'll get new traps at L2 VM creation if we expose a PMU to the L1
> > guest, and if L2 gets one too. I don't think that's a real problem, as
> > the performance of an L2 PMU is bound to be hilarious, and if we are
> > really worried about that, we can always cache it locally. Which is
> > likely the best thing to do if you think of big-little.
> >
> > Let's not think of big-little.
> >
> > Another thing is that we could perfectly ignore the number of counters
> > on the host and always expose the architectural maximum, given that
> > the PMU is completely emulated. With that, no trap.
>
> Although that would deliberately exacerbate the existing problem of
> guest counters mysteriously under-reporting due to the host event
> getting multiplexed, thus arguably making the L2 PMU even less useful.
Oh, absolutely. But the current implementation of the PMU emulation
would be pretty terrible on NV anyway.
> But then trying to analyse application performance under NV at all
> seems to stand a high chance of being akin to shovelling fog, so...
Indeed. Not to mention that there is no (publicly available) HW to
measure performance on anyway...
M.
--
Without deviation from the norm, progress is not possible.
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 42+ messages in thread