Here are a few cleanups and fixes for 5.6, all around our debug and PMU ID register emulation: 1) Missing RES1 bit in DBGDIDR 2) Limiting PMU version to v8.1 3) Limiting debug version to ARMv8.0 4) Support for ARMv8.4-PMU (1) was reported by Peter, (2) had a patch from Andrew Murray on the list, but it's been a while since he was going to rebase and fix it, so I did bite the bullet. (3) is needed until we implement the right thing with NV. (4) was too easy not to be done right away. Patch #2 is a cleanup that helps the following patches. Unless someone objects, I'd like to take this into 5.6 (except maybe for the last patch). Marc Zyngier (5): KVM: arm64: Fix missing RES1 in emulation of DBGDIDR KVM: arm64: Refactor filtering of ID registers KVM: arm64: Limit PMU version to ARMv8.1 KVM: arm64: Limit the debug architecture to ARMv8.0 KVM: arm64: Upgrade PMU support to ARMv8.4 arch/arm64/include/asm/sysreg.h | 2 ++ arch/arm64/kvm/sys_regs.c | 35 +++++++++++++++++++++++++-------- 2 files changed, 29 insertions(+), 8 deletions(-) -- 2.20.1 _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
The AArch32 CP14 DBGDIDR has bit 15 set to RES1, which our current emulation doesn't set. Just add the missing bit. Reported-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/kvm/sys_regs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3e909b117f0c..da82c4b03aab 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1658,7 +1658,7 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) - | (6 << 16) | (el3 << 14) | (el3 << 12)); + | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); return true; } } -- 2.20.1 _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Our current ID register filtering is starting to be a mess of if() statements, and isn't going to get any saner. Let's turn it into a switch(), which has a chance of being more readable. Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/kvm/sys_regs.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index da82c4b03aab..682fedd7700f 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -9,6 +9,7 @@ * Christoffer Dall <c.dall@virtualopensystems.com> */ +#include <linux/bitfield.h> #include <linux/bsearch.h> #include <linux/kvm_host.h> #include <linux/mm.h> @@ -1070,6 +1071,8 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, return true; } +#define FEATURE(x) (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT)) + /* Read a sanitised cpufeature ID register by sys_reg_desc */ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r, bool raz) @@ -1078,13 +1081,18 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); u64 val = raz ? 0 : read_sanitised_ftr_reg(id); - if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) { - val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); - } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { - val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) | - (0xfUL << ID_AA64ISAR1_API_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPI_SHIFT)); + switch (id) { + case SYS_ID_AA64PFR0_EL1: + if (!vcpu_has_sve(vcpu)) + val &= ~FEATURE(ID_AA64PFR0_SVE); + break; + case SYS_ID_AA64ISAR1_EL1: + if (!vcpu_has_ptrauth(vcpu)) + val &= ~(FEATURE(ID_AA64ISAR1_APA) | + FEATURE(ID_AA64ISAR1_API) | + FEATURE(ID_AA64ISAR1_GPA) | + FEATURE(ID_AA64ISAR1_GPI)); + break; } return val; -- 2.20.1 _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Our PMU code is only implementing the ARMv8.1 features, so let's stick to this when reporting the feature set to the guest. Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/kvm/sys_regs.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 682fedd7700f..06b2d0dc6c73 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1093,6 +1093,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, FEATURE(ID_AA64ISAR1_GPA) | FEATURE(ID_AA64ISAR1_GPI)); break; + case SYS_ID_AA64DFR0_EL1: + /* Limit PMU to ARMv8.1 */ + val &= ~FEATURE(ID_AA64DFR0_PMUVER); + val |= FIELD_PREP(FEATURE(ID_AA64DFR0_PMUVER), 4); + break; } return val; -- 2.20.1 _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Let's not pretend we support anything but ARMv8.0 as far as the debug architecture is concerned. Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/kvm/sys_regs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 06b2d0dc6c73..43087b50a211 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1094,6 +1094,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, FEATURE(ID_AA64ISAR1_GPI)); break; case SYS_ID_AA64DFR0_EL1: + /* Limit debug to ARMv8.0 */ + val &= ~FEATURE(ID_AA64DFR0_DEBUGVER); + val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6); /* Limit PMU to ARMv8.1 */ val &= ~FEATURE(ID_AA64DFR0_PMUVER); val |= FIELD_PREP(FEATURE(ID_AA64DFR0_PMUVER), 4); -- 2.20.1 _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Upgrading the PMU code from ARMv8.1 to ARMv8.4 turns out to be pretty easy. All that is required is support for PMMIR_EL1, which is read-only, and for which returning 0 is a valid option. Let's just do that and adjust what we return to the guest. Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/include/asm/sysreg.h | 2 ++ arch/arm64/kvm/sys_regs.c | 9 ++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b91570ff9db1..16d91ed51d06 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -312,6 +312,8 @@ #define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) #define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) +#define SYS_PMMIR_EL1 sys_reg(3, 0, 9, 14, 6) + #define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0) #define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 43087b50a211..4eee61fb94be 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1097,9 +1097,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, /* Limit debug to ARMv8.0 */ val &= ~FEATURE(ID_AA64DFR0_DEBUGVER); val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6); - /* Limit PMU to ARMv8.1 */ - val &= ~FEATURE(ID_AA64DFR0_PMUVER); - val |= FIELD_PREP(FEATURE(ID_AA64DFR0_PMUVER), 4); + /* Limit PMU to ARMv8.4 */ + if (FIELD_GET(FEATURE(ID_AA64DFR0_PMUVER), val) > 5) { + val &= ~FEATURE(ID_AA64DFR0_PMUVER); + val |= FIELD_PREP(FEATURE(ID_AA64DFR0_PMUVER), 5); + } break; } @@ -1524,6 +1526,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 }, { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 }, + { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi }, { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, -- 2.20.1 
_______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Hi Marc, $subject typo: ~/DBGBIDR/DBGDIDR/ On 16/02/2020 18:53, Marc Zyngier wrote: > The AArch32 CP14 DBGDIDR has bit 15 set to RES1, which our current > emulation doesn't set. Just add the missing bit. So it does. Reviewed-by: James Morse <james.morse@arm.com> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c > index 3e909b117f0c..da82c4b03aab 100644 > --- a/arch/arm64/kvm/sys_regs.c > +++ b/arch/arm64/kvm/sys_regs.c > @@ -1658,7 +1658,7 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, > p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | > (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | > (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) > - | (6 << 16) | (el3 << 14) | (el3 << 12)); > + | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); Hmmm, where el3 is: | u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); Aren't we depending on the compilers 'true' being 1 here? Thanks, James _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Hi Marc, On 16/02/2020 18:53, Marc Zyngier wrote: > Our PMU code is only implementing the ARMv8.1 features, so let's > stick to this when reporting the feature set to the guest. > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c > index 682fedd7700f..06b2d0dc6c73 100644 > --- a/arch/arm64/kvm/sys_regs.c > +++ b/arch/arm64/kvm/sys_regs.c > @@ -1093,6 +1093,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, > FEATURE(ID_AA64ISAR1_GPA) | > FEATURE(ID_AA64ISAR1_GPI)); > break; > + case SYS_ID_AA64DFR0_EL1: > + /* Limit PMU to ARMv8.1 */ Not just limit, but upgrade too! (force?) This looks safe because ARMV8_PMU_EVTYPE_EVENT always includes the extra bits this added, and the register is always trapped. The PMU version is also readable via ID_DFR0_EL1.PerfMon, should that be sanitised to be the same? (I don't think we've hidden an aarch64 feature that also existed in aarch32 before). Regardless: Reviewed-by: James Morse <james.morse@arm.com> Thanks, James > + val &= ~FEATURE(ID_AA64DFR0_PMUVER); > + val |= FIELD_PREP(FEATURE(ID_AA64DFR0_PMUVER), 4); > + break; > } > > return val; > _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Hi Marc, On 16/02/2020 18:53, Marc Zyngier wrote: > Let's not pretend we support anything but ARMv8.0 as far as the > debug architecture is concerned. (what happens for features that disappeared?) For v8.0 the 'OS Double Lock' was mandatory. With v8.2 it became optional, and not-implemented with v8.3. The guest can see whether its implemented in ID_AA64DFR0_EL1. (and its 32bit friends) Previously these values would have at least matched, even though KVM implements it as RAZ/WI (which is the not-implemented behaviour). Would anyone care that these are inconsistent? (I've never had a solid grasp of how these debug 'lock' registers are supposed to be used). Thanks, James > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c > index 06b2d0dc6c73..43087b50a211 100644 > --- a/arch/arm64/kvm/sys_regs.c > +++ b/arch/arm64/kvm/sys_regs.c > @@ -1094,6 +1094,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, > FEATURE(ID_AA64ISAR1_GPI)); > break; > case SYS_ID_AA64DFR0_EL1: > + /* Limit debug to ARMv8.0 */ > + val &= ~FEATURE(ID_AA64DFR0_DEBUGVER); > + val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6); > /* Limit PMU to ARMv8.1 */ > val &= ~FEATURE(ID_AA64DFR0_PMUVER); > val |= FIELD_PREP(FEATURE(ID_AA64DFR0_PMUVER), 4); > _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
On 18/02/2020 5:43 pm, James Morse wrote: > Hi Marc, > > $subject typo: ~/DBGBIDR/DBGDIDR/ > > On 16/02/2020 18:53, Marc Zyngier wrote: >> The AArch32 CP14 DBGDIDR has bit 15 set to RES1, which our current >> emulation doesn't set. Just add the missing bit. > > So it does. > > Reviewed-by: James Morse <james.morse@arm.com> > > >> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c >> index 3e909b117f0c..da82c4b03aab 100644 >> --- a/arch/arm64/kvm/sys_regs.c >> +++ b/arch/arm64/kvm/sys_regs.c >> @@ -1658,7 +1658,7 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, >> p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | >> (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | >> (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) >> - | (6 << 16) | (el3 << 14) | (el3 << 12)); >> + | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); > > Hmmm, where el3 is: > | u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); > > Aren't we depending on the compilers 'true' being 1 here? Pretty much, but thankfully the only compilers we support are C compilers: "The result of the logical negation operator ! is 0 if the value of its operand compares unequal to 0, 1 if the value of its operand compares equal to 0. The result has type int." And now I have you to thank for flashbacks to bitwise logical operators in Visual Basic... :P Robin. _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Hi Robin, On 18/02/2020 18:01, Robin Murphy wrote: > On 18/02/2020 5:43 pm, James Morse wrote: >> On 16/02/2020 18:53, Marc Zyngier wrote: >>> The AArch32 CP14 DBGDIDR has bit 15 set to RES1, which our current >>> emulation doesn't set. Just add the missing bit. >>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c >>> index 3e909b117f0c..da82c4b03aab 100644 >>> --- a/arch/arm64/kvm/sys_regs.c >>> +++ b/arch/arm64/kvm/sys_regs.c >>> @@ -1658,7 +1658,7 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, >>> p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | >>> (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | >>> (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) >>> - | (6 << 16) | (el3 << 14) | (el3 << 12)); >>> + | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); >> >> Hmmm, where el3 is: >> | u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); >> >> Aren't we depending on the compilers 'true' being 1 here? > > Pretty much, but thankfully the only compilers we support are C compilers: > > "The result of the logical negation operator ! is 0 if the value of its operand compares > unequal to 0, 1 if the value of its operand compares equal to 0. The result has type int." Excellent. I thought this was the sort of thing that couldn't be depended on! > And now I have you to thank for flashbacks to bitwise logical operators in Visual Basic... :P ... sorry? Thanks, James _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
On 2020-02-18 17:43, James Morse wrote: > Hi Marc, > > On 16/02/2020 18:53, Marc Zyngier wrote: >> Our PMU code is only implementing the ARMv8.1 features, so let's >> stick to this when reporting the feature set to the guest. > >> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c >> index 682fedd7700f..06b2d0dc6c73 100644 >> --- a/arch/arm64/kvm/sys_regs.c >> +++ b/arch/arm64/kvm/sys_regs.c >> @@ -1093,6 +1093,11 @@ static u64 read_id_reg(const struct kvm_vcpu >> *vcpu, >> FEATURE(ID_AA64ISAR1_GPA) | >> FEATURE(ID_AA64ISAR1_GPI)); >> break; >> + case SYS_ID_AA64DFR0_EL1: >> + /* Limit PMU to ARMv8.1 */ > > Not just limit, but upgrade too! (force?) > This looks safe because ARMV8_PMU_EVTYPE_EVENT always includes the > extra bits this added, and the register is always trapped. That's definitely not what I intended! Let me fix that one. > The PMU version is also readable via ID_DFR0_EL1.PerfMon, should that > be sanitised to be the same? (I don't think we've hidden an aarch64 > feature that also existed in aarch32 before). Indeed, yet another oversight. I'll fix that too. > Regardless: > Reviewed-by: James Morse <james.morse@arm.com> You're way too kind! ;-) M. -- Jazz is not dead. It just smells funny... _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Hi Marc, On 2/19/20 9:46 AM, Marc Zyngier wrote: > On 2020-02-18 17:43, James Morse wrote: >> Hi Marc, >> >> On 16/02/2020 18:53, Marc Zyngier wrote: >>> Our PMU code is only implementing the ARMv8.1 features, so let's >>> stick to this when reporting the feature set to the guest. >> >>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c >>> index 682fedd7700f..06b2d0dc6c73 100644 >>> --- a/arch/arm64/kvm/sys_regs.c >>> +++ b/arch/arm64/kvm/sys_regs.c >>> @@ -1093,6 +1093,11 @@ static u64 read_id_reg(const struct kvm_vcpu >>> *vcpu, >>> FEATURE(ID_AA64ISAR1_GPA) | >>> FEATURE(ID_AA64ISAR1_GPI)); >>> break; >>> + case SYS_ID_AA64DFR0_EL1: >>> + /* Limit PMU to ARMv8.1 */ >> >> Not just limit, but upgrade too! (force?) >> This looks safe because ARMV8_PMU_EVTYPE_EVENT always includes the >> extra bits this added, and the register is always trapped. > > That's definitely not what I intended! Let me fix that one. What goes wrong? The register description says to support v8.1 you need: | Extended 16-bit PMEVTYPER<n>_EL0.evtCount field | If EL2 is implemented, the MDCR_EL2.HPMD control bit It looks like the extended PMEVTYPER would work via the emulation, and EL2 guests are totally crazy. Is the STALL_* bits in ARMv8.1-PMU the problem, ... or the extra work for NV? >> The PMU version is also readable via ID_DFR0_EL1.PerfMon, should that >> be sanitised to be the same? (I don't think we've hidden an aarch64 >> feature that also existed in aarch32 before). > > Indeed, yet another oversight. I'll fix that too. (Weird variation in the aarch32 and aarch64 ID registers isn't something I care about ... who would ever look at both?) Thanks, James _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
On 2020-02-19 10:18, James Morse wrote: > Hi Marc, > > On 2/19/20 9:46 AM, Marc Zyngier wrote: >> On 2020-02-18 17:43, James Morse wrote: >>> Hi Marc, >>> >>> On 16/02/2020 18:53, Marc Zyngier wrote: >>>> Our PMU code is only implementing the ARMv8.1 features, so let's >>>> stick to this when reporting the feature set to the guest. >>> >>>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c >>>> index 682fedd7700f..06b2d0dc6c73 100644 >>>> --- a/arch/arm64/kvm/sys_regs.c >>>> +++ b/arch/arm64/kvm/sys_regs.c >>>> @@ -1093,6 +1093,11 @@ static u64 read_id_reg(const struct kvm_vcpu >>>> *vcpu, >>>> FEATURE(ID_AA64ISAR1_GPA) | >>>> FEATURE(ID_AA64ISAR1_GPI)); >>>> break; >>>> + case SYS_ID_AA64DFR0_EL1: >>>> + /* Limit PMU to ARMv8.1 */ >>> >>> Not just limit, but upgrade too! (force?) >>> This looks safe because ARMV8_PMU_EVTYPE_EVENT always includes the >>> extra bits this added, and the register is always trapped. >> >> That's definitely not what I intended! Let me fix that one. > > What goes wrong? > > The register description says to support v8.1 you need: > | Extended 16-bit PMEVTYPER<n>_EL0.evtCount field > | If EL2 is implemented, the MDCR_EL2.HPMD control bit > > It looks like the extended PMEVTYPER would work via the emulation, and > EL2 guests are totally crazy. > > Is the STALL_* bits in ARMv8.1-PMU the problem, ... or the extra work > for NV? What goes wrong is that on a v8.0 system, the guest could be tempted to use events in the 0x0400-0xffff range. It wouldn't break anything, but it wouldn't give the expected result. I don't care much for PMU support in EL2 guests at this stage. >>> The PMU version is also readable via ID_DFR0_EL1.PerfMon, should that >>> be sanitised to be the same? (I don't think we've hidden an aarch64 >>> feature that also existed in aarch32 before). >> >> Indeed, yet another oversight. I'll fix that too. > > (Weird variation in the aarch32 and aarch64 ID registers isn't > something > I care about ... 
who would ever look at both?) A 64bit guest running a 32bit EL0? M. -- Jazz is not dead. It just smells funny... _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
On Sun, 16 Feb 2020 18:53:21 +0000 Marc Zyngier <maz@kernel.org> wrote: Hi, > Our current ID register filtering is starting to be a mess of if() > statements, and isn't going to get any saner. > > Let's turn it into a switch(), which has a chance of being more > readable. Indeed, much better now. > Signed-off-by: Marc Zyngier <maz@kernel.org> Reviewed-by: Andre Przywara <andre.przywara@arm.com> Thanks, Andre > --- > arch/arm64/kvm/sys_regs.c | 22 +++++++++++++++------- > 1 file changed, 15 insertions(+), 7 deletions(-) > > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c > index da82c4b03aab..682fedd7700f 100644 > --- a/arch/arm64/kvm/sys_regs.c > +++ b/arch/arm64/kvm/sys_regs.c > @@ -9,6 +9,7 @@ > * Christoffer Dall <c.dall@virtualopensystems.com> > */ > > +#include <linux/bitfield.h> > #include <linux/bsearch.h> > #include <linux/kvm_host.h> > #include <linux/mm.h> > @@ -1070,6 +1071,8 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, > return true; > } > > +#define FEATURE(x) (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT)) > + > /* Read a sanitised cpufeature ID register by sys_reg_desc */ > static u64 read_id_reg(const struct kvm_vcpu *vcpu, > struct sys_reg_desc const *r, bool raz) > @@ -1078,13 +1081,18 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, > (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); > u64 val = raz ? 
0 : read_sanitised_ftr_reg(id); > > - if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) { > - val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); > - } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { > - val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) | > - (0xfUL << ID_AA64ISAR1_API_SHIFT) | > - (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | > - (0xfUL << ID_AA64ISAR1_GPI_SHIFT)); > + switch (id) { > + case SYS_ID_AA64PFR0_EL1: > + if (!vcpu_has_sve(vcpu)) > + val &= ~FEATURE(ID_AA64PFR0_SVE); > + break; > + case SYS_ID_AA64ISAR1_EL1: > + if (!vcpu_has_ptrauth(vcpu)) > + val &= ~(FEATURE(ID_AA64ISAR1_APA) | > + FEATURE(ID_AA64ISAR1_API) | > + FEATURE(ID_AA64ISAR1_GPA) | > + FEATURE(ID_AA64ISAR1_GPI)); > + break; > } > > return val; _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm