From mboxrd@z Thu Jan 1 00:00:00 1970
From: Shannon Zhao <zhaoshenglong@huawei.com>
Subject: [PATCH v10 09/21] KVM: ARM64: Add access handler for event counter register
Date: Wed, 27 Jan 2016 11:51:37 +0800
Message-ID: <1453866709-20324-10-git-send-email-zhaoshenglong@huawei.com>
In-Reply-To: <1453866709-20324-1-git-send-email-zhaoshenglong@huawei.com>
References: <1453866709-20324-1-git-send-email-zhaoshenglong@huawei.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: kvmarm@lists.cs.columbia.edu, marc.zyngier@arm.com, christoffer.dall@linaro.org
Cc: kvm@vger.kernel.org, will.deacon@arm.com, linux-arm-kernel@lists.infradead.org, shannon.zhao@linaro.org

From: Shannon Zhao <shannon.zhao@linaro.org>

These kinds of registers include PMEVCNTRn, PMCCNTR and PMXEVCNTR, which
is mapped to PMEVCNTRn. The access handler translates all AArch32
register offsets to AArch64 ones and uses vcpu_sys_reg() to access their
values, which avoids having to deal with big-endian layout by hand.
When reading these registers, return the sum of the stored register
value and the value the perf event has counted.

Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
 arch/arm64/kvm/sys_regs.c | 129 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 125 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 298ae94..6a50262 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -561,6 +561,48 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
         return true;
 }
 
+static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+                              struct sys_reg_params *p,
+                              const struct sys_reg_desc *r)
+{
+        u64 idx, reg, val;
+
+        if (!kvm_arm_pmu_v3_ready(vcpu))
+                return trap_raz_wi(vcpu, p, r);
+
+        if (r->CRn == 9 && r->CRm == 13) {
+                if (r->Op2 == 2) {
+                        /* PMXEVCNTR_EL0 */
+                        idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
+                              & ARMV8_COUNTER_MASK;
+                        reg = PMEVCNTR0_EL0 + idx;
+                } else if (r->Op2 == 0) {
+                        /* PMCCNTR_EL0 */
+                        idx = ARMV8_CYCLE_IDX;
+                        reg = PMCCNTR_EL0;
+                } else {
+                        BUG();
+                }
+        } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
+                /* PMEVCNTRn_EL0 */
+                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+                reg = PMEVCNTR0_EL0 + idx;
+        } else {
+                BUG();
+        }
+
+        if (!pmu_counter_idx_valid(vcpu, idx))
+                return false;
+
+        val = kvm_pmu_get_counter_value(vcpu, idx);
+        if (p->is_write)
+                vcpu_sys_reg(vcpu, reg) += (s64)p->regval - val;
+        else
+                p->regval = val;
+
+        return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
         /* DBGBVRn_EL1 */                                               \
@@ -576,6 +618,13 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
         { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),     \
           trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
 
+/* Macro to expand the PMEVCNTRn_EL0 register */
+#define PMU_PMEVCNTR_EL0(n)                                             \
+        /* PMEVCNTRn_EL0 */                                             \
+        { Op0(0b11), Op1(0b011), CRn(0b1110),                           \
+          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
+          access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+
 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n)                                            \
         /* PMEVTYPERn_EL0 */                                            \
@@ -776,13 +825,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
           access_pmceid },
         /* PMCCNTR_EL0 */
         { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
-          trap_raz_wi },
+          access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
         /* PMXEVTYPER_EL0 */
         { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
           access_pmu_evtyper },
         /* PMXEVCNTR_EL0 */
         { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
-          trap_raz_wi },
+          access_pmu_evcntr },
         /* PMUSERENR_EL0 */
         { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
           trap_raz_wi },
@@ -797,6 +846,38 @@ static const struct sys_reg_desc sys_reg_descs[] = {
         { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
           NULL, reset_unknown, TPIDRRO_EL0 },
 
+        /* PMEVCNTRn_EL0 */
+        PMU_PMEVCNTR_EL0(0),
+        PMU_PMEVCNTR_EL0(1),
+        PMU_PMEVCNTR_EL0(2),
+        PMU_PMEVCNTR_EL0(3),
+        PMU_PMEVCNTR_EL0(4),
+        PMU_PMEVCNTR_EL0(5),
+        PMU_PMEVCNTR_EL0(6),
+        PMU_PMEVCNTR_EL0(7),
+        PMU_PMEVCNTR_EL0(8),
+        PMU_PMEVCNTR_EL0(9),
+        PMU_PMEVCNTR_EL0(10),
+        PMU_PMEVCNTR_EL0(11),
+        PMU_PMEVCNTR_EL0(12),
+        PMU_PMEVCNTR_EL0(13),
+        PMU_PMEVCNTR_EL0(14),
+        PMU_PMEVCNTR_EL0(15),
+        PMU_PMEVCNTR_EL0(16),
+        PMU_PMEVCNTR_EL0(17),
+        PMU_PMEVCNTR_EL0(18),
+        PMU_PMEVCNTR_EL0(19),
+        PMU_PMEVCNTR_EL0(20),
+        PMU_PMEVCNTR_EL0(21),
+        PMU_PMEVCNTR_EL0(22),
+        PMU_PMEVCNTR_EL0(23),
+        PMU_PMEVCNTR_EL0(24),
+        PMU_PMEVCNTR_EL0(25),
+        PMU_PMEVCNTR_EL0(26),
+        PMU_PMEVCNTR_EL0(27),
+        PMU_PMEVCNTR_EL0(28),
+        PMU_PMEVCNTR_EL0(29),
+        PMU_PMEVCNTR_EL0(30),
         /* PMEVTYPERn_EL0 */
         PMU_PMEVTYPER_EL0(0),
         PMU_PMEVTYPER_EL0(1),
@@ -1025,6 +1106,13 @@ static const struct sys_reg_desc cp14_64_regs[] = {
         { Op1( 0), CRm( 2), .access = trap_raz_wi },
 };
 
+/* Macro to expand the PMEVCNTRn register */
+#define PMU_PMEVCNTR(n)                                                 \
+        /* PMEVCNTRn */                                                 \
+        { Op1(0), CRn(0b1110),                                          \
+          CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),         \
+          access_pmu_evcntr }
+
 /* Macro to expand the PMEVTYPERn register */
 #define PMU_PMEVTYPER(n)                                                \
         /* PMEVTYPERn */                                                \
@@ -1067,9 +1155,9 @@ static const struct sys_reg_desc cp15_regs[] = {
         { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
         { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
         { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
-        { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
+        { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
         { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
-        { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
+        { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
         { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
         { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
         { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
@@ -1084,6 +1172,38 @@ static const struct sys_reg_desc cp15_regs[] = {
 
         { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 
+        /* PMEVCNTRn */
+        PMU_PMEVCNTR(0),
+        PMU_PMEVCNTR(1),
+        PMU_PMEVCNTR(2),
+        PMU_PMEVCNTR(3),
+        PMU_PMEVCNTR(4),
+        PMU_PMEVCNTR(5),
+        PMU_PMEVCNTR(6),
+        PMU_PMEVCNTR(7),
+        PMU_PMEVCNTR(8),
+        PMU_PMEVCNTR(9),
+        PMU_PMEVCNTR(10),
+        PMU_PMEVCNTR(11),
+        PMU_PMEVCNTR(12),
+        PMU_PMEVCNTR(13),
+        PMU_PMEVCNTR(14),
+        PMU_PMEVCNTR(15),
+        PMU_PMEVCNTR(16),
+        PMU_PMEVCNTR(17),
+        PMU_PMEVCNTR(18),
+        PMU_PMEVCNTR(19),
+        PMU_PMEVCNTR(20),
+        PMU_PMEVCNTR(21),
+        PMU_PMEVCNTR(22),
+        PMU_PMEVCNTR(23),
+        PMU_PMEVCNTR(24),
+        PMU_PMEVCNTR(25),
+        PMU_PMEVCNTR(26),
+        PMU_PMEVCNTR(27),
+        PMU_PMEVCNTR(28),
+        PMU_PMEVCNTR(29),
+        PMU_PMEVCNTR(30),
         /* PMEVTYPERn */
         PMU_PMEVTYPER(0),
         PMU_PMEVTYPER(1),
@@ -1122,6 +1242,7 @@ static const struct sys_reg_desc cp15_regs[] = {
 
 static const struct sys_reg_desc cp15_64_regs[] = {
         { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+        { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
         { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
         { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
 };
-- 
2.0.4
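
[Editorial note] The read/write arithmetic in access_pmu_evcntr() keeps a simple
invariant: the guest-visible counter is always the stored register value plus
whatever the backing perf event has counted since. Below is a minimal standalone
sketch of that arithmetic and of the PMEVCNTRn index decode. This is plain
userspace C, not kernel code; struct fake_counter, counter_read, counter_write
and pmevcntr_index are purely illustrative names and not kernel APIs.

/*
 * Standalone illustration of the arithmetic used by access_pmu_evcntr().
 * "hw" stands in for the count accumulated by the backing perf event;
 * "base" models vcpu_sys_reg(vcpu, reg).
 */
#include <stdint.h>
#include <stdio.h>

struct fake_counter {
        uint64_t base;  /* models vcpu_sys_reg(vcpu, reg) */
        uint64_t hw;    /* models the perf event's count  */
};

/* models kvm_pmu_get_counter_value(): stored value + perf count */
static uint64_t counter_read(const struct fake_counter *c)
{
        return c->base + c->hw;
}

/* models the write path: vcpu_sys_reg(vcpu, reg) += (s64)val - current */
static void counter_write(struct fake_counter *c, uint64_t val)
{
        c->base += val - counter_read(c);  /* wraps like the real u64 math */
}

/* models the PMEVCNTRn_EL0 decode: idx = CRm[1:0]:Op2[2:0] */
static unsigned int pmevcntr_index(unsigned int crm, unsigned int op2)
{
        return ((crm & 3) << 3) | (op2 & 7);
}

int main(void)
{
        struct fake_counter c = { .base = 0, .hw = 100 };

        counter_write(&c, 5);   /* guest writes 5 to the counter        */
        c.hw += 40;             /* perf counts 40 more events afterward */
        printf("guest reads %llu\n",    /* prints 45 */
               (unsigned long long)counter_read(&c));

        /* CRm = 0b1001, Op2 = 0b101 selects PMEVCNTR13 */
        printf("index %u\n", pmevcntr_index(0x9, 0x5));
        return 0;
}

With the numbers above, the write of 5 biases base by -95 (modulo 2^64), so
after 40 further events the guest reads back 45, which is how an up-counting
hardware counter would behave.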