From: Marc Zyngier <marc.zyngier@arm.com>
To: Andrew Murray <andrew.murray@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>,
	Julien Thierry <julien.thierry@arm.com>,
	Christoffer Dall <christoffer.dall@arm.com>,
	James Morse <james.morse@arm.com>,
	kvmarm@lists.cs.columbia.edu,
	linux-arm-kernel@lists.infradead.org
Subject: Re: [PATCH v7 5/5] KVM: arm/arm64: support chained PMU counters
Date: Wed, 22 May 2019 12:50:43 +0100
Message-ID: <37b16e12-cc0e-1d27-17f9-9bd1a326610f@arm.com>
In-Reply-To: <20190522103537.GZ8268@e119886-lin.cambridge.arm.com>

On 22/05/2019 11:35, Andrew Murray wrote:
> On Tue, May 21, 2019 at 05:31:47PM +0100, Marc Zyngier wrote:
>> On 21/05/2019 16:52, Andrew Murray wrote:
>>> ARMv8 provides support for chained PMU counters: when an event type
>>> of 0x001E is set for an odd-numbered counter, that counter increments
>>> by one for each overflow of the preceding even-numbered counter. Let's
>>> emulate this in KVM by creating a 64-bit perf counter when a user
>>> chains two emulated counters together.
>>>
>>> For chained events we only support generating an overflow interrupt
>>> on the high counter. We use the attributes of the low counter to
>>> determine the attributes of the perf event.
>>>
>>> Suggested-by: Marc Zyngier <marc.zyngier@arm.com>
>>> Signed-off-by: Andrew Murray <andrew.murray@arm.com>
>>> ---
>>>  include/kvm/arm_pmu.h |   2 +
>>>  virt/kvm/arm/pmu.c    | 246 ++++++++++++++++++++++++++++++++++++------
>>>  2 files changed, 215 insertions(+), 33 deletions(-)
>>>
>>> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
>>> index b73f31baca52..8b434745500a 100644
>>> --- a/include/kvm/arm_pmu.h
>>> +++ b/include/kvm/arm_pmu.h
>>> @@ -22,6 +22,7 @@
>>>  #include <asm/perf_event.h>
>>>  
>>>  #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
>>> +#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
>>>  
>>>  #ifdef CONFIG_KVM_ARM_PMU
>>>  
>>> @@ -34,6 +35,7 @@ struct kvm_pmc {
>>>  struct kvm_pmu {
>>>  	int irq_num;
>>>  	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
>>> +	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
>>>  	bool ready;
>>>  	bool created;
>>>  	bool irq_level;
>>> diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
>>> index ae1e886d4a1a..4b0981c402c6 100644
>>> --- a/virt/kvm/arm/pmu.c
>>> +++ b/virt/kvm/arm/pmu.c
>>> @@ -25,28 +25,128 @@
>>>  #include <kvm/arm_vgic.h>
>>>  
>>>  static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
>>> +
>>> +#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
>>> +
>>> +static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
>>> +{
>>> +	struct kvm_pmu *pmu;
>>> +	struct kvm_vcpu_arch *vcpu_arch;
>>> +
>>> +	pmc -= pmc->idx;
>>> +	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
>>> +	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
>>> +	return container_of(vcpu_arch, struct kvm_vcpu, arch);
>>> +}
>>> +
>>>  /**
>>> - * kvm_pmu_get_counter_value - get PMU counter value
>>> + * kvm_pmu_pmc_is_chained - determine if the pmc is chained
>>> + * @pmc: The PMU counter pointer
>>> + */
>>> +static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
>>> +{
>>> +	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
>>> +
>>> +	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
>>> +}
>>> +
>>> +/**
>>> + * kvm_pmu_pmc_is_high_counter - determine if select_idx is a high/low counter
>>> + * @select_idx: The counter index
>>> + */
>>> +static bool kvm_pmu_pmc_is_high_counter(u64 select_idx)
>>> +{
>>> +	return select_idx & 0x1;
>>> +}
>>> +
>>> +/**
>>> + * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
>>> + * @pmc: The PMU counter pointer
>>> + *
>>> + * When a pair of PMCs are chained together we use the low counter (canonical)
>>> + * to hold the underlying perf event.
>>> + */
>>> +static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
>>> +{
>>> +	if (kvm_pmu_pmc_is_chained(pmc) &&
>>> +	    kvm_pmu_pmc_is_high_counter(pmc->idx))
>>> +		return pmc - 1;
>>> +
>>> +	return pmc;
>>> +}
>>> +
>>> +/**
>>> + * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
>>>   * @vcpu: The vcpu pointer
>>>   * @select_idx: The counter index
>>>   */
>>> -u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
>>> +static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
>>>  {
>>> -	u64 counter, reg, enabled, running;
>>> -	struct kvm_pmu *pmu = &vcpu->arch.pmu;
>>> -	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
>>> +	u64 eventsel, reg;
>>>  
>>> -	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
>>> -	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
>>> -	counter = __vcpu_sys_reg(vcpu, reg);
>>> +	select_idx |= 0x1;
>>> +
>>> +	if (select_idx == ARMV8_PMU_CYCLE_IDX)
>>> +		return false;
>>> +
>>> +	reg = PMEVTYPER0_EL0 + select_idx;
>>> +	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;
>>> +
>>> +	return armv8pmu_evtype_is_chain(eventsel);
>>> +}
>>> +
>>> +/**
>>> + * kvm_pmu_get_pair_counter_value - get PMU counter value
>>> + * @vcpu: The vcpu pointer
>>> + * @pmc: The PMU counter pointer
>>> + */
>>> +static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
>>> +					  struct kvm_pmc *pmc)
>>> +{
>>> +	u64 counter, counter_high, reg, enabled, running;
>>> +
>>> +	if (kvm_pmu_pmc_is_chained(pmc)) {
>>> +		pmc = kvm_pmu_get_canonical_pmc(pmc);
>>> +		reg = PMEVCNTR0_EL0 + pmc->idx;
>>> +
>>> +		counter = __vcpu_sys_reg(vcpu, reg);
>>> +		counter_high = __vcpu_sys_reg(vcpu, reg + 1);
>>> +
>>> +		counter = lower_32_bits(counter) | (counter_high << 32);
>>> +	} else {
>>> +		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
>>> +		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
>>> +		counter = __vcpu_sys_reg(vcpu, reg);
>>> +	}
>>>  
>>> -	/* The real counter value is equal to the value of counter register plus
>>> +	/*
>>> +	 * The real counter value is equal to the value of counter register plus
>>>  	 * the value perf event counts.
>>>  	 */
>>>  	if (pmc->perf_event)
>>>  		counter += perf_event_read_value(pmc->perf_event, &enabled,
>>>  						 &running);
>>>  
>>> +	return counter;
>>> +}
>>> +
>>> +/**
>>> + * kvm_pmu_get_counter_value - get PMU counter value
>>> + * @vcpu: The vcpu pointer
>>> + * @select_idx: The counter index
>>> + */
>>> +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
>>> +{
>>> +	u64 counter;
>>> +	struct kvm_pmu *pmu = &vcpu->arch.pmu;
>>> +	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
>>> +
>>> +	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
>>> +
>>> +	if (kvm_pmu_pmc_is_chained(pmc) &&
>>> +	    kvm_pmu_pmc_is_high_counter(select_idx))
>>> +		counter >>= 32;
>>> +
>>>  	return counter & pmc->bitmask;
>>>  }
>>>  
>>> @@ -74,6 +174,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
>>>   */
>>>  static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
>>>  {
>>> +	pmc = kvm_pmu_get_canonical_pmc(pmc);
>>>  	if (pmc->perf_event) {
>>>  		perf_event_disable(pmc->perf_event);
>>>  		perf_event_release_kernel(pmc->perf_event);
>>> @@ -91,13 +192,24 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
>>>  {
>>>  	u64 counter, reg;
>>>  
>>> -	if (pmc->perf_event) {
>>> +	pmc = kvm_pmu_get_canonical_pmc(pmc);
>>> +	if (!pmc->perf_event)
>>> +		return;
>>> +
>>> +	if (kvm_pmu_pmc_is_chained(pmc)) {
>>> +		counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
>>> +
>>> +		reg = PMEVCNTR0_EL0 + pmc->idx;
>>> +		__vcpu_sys_reg(vcpu, reg) = counter & pmc->bitmask;
>>> +		__vcpu_sys_reg(vcpu, reg + 1) = (counter >> 32) & pmc->bitmask;
>>
>> There is something odd here: You use the same mask for both half of the
>> counter. The second one doesn't make much sense, and the first one makes
>> me wonder... Why isn't bitmask a 64bit quantity in this case?
>>
> 
> Yes, it's incorrect; the second bitmask should have been pmc+1's bitmask. (In
> the previous revision of this series the sysreg values were populated by two
> calls to kvm_pmu_get_counter_value with pmc and pmc+1 - I introduced this error
> when switching to kvm_pmu_get_pair_counter_value.)
> 
> My rationale has been that the __vcpu_sys_regs should represent the underlying
> hardware registers. This means a 64-bit register with the upper 32 bits RES0 for
> the PMEVCNTR<n> registers (chained or otherwise) and a full 64-bit register for
> PMCCNTR. We currently use the bitmask to mask off the RES0 bits in
> kvm_pmu_get_counter_value when it is called from access_pmu_evcntr (to match the
> counter width), and have thus treated bitmask as the width of the counter
> *within* each register.

Well, the truncation is a property of the counter registers, and that's
what we should honor. The bitmask is a property associated with the perf
event, allowing us to only consider the useful bits.
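
To make that concrete, here is a rough sketch of the read path I have in
mind (untested, purely illustrative): the 32bit truncation happens where
the guest-visible register is read, and pmc->bitmask isn't involved at
all:

u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_pmc_is_high_counter(select_idx))
		/* The high half of a chained pair is the top 32 bits */
		counter = upper_32_bits(counter);
	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
		/* Only the cycle counter is architecturally 64bit wide */
		counter = lower_32_bits(counter);

	return counter;
}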

> It may be possible, for chained counters, to use only the register value and
> bitmask of the canonical counter (just as we do now for the perf_event). Thus for
> chained counters the bitmask is stored in the low counter and is always 64 bits
> wide, and the 64-bit counter value is also stored only in the low counter's
> vcpu_sys_reg.
> 
> This means we could calculate the sample_period with the canonical bitmask (instead
> of the hunk you also commented on). However it means that in
> kvm_pmu_get_counter_value we'd have to mask out the RES0 bits for indexes that are
> not the cycle counter. We would also have to write back the value of the high
> counter upon demotion from chained to unchained in kvm_pmu_update_pmc_chained.
> 
> Does this seem a better approach to you?

It would be much better. It would certainly make it clear that there is
a difference between the perf_event and the emulated counter.
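
For instance (rough and untested, just to illustrate), the chained case
of the write-back in kvm_pmu_stop_counter could then simply split the
64bit pair value, with no bitmask involved at all:

	if (kvm_pmu_pmc_is_chained(pmc)) {
		counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

		reg = PMEVCNTR0_EL0 + pmc->idx;
		/* Low half in the even counter, high half in the odd one */
		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
	}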

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...
