* [PATCH] KVM: x86: Adjust counter sample period after a wrmsr
From: Eric Hankland @ 2020-02-22  2:34 UTC
  To: Jim Mattson, Peter Shier; +Cc: Paolo Bonzini, linux-kernel, kvm, Eric Hankland

The sample_period of a counter tracks when that counter will
overflow and set global status/trigger a PMI. However, the sample
period is currently only set when the backing perf_event is created
or when a counter is resumed; update it after a WRMSR as well so that
running counters accurately reflect their new value.

Signed-off-by: Eric Hankland <ehankland@google.com>
---
 arch/x86/kvm/pmu.c           | 4 ++--
 arch/x86/kvm/pmu.h           | 9 +++++++++
 arch/x86/kvm/vmx/pmu_intel.c | 6 ++++++
 3 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index bcc6a73d6628..d1f8ca57d354 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -111,7 +111,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 		.config = config,
 	};
 
-	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+	attr.sample_period = get_sample_period(pmc, pmc->counter);
 
 	if (in_tx)
 		attr.config |= HSW_IN_TX;
@@ -158,7 +158,7 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 
 	/* recalibrate sample period and check if it's accepted by perf core */
 	if (perf_event_period(pmc->perf_event,
-			(-pmc->counter) & pmc_bitmask(pmc)))
+			      get_sample_period(pmc, pmc->counter)))
 		return false;
 
 	/* reuse perf_event to serve as pmc_reprogram_counter() does*/
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 13332984b6d5..354b8598b6c1 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -129,6 +129,15 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
 	return NULL;
 }
 
+static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
+{
+	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
+
+	if (!sample_period)
+		sample_period = pmc_bitmask(pmc) + 1;
+	return sample_period;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index fd21cdb10b79..e933541751fb 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -263,9 +263,15 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			if (!msr_info->host_initiated)
 				data = (s64)(s32)data;
 			pmc->counter += data - pmc_read_counter(pmc);
+			if (pmc->perf_event)
+				perf_event_period(pmc->perf_event,
+						  get_sample_period(pmc, data));
 			return 0;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			pmc->counter += data - pmc_read_counter(pmc);
+			if (pmc->perf_event)
+				perf_event_period(pmc->perf_event,
+						  get_sample_period(pmc, data));
 			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			if (data == pmc->eventsel)

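To make the arithmetic above concrete, here is a standalone userspace
model of get_sample_period() (sample_period() and the 48-bit mask are
illustrative stand-ins; the real width comes from pmc_bitmask(pmc)):

#include <stdint.h>
#include <stdio.h>

/* model of get_sample_period(); mask stands in for pmc_bitmask(pmc) */
static uint64_t sample_period(uint64_t counter, uint64_t mask)
{
	uint64_t period = (-counter) & mask;

	if (!period)	/* a write of 0 means a full wrap, not a 0 period */
		period = mask + 1;
	return period;
}

int main(void)
{
	uint64_t mask = (1ULL << 48) - 1;	/* assumed 48-bit counter */

	/* writing "16 before overflow" leaves a period of 16 events */
	printf("%llu\n", (unsigned long long)sample_period(mask - 15, mask));
	/* writing 0 yields a full 2^48 period; a 0 period would be
	 * rejected by the perf core */
	printf("%llu\n", (unsigned long long)sample_period(0, mask));
	return 0;
}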

* Re: [PATCH] KVM: x86: Adjust counter sample period after a wrmsr
From: Paolo Bonzini @ 2020-02-22  7:34 UTC
  To: Eric Hankland, Jim Mattson, Peter Shier; +Cc: linux-kernel, kvm

On 22/02/20 03:34, Eric Hankland wrote:
> The sample_period of a counter tracks when that counter will
> overflow and set global status/trigger a PMI. However, the sample
> period is currently only set when the backing perf_event is created
> or when a counter is resumed; update it after a WRMSR as well so that
> running counters accurately reflect their new value.
> 
> Signed-off-by: Eric Hankland <ehankland@google.com>
> [...]

Queued, thanks.

Paolo



* Re: [PATCH] KVM: x86: Adjust counter sample period after a wrmsr
From: Like Xu @ 2020-02-24  6:56 UTC
  To: Paolo Bonzini, Eric Hankland, Jim Mattson, Peter Shier; +Cc: linux-kernel, kvm

Hi Hankland,

On 2020/2/22 15:34, Paolo Bonzini wrote:
> On 22/02/20 03:34, Eric Hankland wrote:
>> The sample_period of a counter tracks when that counter will
>> overflow and set global status/trigger a PMI. However, the sample
>> period is currently only set when the backing perf_event is created
>> or when a counter is resumed; update it after a WRMSR as well so that
>> running counters accurately reflect their new value.
>> [...]
>> @@ -263,9 +263,15 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>   			if (!msr_info->host_initiated)
>>   				data = (s64)(s32)data;
>>   			pmc->counter += data - pmc_read_counter(pmc);
>> +			if (pmc->perf_event)
>> +				perf_event_period(pmc->perf_event,
>> +						  get_sample_period(pmc, data));
>>   			return 0;
>>   		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
>>   			pmc->counter += data - pmc_read_counter(pmc);
>> +			if (pmc->perf_event)
>> +				perf_event_period(pmc->perf_event,
>> +						  get_sample_period(pmc, data));
>>   			return 0;
>> [...]
Although resetting a running counter is allowed, it is not
recommended.

The motivation of this patch looks good to me.

However, it does hurt performance: the WRMSR trap now calls
perf_event_period() on every write to a running counter, and this path
has already taken perf_event_ctx_lock once via pmc_read_counter() ->
perf_event_read_value().
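
For reference, a condensed sketch of the two perf core entry points
involved (as the kernel/events/core.c wrappers look in this era of the
tree; details elided) - both serialize on the event context:

u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event_context *ctx;
	u64 count;

	ctx = perf_event_ctx_lock(event);	/* lock #1, via pmc_read_counter() */
	count = __perf_event_read_value(event, enabled, running);
	perf_event_ctx_unlock(event, ctx);

	return count;
}

int perf_event_period(struct perf_event *event, u64 value)
{
	struct perf_event_context *ctx;
	int ret;

	ctx = perf_event_ctx_lock(event);	/* lock #2, on every counter wrmsr */
	ret = _perf_event_period(event, value);
	perf_event_ctx_unlock(event, ctx);

	return ret;
}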

Thanks,
Like Xu


* Re: [PATCH] KVM: x86: Adjust counter sample period after a wrmsr
From: Eric Hankland @ 2020-02-25  0:08 UTC
  To: Like Xu; +Cc: Paolo Bonzini, Jim Mattson, Peter Shier, linux-kernel, kvm

Hi Like -

Thanks for the feedback - is your recommendation to do the read and
the period change at the same time so the lock is only taken once, or
is there another way around this while still handling writes correctly?

Eric


* Re: [PATCH] KVM: x86: Adjust counter sample period after a wrmsr
From: Like Xu @ 2020-02-26  3:04 UTC
  To: Eric Hankland; +Cc: Paolo Bonzini, Jim Mattson, Peter Shier, linux-kernel, kvm

On 2020/2/25 8:08, Eric Hankland wrote:
> Hi Like -
> 
> Thanks for the feedback - is your recommendation to do the read and
> the period change at the same time so the lock is only taken once, or
> is there another way around this while still handling writes correctly?

For non-running counters (the most common situation), we have plenty
of chances to reflect their new periods later. In that case, calling
perf_event_period() in the counter MSR trap is redundant and
burdensome.

A better way is to check whether the counter is running via
pmc_speculative_in_use(), and if so, just trigger
kvm_make_request(KVM_REQ_PMU, pmc->vcpu).
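
That defers the reprogramming to kvm_pmu_handle_event() on the next
vcpu entry (the handler would also need the counter marked for
reprogramming, e.g. in reprogram_pmi). A minimal, untested sketch of
the wrmsr side, reusing the intel_pmu_set_msr() context from the patch
above:

	pmc->counter += data - pmc_read_counter(pmc);
	/* defer the period update to the next vcpu entry instead of
	 * calling perf_event_period() in the trap itself */
	if (pmc_speculative_in_use(pmc))
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	return 0;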

Thanks,
Like Xu

