From: Maxim Levitsky <mlevitsk@redhat.com>
To: Sean Christopherson <seanjc@google.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Xiaoyao Li <xiaoyao.li@intel.com>,
	Reiji Watanabe <reijiw@google.com>
Subject: Re: [PATCH 13/15] KVM: x86: Move uret MSR slot management to common x86
Date: Mon, 10 May 2021 11:28:54 +0300
Message-ID: <f3a4ae84a227d131540762c55d357c6d7f48ac48.camel@redhat.com>
In-Reply-To: <20210504171734.1434054-14-seanjc@google.com>

On Tue, 2021-05-04 at 10:17 -0700, Sean Christopherson wrote:
> Now that SVM and VMX both probe MSRs before "defining" user return slots
> for them, consolidate the code for probe+define into common x86 and
> eliminate the odd behavior of having the vendor code define the slot for
> a given MSR.
> 
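This indeed reads much nicer on the vendor side: the probe and the slot
assignment are now a single call, and the return value carries both
outcomes, i.e. the slot index in the common list, or -1 if the probe
failed. A minimal sketch of the resulting caller pattern, loosely based
on the MSR_TSC_AUX handling in svm.c (not the exact code, just the shape
of it):

	/* hardware setup: probe the MSR and append it to the common list */
	tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);

	/* later: only touch the uret machinery if the probe succeeded */
	if (tsc_aux_uret_slot >= 0)
		kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);

VMX can keep ignoring the return value since it resolves slots again via
kvm_find_user_return_msr().
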
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  3 +--
>  arch/x86/kvm/svm/svm.c          |  5 +----
>  arch/x86/kvm/vmx/vmx.c          | 19 ++++---------------
>  arch/x86/kvm/x86.c              | 19 +++++++++++--------
>  4 files changed, 17 insertions(+), 29 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 10663610f105..a4b912f7e427 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1778,9 +1778,8 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
>  		    unsigned long ipi_bitmap_high, u32 min,
>  		    unsigned long icr, int op_64_bit);
>  
> -void kvm_define_user_return_msr(unsigned index, u32 msr);
> +int kvm_add_user_return_msr(u32 msr);
>  int kvm_find_user_return_msr(u32 msr);
> -int kvm_probe_user_return_msr(u32 msr);
>  int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
>  
>  u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 231b9650d864..de921935e8de 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -959,10 +959,7 @@ static __init int svm_hardware_setup(void)
>  		kvm_tsc_scaling_ratio_frac_bits = 32;
>  	}
>  
> -	if (!kvm_probe_user_return_msr(MSR_TSC_AUX)) {
> -		tsc_aux_uret_slot = 0;
> -		kvm_define_user_return_msr(tsc_aux_uret_slot, MSR_TSC_AUX);
> -	}
> +	tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
>  
>  	/* Check for pause filtering support */
>  	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 7a53568b34fc..26f82f302391 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -454,9 +454,6 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
>  
>  static unsigned long host_idt_base;
>  
> -/* Number of user return MSRs that are actually supported in hardware. */
> -static int vmx_nr_uret_msrs;
> -
>  #if IS_ENABLED(CONFIG_HYPERV)
>  static bool __read_mostly enlightened_vmcs = true;
>  module_param(enlightened_vmcs, bool, 0444);
> @@ -1218,7 +1215,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
>  	 */
>  	if (!vmx->guest_uret_msrs_loaded) {
>  		vmx->guest_uret_msrs_loaded = true;
> -		for (i = 0; i < vmx_nr_uret_msrs; ++i) {
> +		for (i = 0; i < kvm_nr_uret_msrs; ++i) {
>  			if (!vmx->guest_uret_msrs[i].load_into_hardware)
>  				continue;
>  
> @@ -6921,7 +6918,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
>  			goto free_vpid;
>  	}
>  
> -	for (i = 0; i < vmx_nr_uret_msrs; ++i) {
> +	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
>  		vmx->guest_uret_msrs[i].data = 0;
>  		vmx->guest_uret_msrs[i].mask = -1ull;
>  	}
> @@ -7810,20 +7807,12 @@ static __init void vmx_setup_user_return_msrs(void)
>  		MSR_EFER, MSR_TSC_AUX, MSR_STAR,
>  		MSR_IA32_TSX_CTRL,
>  	};
> -	u32 msr;
>  	int i;
>  
>  	BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
>  
> -	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
> -		msr = vmx_uret_msrs_list[i];
> -
> -		if (kvm_probe_user_return_msr(msr))
> -			continue;
> -
> -		kvm_define_user_return_msr(vmx_nr_uret_msrs, msr);
> -		vmx_nr_uret_msrs++;
> -	}
> +	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
> +		kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
>  }
>  
>  static __init int hardware_setup(void)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2fd46e917666..adca491d3b4b 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -336,7 +336,7 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
>  	}
>  }
>  
> -int kvm_probe_user_return_msr(u32 msr)
> +static int kvm_probe_user_return_msr(u32 msr)
>  {
>  	u64 val;
>  	int ret;
> @@ -350,16 +350,18 @@ int kvm_probe_user_return_msr(u32 msr)
>  	preempt_enable();
>  	return ret;
>  }
> -EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
>  
> -void kvm_define_user_return_msr(unsigned slot, u32 msr)
> +int kvm_add_user_return_msr(u32 msr)
>  {
> -	BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
> -	kvm_uret_msrs_list[slot] = msr;
> -	if (slot >= kvm_nr_uret_msrs)
> -		kvm_nr_uret_msrs = slot + 1;
> +	BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);
> +
> +	if (kvm_probe_user_return_msr(msr))
> +		return -1;
> +
> +	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
> +	return kvm_nr_uret_msrs++;
>  }
> -EXPORT_SYMBOL_GPL(kvm_define_user_return_msr);
> +EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
>  
>  int kvm_find_user_return_msr(u32 msr)
>  {
> @@ -8169,6 +8171,7 @@ int kvm_arch_init(void *opaque)
>  		printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n");
>  		goto out_free_x86_emulator_cache;
>  	}
> +	kvm_nr_uret_msrs = 0;
>  
>  	r = kvm_mmu_module_init();
>  	if (r)
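
For my own notes, the init ordering that makes the BUG_ON and the
kvm_nr_uret_msrs reset work out looks roughly like this (a sketch of the
call flow, not actual code, assuming I'm reading the vendor module init
path correctly):

	vmx_init()/svm_init()
	  kvm_init()
	    kvm_arch_init()                /* kvm_nr_uret_msrs = 0        */
	    kvm_arch_hardware_setup()
	      svm/vmx hardware_setup()
	        kvm_add_user_return_msr()  /* appends, returns slot or -1 */

so the common list is presumably rebuilt from scratch each time a vendor
module loads, and vendor code can no longer hand out its own (potentially
overlapping) slot numbers.
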
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

Best regards,
	Maxim Levitsky
