From: Wei Huang <wei.huang2@amd.com>
To: Yu Zhang <yu.c.zhang@linux.intel.com>
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	pbonzini@redhat.com, seanjc@google.com, vkuznets@redhat.com,
	wanpengli@tencent.com, jmattson@google.com, joro@8bytes.org,
	tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
	x86@kernel.org, hpa@zytor.com
Subject: Re: [PATCH v2 1/3] KVM: x86: Allow CPU to force vendor-specific TDP level
Date: Sun, 8 Aug 2021 23:11:40 -0500	[thread overview]
Message-ID: <c6324362-1439-ef94-789b-5934c0e1cdb8@amd.com> (raw)
In-Reply-To: <20210809035806.5cqdqm5vkexvngda@linux.intel.com>



On 8/8/21 10:58 PM, Yu Zhang wrote:
> On Sun, Aug 08, 2021 at 02:26:56PM -0500, Wei Huang wrote:
>> AMD future CPUs will require a 5-level NPT if host CR4.LA57 is set.
> 
> Sorry, but why? NPT is not indexed by HVA.

NPT is not indexed by HVA - it is always indexed by GPA. What I meant is 
that the NPT page-table level has to match the host OS page-table level: 
if a 5-level page table is enabled in the host OS (CR4.LA57=1), the 
guest NPT has to be 5-level too.
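
For illustration only (the actual change lives in patch 3/3 and may 
differ in detail), the SVM side could derive the forced level from the 
host paging mode with the existing pgtable_l5_enabled() helper, roughly:

static int get_max_npt_level(void)
{
#ifdef CONFIG_X86_64
	/*
	 * Sketch: use 5-level NPT whenever the host kernel runs with
	 * 5-level paging (CR4.LA57=1), otherwise stay at 4-level.
	 */
	return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

With that, svm_hardware_setup() below passes the same value as both the 
forced and the max root level, so every VM gets the host-mandated level.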

> 
>> To prevent kvm_mmu_get_tdp_level() from incorrectly changing NPT level
>> on behalf of CPUs, add a new parameter in kvm_configure_mmu() to force
>> a fixed TDP level.
>>
>> Signed-off-by: Wei Huang <wei.huang2@amd.com>
>> ---
>>   arch/x86/include/asm/kvm_host.h |  5 ++---
>>   arch/x86/kvm/mmu/mmu.c          | 10 ++++++++--
>>   arch/x86/kvm/svm/svm.c          |  4 +++-
>>   arch/x86/kvm/vmx/vmx.c          |  3 ++-
>>   4 files changed, 15 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>> index 974cbfb1eefe..6d16f75cc8da 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -723,7 +723,6 @@ struct kvm_vcpu_arch {
>>   
>>   	u64 reserved_gpa_bits;
>>   	int maxphyaddr;
>> -	int max_tdp_level;
>>   
>>   	/* emulate context */
>>   
>> @@ -1747,8 +1746,8 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>>   void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
>>   void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
>>   
>> -void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
>> -		       int tdp_huge_page_level);
>> +void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
>> +		       int tdp_max_root_level, int tdp_huge_page_level);
>>   
>>   static inline u16 kvm_read_ldt(void)
>>   {
>> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
>> index 66f7f5bc3482..c11ee4531f6d 100644
>> --- a/arch/x86/kvm/mmu/mmu.c
>> +++ b/arch/x86/kvm/mmu/mmu.c
>> @@ -97,6 +97,7 @@ module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
>>   bool tdp_enabled = false;
>>   
>>   static int max_huge_page_level __read_mostly;
>> +static int tdp_root_level __read_mostly;
> 
> I think this is a broken design - meaning KVM can only use 5-level or
> 4-level NPT for all VMs.

Broken normally means non-functional or buggy, which doesn't apply here. 
A good TLB design should be able to offset the potential overhead of a 
5-level page table in most cases.

> 
> B.R.
> Yu
> 
>>   static int max_tdp_level __read_mostly;
>>   
>>   enum {
>> @@ -4562,6 +4563,10 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
>>   
>>   static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
>>   {
>> +	/* tdp_root_level is architecture forced level, use it if nonzero */
>> +	if (tdp_root_level)
>> +		return tdp_root_level;
>> +
>>   	/* Use 5-level TDP if and only if it's useful/necessary. */
>>   	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
>>   		return 4;
>> @@ -5253,10 +5258,11 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
>>   	 */
>>   }
>>   
>> -void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
>> -		       int tdp_huge_page_level)
>> +void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
>> +		       int tdp_max_root_level, int tdp_huge_page_level)
>>   {
>>   	tdp_enabled = enable_tdp;
>> +	tdp_root_level = tdp_forced_root_level;
>>   	max_tdp_level = tdp_max_root_level;
>>   
>>   	/*
>> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>> index e8ccab50ebf6..f361d466e18e 100644
>> --- a/arch/x86/kvm/svm/svm.c
>> +++ b/arch/x86/kvm/svm/svm.c
>> @@ -1015,7 +1015,9 @@ static __init int svm_hardware_setup(void)
>>   	if (!boot_cpu_has(X86_FEATURE_NPT))
>>   		npt_enabled = false;
>>   
>> -	kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
>> +	/* Force VM NPT level equal to the host's max NPT level */
>> +	kvm_configure_mmu(npt_enabled, get_max_npt_level(),
>> +			  get_max_npt_level(), PG_LEVEL_1G);
>>   	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
>>   
>>   	/* Note, SEV setup consumes npt_enabled. */
>> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>> index 927a552393b9..034e1397c7d5 100644
>> --- a/arch/x86/kvm/vmx/vmx.c
>> +++ b/arch/x86/kvm/vmx/vmx.c
>> @@ -7803,7 +7803,8 @@ static __init int hardware_setup(void)
>>   		ept_lpage_level = PG_LEVEL_2M;
>>   	else
>>   		ept_lpage_level = PG_LEVEL_4K;
>> -	kvm_configure_mmu(enable_ept, vmx_get_max_tdp_level(), ept_lpage_level);
>> +	kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
>> +			  ept_lpage_level);
>>   
>>   	/*
>>   	 * Only enable PML when hardware supports PML feature, and both EPT
>> -- 
>> 2.31.1
>>


Thread overview: 19+ messages
2021-08-08 19:26 [PATCH v2 0/3] SVM 5-level page table support Wei Huang
2021-08-08 19:26 ` [PATCH v2 1/3] KVM: x86: Allow CPU to force vendor-specific TDP level Wei Huang
2021-08-09  3:58   ` Yu Zhang
2021-08-09  4:11     ` Wei Huang [this message]
2021-08-09  4:27       ` Yu Zhang
2021-08-09  4:33         ` Wei Huang
2021-08-09  6:42           ` Yu Zhang
2021-08-09 15:30             ` Sean Christopherson
2021-08-09 21:49               ` Jim Mattson
2021-08-10  9:23                 ` Paolo Bonzini
2021-08-10  7:40               ` Yu Zhang
2021-08-10  9:25                 ` Paolo Bonzini
2021-08-10 11:00                   ` Yu Zhang
2021-08-10 12:47                     ` Paolo Bonzini
2021-08-10 14:37                       ` Yu Zhang
2021-08-08 19:26 ` [PATCH v2 2/3] KVM: x86: Handle the case of 5-level shadow page table Wei Huang
2021-08-09 15:17   ` Sean Christopherson
2021-08-09 17:03     ` Wei Huang
2021-08-08 19:26 ` [PATCH v2 3/3] KVM: SVM: Add 5-level page table support for SVM Wei Huang
