All of lore.kernel.org
 help / color / mirror / Atom feed
From: Brijesh Singh <brijesh.singh@amd.com>
To: "Tom Lendacky" <thomas.lendacky@amd.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Stephen Rothwell" <sfr@canb.auug.org.au>,
	"Radim Krčmář" <rkrcmar@redhat.com>, KVM <kvm@vger.kernel.org>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Ingo Molnar" <mingo@elte.hu>, "H. Peter Anvin" <hpa@zytor.com>,
	"Peter Zijlstra" <peterz@infradead.org>
Cc: brijesh.singh@amd.com,
	Linux-Next Mailing List <linux-next@vger.kernel.org>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	Yu Zhang <yu.c.zhang@linux.intel.com>,
	paolo.bonzini@gmail.com
Subject: Re: linux-next: manual merge of the kvm tree with the tip tree
Date: Fri, 25 Aug 2017 11:53:32 -0500	[thread overview]
Message-ID: <a71c9dbd-ec32-8bb6-9d36-c138d9b4f001@amd.com> (raw)
In-Reply-To: <dd8be323-8ffb-bab5-0431-f77e48fd5df1@amd.com>

Hi Paolo,


On 08/25/2017 08:57 AM, Tom Lendacky wrote:
> On 8/25/2017 1:39 AM, Paolo Bonzini wrote:
>> On 25/08/2017 06:39, Stephen Rothwell wrote:

>> First, rsvd_bits is just a simple function to return some 1 bits.  Applying
>> a mask based on properties of the host MMU is incorrect.
>>
>> Second, the masks computed by __reset_rsvds_bits_mask also apply to
>> guest page tables, where the C bit is reserved since we don't emulate
>> SME.
>>
>> Something like this:
> 

Thanks for the tip. I have expanded the patch to cover the TDP cases and have verified
that it works fine with SME-enabled KVM. If you are okay with this, then I can
send the patch.

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ccb70b8..7a8edc0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4109,16 +4109,30 @@ void
  reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
  {
         bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+       struct rsvd_bits_validate *shadow_zero_check;
+       int i;
  
         /*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ccb70b8..7a8edc0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4109,16 +4109,30 @@ void
  reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
  {
         bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+       struct rsvd_bits_validate *shadow_zero_check;
+       int i;
  
         /*
          * Passing "true" to the last argument is okay; it adds a check
          * on bit 8 of the SPTEs which KVM doesn't use anyway.
          */
-       __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+       shadow_zero_check = &context->shadow_zero_check;
+       __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
                                 boot_cpu_data.x86_phys_bits,
                                 context->shadow_root_level, uses_nx,
                                 guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
                                 true);
+
+       if (!shadow_me_mask)
+               return;
+
+       for (i = context->shadow_root_level; --i >= 0;) {
+               shadow_zero_check->rsvd_bits_mask[i][0] &= ~shadow_me_mask;
+               shadow_zero_check->rsvd_bits_mask[i][1] &= ~shadow_me_mask;
+               shadow_zero_check->rsvd_bits_mask[i][2] &= ~shadow_me_mask;
+               shadow_zero_check->rsvd_bits_mask[i][3] &= ~shadow_me_mask;
+       }
+
  }
  EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
  
@@ -4136,8 +4150,13 @@ static void
  reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu *context)
  {
+       struct rsvd_bits_validate *shadow_zero_check;
+       int i;
+
+       shadow_zero_check = &context->shadow_zero_check;
+
         if (boot_cpu_is_amd())
-               __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+               __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
                                         boot_cpu_data.x86_phys_bits,
                                         context->shadow_root_level, false,
                                         boot_cpu_has(X86_FEATURE_GBPAGES),
@@ -4147,6 +4166,15 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
                                             boot_cpu_data.x86_phys_bits,
                                             false);
  
+       if (!shadow_me_mask)
+               return;
+
+       for (i = context->shadow_root_level; --i >= 0;) {
+               shadow_zero_check->rsvd_bits_mask[i][0] &= ~shadow_me_mask;
+               shadow_zero_check->rsvd_bits_mask[i][1] &= ~shadow_me_mask;
+               shadow_zero_check->rsvd_bits_mask[i][2] &= ~shadow_me_mask;
+               shadow_zero_check->rsvd_bits_mask[i][3] &= ~shadow_me_mask;
+       }
  }
  
  /*
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 3cc7255..d7d248a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -48,7 +48,7 @@
  
  static inline u64 rsvd_bits(int s, int e)
  {
-       return __sme_clr(((1ULL << (e - s + 1)) - 1) << s);
+       return ((1ULL << (e - s + 1)) - 1) << s;
  }
  
  void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);





> Thanks Paolo, Brijesh and I will test this and make sure everything works
> properly with this patch.
> 
> Thanks,
> Tom
> 
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index 2dafd36368cc..e0597d703d72 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -4142,16 +4142,24 @@ void
>>   reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
>>   {
>>       bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
>> +    struct rsvd_bits_validate *shadow_zero_check;
>> +    int i;
>>       /*
>>        * Passing "true" to the last argument is okay; it adds a check
>>        * on bit 8 of the SPTEs which KVM doesn't use anyway.
>>        */
>> -    __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
>> +        shadow_zero_check = &context->shadow_zero_check;
>> +    __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
>>                   boot_cpu_data.x86_phys_bits,
>>                   context->shadow_root_level, uses_nx,
>>                   guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
>>                   is_pse(vcpu), true);
>> +
>> +    for (i = context->shadow_root_level; --i >= 0; ) {
>> +        shadow_zero_check->rsvd_bits_mask[i][0] &= ~shadow_me_mask;
>> +        shadow_zero_check->rsvd_bits_mask[i][1] &= ~shadow_me_mask;
>> +    }
>>   }
>>   EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
>>
>> Can you please fix it up?   Please Cc me at paolo.bonzini@gmail.com too
>> because I'll be on vacation next week.
>>
>> (And thanks Stephen for the heads-up!)
>>
>> Paolo
>>

  reply	other threads:[~2017-08-25 16:53 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-08-25  4:39 linux-next: manual merge of the kvm tree with the tip tree Stephen Rothwell
2017-08-25  6:39 ` Paolo Bonzini
2017-08-25 13:57   ` Tom Lendacky
2017-08-25 16:53     ` Brijesh Singh [this message]
2017-08-25 20:05       ` Paolo Bonzini
2017-08-25 20:41         ` Brijesh Singh
2017-08-25 20:42           ` Paolo Bonzini
2017-08-26  7:24             ` Ingo Molnar
  -- strict thread matches above, loose matches on Subject: below --
2023-01-18  0:32 Stephen Rothwell
2022-12-01  0:18 Stephen Rothwell
2022-12-01  0:14 Stephen Rothwell
2022-12-15 23:26 ` Stephen Rothwell
2022-01-10  2:16 Stephen Rothwell
2022-01-10  2:28 ` Like Xu
2021-12-13 17:46 broonie
2021-12-13 18:14 ` Paolo Bonzini
2021-12-13 18:23 ` Mark Brown
2021-10-25  5:11 Stephen Rothwell
2021-10-21  2:39 Stephen Rothwell
2021-10-21 15:32 ` Borislav Petkov
2021-04-22  4:30 Stephen Rothwell
2021-04-22  4:45 ` Nadav Amit
2021-04-22  4:58   ` Stephen Rothwell
2021-04-22  6:29   ` Paolo Bonzini
2020-07-29  6:47 Stephen Rothwell
2020-07-17  5:25 Stephen Rothwell
2020-06-02  4:53 Stephen Rothwell
2020-06-04  3:09 ` Stephen Rothwell
2020-01-16  2:48 Stephen Rothwell
2018-12-19  4:12 Stephen Rothwell
2018-12-17  5:22 Stephen Rothwell
2018-10-19  3:25 Stephen Rothwell
2018-08-08  3:54 Stephen Rothwell
2018-08-15  4:27 ` Stephen Rothwell
2018-08-06  5:12 Stephen Rothwell
2018-08-06  6:27 ` Tianyu Lan
2018-02-02  0:51 Stephen Rothwell
2018-01-15  2:39 Stephen Rothwell
2017-08-28  4:52 Stephen Rothwell
2017-09-04  6:09 ` Stephen Rothwell
2016-11-28  3:56 Stephen Rothwell
2016-11-17  3:50 Stephen Rothwell
2016-11-17  7:07 ` Thomas Gleixner
2016-11-17 21:31   ` Stephen Rothwell
2016-05-12  2:54 Stephen Rothwell
2016-05-12  2:54 ` Stephen Rothwell
2015-06-19  4:59 Michael Ellerman
2015-05-26  4:45 Stephen Rothwell
2012-11-30  4:26 Stephen Rothwell
2012-05-16  7:14 Stephen Rothwell
2012-05-16  7:53 ` Gleb Natapov

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=a71c9dbd-ec32-8bb6-9d36-c138d9b4f001@amd.com \
    --to=brijesh.singh@amd.com \
    --cc=hpa@zytor.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-next@vger.kernel.org \
    --cc=mingo@elte.hu \
    --cc=paolo.bonzini@gmail.com \
    --cc=pbonzini@redhat.com \
    --cc=peterz@infradead.org \
    --cc=rkrcmar@redhat.com \
    --cc=sfr@canb.auug.org.au \
    --cc=tglx@linutronix.de \
    --cc=thomas.lendacky@amd.com \
    --cc=yu.c.zhang@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.