[v4,5/9] KVM: nSVM: introduce nested_svm_load_cr3()/nested_npt_enabled()

Message ID 20200710141157.1640173-6-vkuznets@redhat.com
State New
Series
  • KVM: nSVM: fixes for CR3/MMU switch upon nested guest entry/exit

Commit Message

Vitaly Kuznetsov July 10, 2020, 2:11 p.m. UTC
As a preparatory change for implementing nested specifig PGD switch for
nSVM (following nVMX' nested_vmx_load_cr3()) instead of relying on
kvm_set_cr3() introduce nested_svm_load_cr3().

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

Comments

Sean Christopherson July 13, 2020, 10:38 p.m. UTC | #1
On Fri, Jul 10, 2020 at 04:11:53PM +0200, Vitaly Kuznetsov wrote:
> As a preparatory change for implementing nested specifig PGD switch for

s/specifig/specific

> nSVM (following nVMX' nested_vmx_load_cr3()) instead of relying on

nVMX's

> kvm_set_cr3() introduce nested_svm_load_cr3().

The changelog isn't all that helpful to understanding the actual change.
All this is doing is wrapping kvm_set_cr3(), but that's not at all obvious
from reading the above.

E.g.

  Add nested_svm_load_cr3() as a pass-through to kvm_set_cr3() as a
  preparatory change for implementing nested specific PGD switch for nSVM
  (following nVMX's nested_vmx_load_cr3()).

> No functional change intended.
> 
> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> ---
>  arch/x86/kvm/svm/nested.c | 21 +++++++++++++++++++--
>  1 file changed, 19 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 5e6c988a4e6b..180929f3dbef 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -311,6 +311,21 @@ static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
>  	nested_vmcb->control.exit_int_info = exit_int_info;
>  }
>  
> +static inline bool nested_npt_enabled(struct vcpu_svm *svm)
> +{
> +	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
> +}
> +
> +/*
> + * Load guest's cr3 at nested entry. @nested_npt is true if we are
> + * emulating VM-Entry into a guest with NPT enabled.
> + */
> +static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
> +			       bool nested_npt)

IMO the addition of nested_npt_enabled() should be a separate patch, and
the addition of @nested_npt should be in patch 7.

Hypothetically speaking, if nested_npt_enabled() is inaccurate at the call
site in nested_prepare_vmcb_save(), then this patch is technically wrong
even though it doesn't introduce a bug.  Given that the call site of
nested_svm_load_cr3() is moved in patch 7, I don't see any value in adding
the placeholder parameter early.

> +{
> +	return kvm_set_cr3(vcpu, cr3);
> +}
> +
>  static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
>  {
>  	/* Load the nested guest state */
> @@ -324,7 +339,8 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
>  	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
>  	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
>  	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
> -	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
> +	(void)nested_svm_load_cr3(&svm->vcpu, nested_vmcb->save.cr3,
> +				  nested_npt_enabled(svm));
>  
>  	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
>  	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
> @@ -343,7 +359,8 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
>  static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
>  {
>  	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
> -	if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
> +
> +	if (nested_npt_enabled(svm))
>  		nested_svm_init_mmu_context(&svm->vcpu);
>  
>  	/* Guest paging mode is active - reset mmu */
> -- 
> 2.25.4
>
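For reference, the nVMX helper the changelog points to,
nested_vmx_load_cr3(), is not a pure pass-through: it validates the
incoming CR3, loads the PAE PDPTEs when EPT is disabled, and
re-initializes the MMU.  A simplified paraphrase of its shape (helper
names and signatures are approximate and the error-code plumbing is
omitted; see arch/x86/kvm/vmx/nested.c for the real code):

/*
 * Simplified paraphrase of nVMX's nested_vmx_load_cr3(); helper names
 * and signatures are approximate.
 */
static int nested_vmx_load_cr3_sketch(struct kvm_vcpu *vcpu,
				      unsigned long cr3, bool nested_ept)
{
	/* Reject a CR3 value the guest could not architecturally load. */
	if (!nested_cr3_valid(vcpu, cr3))
		return -EINVAL;

	/*
	 * Without EPT the CPU walks CR3 directly, so the PAE PDPTEs must
	 * be read from guest memory at VM-Entry.
	 */
	if (!nested_ept && is_pae_paging(vcpu) &&
	    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
		return -EINVAL;

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	/* Re-initialize the MMU for the new paging context. */
	kvm_init_mmu(vcpu, false);

	return 0;
}

Until nested_svm_load_cr3() grows logic of this kind, the @nested_npt
argument is effectively a placeholder, which is the crux of the comment
above.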
Vitaly Kuznetsov July 14, 2020, 11:26 a.m. UTC | #2
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> On Fri, Jul 10, 2020 at 04:11:53PM +0200, Vitaly Kuznetsov wrote:
>> As a preparatory change for implementing nested specifig PGD switch for
>
> s/specifig/specific
>
>> nSVM (following nVMX' nested_vmx_load_cr3()) instead of relying on
>
> nVMX's
>
>> kvm_set_cr3() introduce nested_svm_load_cr3().
>
> The changelog isn't all that helpful to understanding the actual change.
> All this is doing is wrapping kvm_set_cr3(), but that's not at all obvious
> from reading the above.
>
> E.g.
>
>   Add nested_svm_load_cr3() as a pass-through to kvm_set_cr3() as a
>   preparatory change for implementing nested specific PGD switch for nSVM
>   (following nVMX's nested_vmx_load_cr3()).
>

Sounds better indeed, thanks!

>> No functional change intended.
>> 
>> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
>> ---
>>  arch/x86/kvm/svm/nested.c | 21 +++++++++++++++++++--
>>  1 file changed, 19 insertions(+), 2 deletions(-)
>> 
>> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
>> index 5e6c988a4e6b..180929f3dbef 100644
>> --- a/arch/x86/kvm/svm/nested.c
>> +++ b/arch/x86/kvm/svm/nested.c
>> @@ -311,6 +311,21 @@ static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
>>  	nested_vmcb->control.exit_int_info = exit_int_info;
>>  }
>>  
>> +static inline bool nested_npt_enabled(struct vcpu_svm *svm)
>> +{
>> +	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
>> +}
>> +
>> +/*
>> + * Load guest's cr3 at nested entry. @nested_npt is true if we are
>> + * emulating VM-Entry into a guest with NPT enabled.
>> + */
>> +static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
>> +			       bool nested_npt)
>
> IMO the addition of nested_npt_enabled() should be a separate patch, and
> > the addition of @nested_npt should be in patch 7.
>
> Hypothetically speaking, if nested_npt_enabled() is inaccurate at the call
> site in nested_prepare_vmcb_save(), then this patch is technically wrong
> even though it doesn't introduce a bug.  Given that the call site of
> nested_svm_load_cr3() is moved in patch 7, I don't see any value in adding
> the placeholder parameter early.
>

I see and I mostly agree; I put it here to avoid unneeded churn and to
make the whole thing easier to review: this patch is technically a nop,
so it can be reviewed in "doesn't change anything" mode, and the patches
which actually change things are smaller.

Paolo already said 'queued' here and your comments can't be addressed in
a follow-up patch but I can certainly do v5 if needed.

Thanks for your review!
Sean Christopherson July 15, 2020, 4:36 a.m. UTC | #3
On Tue, Jul 14, 2020 at 01:26:24PM +0200, Vitaly Kuznetsov wrote:
> Sean Christopherson <sean.j.christopherson@intel.com> writes:
> > IMO the addition of nested_npt_enabled() should be a separate patch, and
> > the addition of @nested_npt should be in patch 7.
> >
> > Hypothetically speaking, if nested_npt_enabled() is inaccurate at the call
> > site in nested_prepare_vmcb_save(), then this patch is technically wrong
> > even though it doesn't introduce a bug.  Given that the call site of
> > nested_svm_load_cr3() is moved in patch 7, I don't see any value in adding
> > the placeholder parameter early.
> >
> 
> I see and I mostly agree; I put it here to avoid unneeded churn and to
> make the whole thing easier to review: this patch is technically a nop,
> so it can be reviewed in "doesn't change anything" mode, and the patches
> which actually change things are smaller.
> 
> Paolo already said 'queued' here and your comments can't be addressed in
> a follow-up patch but I can certainly do v5 if needed.

Eh, not necessary; I didn't see that the series was in kvm/queue until after
I hit send.  Thanks!

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 5e6c988a4e6b..180929f3dbef 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -311,6 +311,21 @@  static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
 	nested_vmcb->control.exit_int_info = exit_int_info;
 }
 
+static inline bool nested_npt_enabled(struct vcpu_svm *svm)
+{
+	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
+}
+
+/*
+ * Load guest's cr3 at nested entry. @nested_npt is true if we are
+ * emulating VM-Entry into a guest with NPT enabled.
+ */
+static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
+			       bool nested_npt)
+{
+	return kvm_set_cr3(vcpu, cr3);
+}
+
 static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
 {
 	/* Load the nested guest state */
@@ -324,7 +339,8 @@  static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
 	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
 	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
 	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
-	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
+	(void)nested_svm_load_cr3(&svm->vcpu, nested_vmcb->save.cr3,
+				  nested_npt_enabled(svm));
 
 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
 	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
@@ -343,7 +359,8 @@  static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
 static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 {
 	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
-	if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
+
+	if (nested_npt_enabled(svm))
 		nested_svm_init_mmu_context(&svm->vcpu);
 
 	/* Guest paging mode is active - reset mmu */
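
To make the suggested split concrete, here is a purely hypothetical
illustration (not the actual follow-up patch from this series) of how
@nested_npt could be used once nested_svm_load_cr3() stops being a
pass-through:

/*
 * Hypothetical sketch only.  With NPT enabled for L2, the CR3 from the
 * nested VMCB is walked by hardware through the nested page tables and
 * must not go through the legacy kvm_set_cr3() path.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (nested_npt) {
		/* Accept the guest's CR3 as-is and rebuild the MMU context. */
		vcpu->arch.cr3 = cr3;
		kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
		kvm_init_mmu(vcpu, false);
		return 0;
	}

	/* Shadow paging: keep the architectural CR3 checks. */
	return kvm_set_cr3(vcpu, cr3);
}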