* [PATCH v3 2/6] KVM: nVMX: Enable nested virtualize x2apic mode.
From: Wincy Van @ 2015-01-24 10:21 UTC (permalink / raw)
  To: Paolo Bonzini, gleb, Zhang, Yang Z
  Cc: kvm, linux-kernel, Wanpeng Li, Jan Kiszka, 范文一

When L2 is using x2APIC, we can use the "virtualize x2APIC mode"
secondary execution control to gain higher performance, especially in
the APICv case.

This patch also introduces nested_vmx_check_apicv_controls for the
later nested APICv patches.

Signed-off-by: Wincy Van <fanwenyi0529@gmail.com>
---
 arch/x86/kvm/vmx.c |  121 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 119 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 36d0724..4d8939d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1108,6 +1108,11 @@ static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
                vmx_xsaves_supported();
 }

+static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
+{
+       return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
+}
+
 static inline bool is_exception(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -2395,6 +2400,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
        nested_vmx_secondary_ctls_low = 0;
        nested_vmx_secondary_ctls_high &=
                SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+               SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
                SECONDARY_EXEC_WBINVD_EXITING |
                SECONDARY_EXEC_XSAVES;

@@ -4155,6 +4161,50 @@ static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
        }
 }

+/*
+ * If an MSR is allowed by L0, we should check whether it is also allowed by L1.
+ * The corresponding bit will be cleared only if both L0 and L1 allow it.
+ */
+static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
+                                              unsigned long *msr_bitmap_nested,
+                                              u32 msr, int type)
+{
+       int f = sizeof(unsigned long);
+
+       if (!cpu_has_vmx_msr_bitmap())
+               return;
+
+       /*
+        * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+        * have the write-low and read-high bitmap offsets the wrong way round.
+        * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+        */
+       if (msr <= 0x1fff) {
+               if (type & MSR_TYPE_R &&
+                  !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
+                       /* read-low */
+                       __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
+
+               if (type & MSR_TYPE_W &&
+                  !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
+                       /* write-low */
+                       __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
+
+       } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+               msr &= 0x1fff;
+               if (type & MSR_TYPE_R &&
+                  !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
+                       /* read-high */
+                       __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
+
+               if (type & MSR_TYPE_W &&
+                  !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
+                       /* write-high */
+                       __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
+
+       }
+}
+
 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
 {
        if (!longmode_only)
@@ -8344,7 +8394,68 @@ static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
 static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
                                               struct vmcs12 *vmcs12)
 {
-       return false;
+       struct page *page;
+       unsigned long *msr_bitmap;
+
+       if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
+               return false;
+
+       page = nested_get_page(vcpu, vmcs12->msr_bitmap);
+       if (!page) {
+               WARN_ON(1);
+               return false;
+       }
+       msr_bitmap = (unsigned long *)kmap(page);
+       if (!msr_bitmap) {
+               nested_release_page_clean(page);
+               WARN_ON(1);
+               return false;
+       }
+
+       memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
+
+       if (nested_cpu_has_virt_x2apic_mode(vmcs12))
+               /* TPR is allowed */
+               nested_vmx_disable_intercept_for_msr(msr_bitmap,
+                               vmx_msr_bitmap_nested,
+                               APIC_BASE_MSR + (APIC_TASKPRI >> 4),
+                               MSR_TYPE_R | MSR_TYPE_W);
+       kunmap(page);
+       nested_release_page_clean(page);
+
+       return true;
+}
+
+static inline int nested_vmx_check_virt_x2apic(struct kvm_vcpu *vcpu,
+                                              struct vmcs12 *vmcs12)
+{
+       if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+               return -EINVAL;
+       return 0;
+}
+
+static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
+                                          struct vmcs12 *vmcs12)
+{
+       int r;
+
+       if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
+               return 0;
+
+       r = nested_vmx_check_virt_x2apic(vcpu, vmcs12);
+       if (r)
+               goto fail;
+
+       /* tpr shadow is needed by all apicv features. */
+       if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
+               r = -EINVAL;
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       return r;
 }

 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
@@ -8640,7 +8751,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                        else
                                vmcs_write64(APIC_ACCESS_ADDR,
                                  page_to_phys(vmx->nested.apic_access_page));
-               } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
+               } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
+                           (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) {
                        exec_control |=
                                SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
                        kvm_vcpu_reload_apic_access_page(vcpu);
@@ -8851,6 +8963,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                return 1;
        }

+       if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
+               nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+               return 1;
+       }
+
        if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
                nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
                return 1;
--
1.7.1
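
For readers following the bitmap arithmetic above: the MSR being passed
through is the x2APIC task-priority register.  A minimal illustration,
assuming the usual values from arch/x86/include/asm/apicdef.h; nothing
below is introduced by the patch itself, and x2apic_tpr_msr() is only a
name for this illustration:

#define APIC_BASE_MSR  0x800   /* first x2APIC MSR (assumed value)       */
#define APIC_TASKPRI   0x080   /* TPR offset in the xAPIC page (assumed) */

static unsigned int x2apic_tpr_msr(void)
{
        /* 0x800 + (0x80 >> 4) = 0x808, i.e. IA32_X2APIC_TPR. */
        return APIC_BASE_MSR + (APIC_TASKPRI >> 4);
}

Since 0x808 <= 0x1fff, nested_vmx_disable_intercept_for_msr() above clears
the read bit in the read-low area (offset 0x000) and the write bit in the
write-low area (offset 0x800) of the merged bitmap, provided L1's bitmap
also has those bits clear.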


* Re: [PATCH v3 2/6] KVM: nVMX: Enable nested virtualize x2apic mode.
From: Paolo Bonzini @ 2015-01-27 21:37 UTC (permalink / raw)
  To: Wincy Van, gleb, Zhang, Yang Z; +Cc: kvm, linux-kernel, Wanpeng Li, Jan Kiszka



On 24/01/2015 11:21, Wincy Van wrote:
> +       memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);

Most bytes are always 0xff.  It's better to initialize it to 0xff once,
and set the bit here if !nested_cpu_has_virt_x2apic_mode(vmcs12).

> +       if (nested_cpu_has_virt_x2apic_mode(vmcs12))

Please add braces here, because of the /* */ comment below.

> +               /* TPR is allowed */
> +               nested_vmx_disable_intercept_for_msr(msr_bitmap,
> +                               vmx_msr_bitmap_nested,
> +                               APIC_BASE_MSR + (APIC_TASKPRI >> 4),
> +                               MSR_TYPE_R | MSR_TYPE_W);
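
Putting the two suggestions together, the merge path could end up looking
roughly like this (a sketch only: it assumes vmx_msr_bitmap_nested is set
to 0xff once when it is allocated, that the early
!nested_cpu_has_virt_x2apic_mode() return is dropped so the else branch is
reachable, and that __vmx_enable_intercept_for_msr() from the hunk above
takes (bitmap, msr, type)):

        if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
                /* TPR is allowed: pass it through if L1 also allows it. */
                nested_vmx_disable_intercept_for_msr(msr_bitmap,
                                vmx_msr_bitmap_nested,
                                APIC_BASE_MSR + (APIC_TASKPRI >> 4),
                                MSR_TYPE_R | MSR_TYPE_W);
        } else {
                /* No x2APIC virtualization: keep the TPR MSR intercepted. */
                __vmx_enable_intercept_for_msr(vmx_msr_bitmap_nested,
                                APIC_BASE_MSR + (APIC_TASKPRI >> 4),
                                MSR_TYPE_R | MSR_TYPE_W);
        }
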
> 
> +static inline int nested_vmx_check_virt_x2apic(struct kvm_vcpu *vcpu,
> +                                              struct vmcs12 *vmcs12)
> +{
> +       if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
> +               return -EINVAL;

No need for this function and nested_cpu_has_virt_x2apic_mode.  Just
inline them in their caller(s).  Same for other cases throughout the series.
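
Concretely, folding nested_vmx_check_virt_x2apic() into its only caller
would give something along these lines (a sketch of the suggestion, not
the final code):

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
                                           struct vmcs12 *vmcs12)
{
        if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
                return 0;

        /* Virtualize x2APIC mode excludes virtualize APIC accesses. */
        if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
                return -EINVAL;

        /* tpr shadow is needed by all apicv features. */
        if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
                return -EINVAL;

        return 0;
}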

Paolo

> +       return 0;
> +}
> +
> +static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
> +                                          struct vmcs12 *vmcs12)
> +{
> +       int r;
> +
> +       if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
> +               return 0;
> +
> +       r = nested_vmx_check_virt_x2apic(vcpu, vmcs12);
> +       if (r)
> +               goto fail;
> +


* Re: [PATCH v3 2/6] KVM: nVMX: Enable nested virtualize x2apic mode.
From: Paolo Bonzini @ 2015-01-27 21:39 UTC (permalink / raw)
  To: Wincy Van, gleb, Zhang, Yang Z; +Cc: kvm, linux-kernel, Wanpeng Li, Jan Kiszka



On 24/01/2015 11:21, Wincy Van wrote:
> +static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
> +                                              unsigned long *msr_bitmap_nested,
> +                                              u32 msr, int type)
> +{
> +       int f = sizeof(unsigned long);
> +
> +       if (!cpu_has_vmx_msr_bitmap())
> +               return;
> +

Also, make this a WARN_ON.
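
That is, something along these lines (a sketch; the kernel's WARN_ON()
returns the value of its condition, so the early return folds into it):

        if (WARN_ON(!cpu_has_vmx_msr_bitmap()))
                return;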

Paolo


* Re: [PATCH v3 2/6] KVM: nVMX: Enable nested virtualize x2apic mode.
From: Wincy Van @ 2015-01-28  6:19 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: gleb, Zhang, Yang Z, kvm, linux-kernel, Wanpeng Li, Jan Kiszka

On Wed, Jan 28, 2015 at 5:37 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
>
>
> On 24/01/2015 11:21, Wincy Van wrote:
>> +       memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
>
> Most bytes are always 0xff.  It's better to initialize it to 0xff once,
> and set the bit here if !nested_cpu_has_virt_x2apic_mode(vmcs12).

Indeed, will do.

>
>> +       if (nested_cpu_has_virt_x2apic_mode(vmcs12))
>
> Please add braces here, because of the /* */ command below.

Will do.

>
>> +               /* TPR is allowed */
>> +               nested_vmx_disable_intercept_for_msr(msr_bitmap,
>> +                               vmx_msr_bitmap_nested,
>> +                               APIC_BASE_MSR + (APIC_TASKPRI >> 4),
>> +                               MSR_TYPE_R | MSR_TYPE_W);
>>
>> +static inline int nested_vmx_check_virt_x2apic(struct kvm_vcpu *vcpu,
>> +                                              struct vmcs12 *vmcs12)
>> +{
>> +       if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
>> +               return -EINVAL;
>
> No need for this function and nested_cpu_has_virt_x2apic_mode.  Just
> inline them in their caller(s).  Same for other cases throughout the series.
>

Do you mean that we should also inline the same functions in the other
patches of this patch set?
I think these functions will keep the code tidy, just like existing
helpers such as nested_cpu_has_preemption_timer, nested_cpu_has_ept, etc.


* Re: [PATCH v3 2/6] KVM: nVMX: Enable nested virtualize x2apic mode.
From: Wincy Van @ 2015-01-28  6:20 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: gleb, Zhang, Yang Z, kvm, linux-kernel, Wanpeng Li, Jan Kiszka

On Wed, Jan 28, 2015 at 5:39 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
>
>
> On 24/01/2015 11:21, Wincy Van wrote:
>> +static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
>> +                                              unsigned long *msr_bitmap_nested,
>> +                                              u32 msr, int type)
>> +{
>> +       int f = sizeof(unsigned long);
>> +
>> +       if (!cpu_has_vmx_msr_bitmap())
>> +               return;
>> +
>
> Also, make this a WARN_ON.

Will do.



Thanks,
Wincy


>
> Paolo


* Re: [PATCH v3 2/6] KVM: nVMX: Enable nested virtualize x2apic mode.
From: Paolo Bonzini @ 2015-01-28  7:31 UTC (permalink / raw)
  To: Wincy Van; +Cc: gleb, Zhang, Yang Z, kvm, linux-kernel, Wanpeng Li, Jan Kiszka



On 28/01/2015 07:19, Wincy Van wrote:
>> >
>> > No need for this function and nested_cpu_has_virt_x2apic_mode.  Just
>> > inline them in their caller(s).  Same for other cases throughout the series.
>> >
> Do you mean that we should also inline the same functions in the other
> patches of this patch set?
> I think these functions will keep the code tidy, just like the
> functions as nested_cpu_has_preemption_timer, nested_cpu_has_ept, etc.

Most of the functions are just used once.  If you want to keep them,
please place them all close to the existing ones.

Paolo


* Re: [PATCH v3 2/6] KVM: nVMX: Enable nested virtualize x2apic mode.
From: Wincy Van @ 2015-01-28 10:19 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: gleb, Zhang, Yang Z, kvm, linux-kernel, Wanpeng Li, Jan Kiszka

On Wed, Jan 28, 2015 at 3:31 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
>>> >
>>> > No need for this function and nested_cpu_has_virt_x2apic_mode.  Just
>>> > inline them in their caller(s).  Same for other cases throughout the series.
>>> >
>> Do you mean that we should also inline the same functions in the other
>> patches of this patch set?
>> I think these functions will keep the code tidy, just like the
>> functions as nested_cpu_has_preemption_timer, nested_cpu_has_ept, etc.
>
> Most of the functions are just used once.  If you want to keep them,
> please place them all close to the existing ones.
>

Yep, I will inline the functions like nested_vmx_check_virt_x2apic and keep
the nested_cpu_has series.


Thanks,
Wincy


* Re: [PATCH v3 2/6] KVM: nVMX: Enable nested virtualize x2apic mode.
From: Paolo Bonzini @ 2015-01-28 10:20 UTC (permalink / raw)
  To: Wincy Van; +Cc: gleb, Zhang, Yang Z, kvm, linux-kernel, Wanpeng Li, Jan Kiszka



On 28/01/2015 11:19, Wincy Van wrote:
> > Most of the functions are just used once.  If you want to keep them,
> > please place them all close to the existing ones.
>
> Yep, I will inline the functions like nested_vmx_check_virt_x2apic and keep
> the nested_cpu_has series.

Okay, thanks!

Paolo

