kvm.vger.kernel.org archive mirror
* [PATCH 0/2] KVM: x86: hyper-v: Conditionally allow SynIC with APICv/AVIC
@ 2021-05-13 11:37 Vitaly Kuznetsov
  2021-05-13 11:37 ` [PATCH 1/2] KVM: x86: Invert APICv/AVIC enablement check Vitaly Kuznetsov
  2021-05-13 11:37 ` [PATCH 2/2] KVM: x86: hyper-v: Deactivate APICv only when AutoEOI feature is in use Vitaly Kuznetsov
  0 siblings, 2 replies; 7+ messages in thread
From: Vitaly Kuznetsov @ 2021-05-13 11:37 UTC (permalink / raw)
  To: kvm, Paolo Bonzini
  Cc: Sean Christopherson, Wanpeng Li, Jim Mattson, Kechen Lu,
	Maxim Levitsky, linux-kernel

APICV_INHIBIT_REASON_HYPERV is currently unconditionally forced upon
SynIC activation as SynIC's AutoEOI is incompatible with APICv/AVIC. It is,
however, possible to track whether the feature was actually used by the
guest and only inhibit APICv/AVIC when needed.
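
In short (a simplified sketch of what patch 2 does; see the actual diff
there for details): a per-VM count of SynICs currently using AutoEOI is
kept, and APICv/AVIC is only (un)inhibited when that count transitions
between zero and non-zero:

	auto_eoi_old = bitmap_weight(synic->auto_eoi_bitmap, 256);
	/* ... update synic->auto_eoi_bitmap for the vector ... */
	auto_eoi_new = bitmap_weight(synic->auto_eoi_bitmap, 256);

	if (!auto_eoi_old && auto_eoi_new) {
		/* This SynIC starts using AutoEOI: if it is the first
		 * one in the VM, inhibit APICv/AVIC.
		 */
		if (atomic_inc_return(&hv->synic_auto_eoi_used) == 1)
			kvm_request_apicv_update(vcpu->kvm, false,
						 APICV_INHIBIT_REASON_HYPERV);
	} else if (!auto_eoi_new && auto_eoi_old) {
		/* This SynIC stopped using AutoEOI: if it was the last
		 * one, lift the inhibit again.
		 */
		if (atomic_dec_return(&hv->synic_auto_eoi_used) == 0)
			kvm_request_apicv_update(vcpu->kvm, true,
						 APICV_INHIBIT_REASON_HYPERV);
	}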

The feature can be tested with QEMU's 'hv-passthrough' debug mode.

Note, the 'avic' kvm-amd module parameter is '0' by default and thus AVIC
needs to be enabled explicitly.

Vitaly Kuznetsov (2):
  KVM: x86: Invert APICv/AVIC enablement check
  KVM: x86: hyper-v: Deactivate APICv only when AutoEOI feature is in
    use

 arch/x86/include/asm/kvm_host.h |  5 ++++-
 arch/x86/kvm/hyperv.c           | 27 +++++++++++++++++++++------
 arch/x86/kvm/svm/svm.c          |  7 ++++++-
 arch/x86/kvm/vmx/vmx.c          |  7 ++++++-
 arch/x86/kvm/x86.c              |  6 +++---
 5 files changed, 40 insertions(+), 12 deletions(-)

-- 
2.31.1



* [PATCH 1/2] KVM: x86: Invert APICv/AVIC enablement check
  2021-05-13 11:37 [PATCH 0/2] KVM: x86: hyper-v: Conditionally allow SynIC with APICv/AVIC Vitaly Kuznetsov
@ 2021-05-13 11:37 ` Vitaly Kuznetsov
  2021-05-17 21:03   ` Sean Christopherson
  2021-05-13 11:37 ` [PATCH 2/2] KVM: x86: hyper-v: Deactivate APICv only when AutoEOI feature is in use Vitaly Kuznetsov
  1 sibling, 1 reply; 7+ messages in thread
From: Vitaly Kuznetsov @ 2021-05-13 11:37 UTC (permalink / raw)
  To: kvm, Paolo Bonzini
  Cc: Sean Christopherson, Wanpeng Li, Jim Mattson, Kechen Lu,
	Maxim Levitsky, linux-kernel

Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
for Intel, 'avic' module parameter for AMD) but there's no way to check
it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
invert kvm_apicv_init() (which now doesn't need to be called from arch-
specific code).

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 2 +-
 arch/x86/kvm/svm/svm.c          | 7 ++++++-
 arch/x86/kvm/vmx/vmx.c          | 7 ++++++-
 arch/x86/kvm/x86.c              | 6 +++---
 4 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55efbacfc244..ffafdb7b24cb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1205,6 +1205,7 @@ struct kvm_x86_ops {
 	void (*hardware_unsetup)(void);
 	bool (*cpu_has_accelerated_tpr)(void);
 	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
+	bool (*apicv_supported)(void);
 	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
 
 	unsigned int vm_size;
@@ -1661,7 +1662,6 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 				struct x86_exception *exception);
 
 bool kvm_apicv_activated(struct kvm *kvm);
-void kvm_apicv_init(struct kvm *kvm, bool enable);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
 void kvm_request_apicv_update(struct kvm *kvm, bool activate,
 			      unsigned long bit);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4dd9b7856e5b..360b3000c5a8 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4470,16 +4470,21 @@ static int svm_vm_init(struct kvm *kvm)
 			return ret;
 	}
 
-	kvm_apicv_init(kvm, avic);
 	return 0;
 }
 
+static bool svm_avic_supported(void)
+{
+	return avic;
+}
+
 static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.hardware_unsetup = svm_hardware_teardown,
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
 	.has_emulated_msr = svm_has_emulated_msr,
+	.apicv_supported = svm_avic_supported,
 
 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f2fd447eed45..3b0f4f9c21b3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7034,7 +7034,6 @@ static int vmx_vm_init(struct kvm *kvm)
 			break;
 		}
 	}
-	kvm_apicv_init(kvm, enable_apicv);
 	return 0;
 }
 
@@ -7645,6 +7644,11 @@ static bool vmx_check_apicv_inhibit_reasons(ulong bit)
 	return supported & BIT(bit);
 }
 
+static bool vmx_apicv_supported(void)
+{
+	return enable_apicv;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.hardware_unsetup = hardware_unsetup,
 
@@ -7652,6 +7656,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
 	.has_emulated_msr = vmx_has_emulated_msr,
+	.apicv_supported = vmx_apicv_supported,
 
 	.vm_size = sizeof(struct kvm_vmx),
 	.vm_init = vmx_vm_init,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5bd550eaf683..fe7248e11e13 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8342,16 +8342,15 @@ bool kvm_apicv_activated(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_apicv_activated);
 
-void kvm_apicv_init(struct kvm *kvm, bool enable)
+static void kvm_apicv_init(struct kvm *kvm)
 {
-	if (enable)
+	if (kvm_x86_ops.apicv_supported())
 		clear_bit(APICV_INHIBIT_REASON_DISABLE,
 			  &kvm->arch.apicv_inhibit_reasons);
 	else
 		set_bit(APICV_INHIBIT_REASON_DISABLE,
 			&kvm->arch.apicv_inhibit_reasons);
 }
-EXPORT_SYMBOL_GPL(kvm_apicv_init);
 
 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 {
@@ -10727,6 +10726,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
+	kvm_apicv_init(kvm);
 	kvm_hv_init_vm(kvm);
 	kvm_page_track_init(kvm);
 	kvm_mmu_init_vm(kvm);
-- 
2.31.1



* [PATCH 2/2] KVM: x86: hyper-v: Deactivate APICv only when AutoEOI feature is in use
  2021-05-13 11:37 [PATCH 0/2] KVM: x86: hyper-v: Conditionally allow SynIC with APICv/AVIC Vitaly Kuznetsov
  2021-05-13 11:37 ` [PATCH 1/2] KVM: x86: Invert APICv/AVIC enablement check Vitaly Kuznetsov
@ 2021-05-13 11:37 ` Vitaly Kuznetsov
  1 sibling, 0 replies; 7+ messages in thread
From: Vitaly Kuznetsov @ 2021-05-13 11:37 UTC (permalink / raw)
  To: kvm, Paolo Bonzini
  Cc: Sean Christopherson, Wanpeng Li, Jim Mattson, Kechen Lu,
	Maxim Levitsky, linux-kernel

APICV_INHIBIT_REASON_HYPERV is currently unconditionally forced upon
SynIC activation as SynIC's AutoEOI is incompatible with APICv/AVIC. It is,
however, possible to track whether the feature was actually used by the
guest and only inhibit APICv/AVIC when needed.

The TLFS defines a dedicated 'HV_DEPRECATING_AEOI_RECOMMENDED' flag to let
Windows know that the AutoEOI feature should be avoided. While it's up to
KVM userspace to set the flag, KVM can help a bit by exposing global
APICv/AVIC enablement: when APICv/AVIC usage is impossible, AutoEOI is
still preferred.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  3 +++
 arch/x86/kvm/hyperv.c           | 27 +++++++++++++++++++++------
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ffafdb7b24cb..98391c9c18df 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -936,6 +936,9 @@ struct kvm_hv {
 	/* How many vCPUs have VP index != vCPU index */
 	atomic_t num_mismatched_vp_indexes;
 
+	/* How many SynICs use 'AutoEOI' feature */
+	atomic_t synic_auto_eoi_used;
+
 	struct hv_partition_assist_pg *hv_pa_pg;
 	struct kvm_hv_syndbg hv_syndbg;
 };
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index f98370a39936..e1ecc2143794 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -87,6 +87,10 @@ static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
 static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
 				int vector)
 {
+	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
+	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
+	int auto_eoi_old, auto_eoi_new;
+
 	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
 		return;
 
@@ -95,10 +99,25 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
 	else
 		__clear_bit(vector, synic->vec_bitmap);
 
+	auto_eoi_old = bitmap_weight(synic->auto_eoi_bitmap, 256);
+
 	if (synic_has_vector_auto_eoi(synic, vector))
 		__set_bit(vector, synic->auto_eoi_bitmap);
 	else
 		__clear_bit(vector, synic->auto_eoi_bitmap);
+
+	auto_eoi_new = bitmap_weight(synic->auto_eoi_bitmap, 256);
+
+	/* Hyper-V SynIC auto EOI SINTs are not compatible with APICV */
+	if (!auto_eoi_old && auto_eoi_new) {
+		if (atomic_inc_return(&hv->synic_auto_eoi_used) == 1)
+			kvm_request_apicv_update(vcpu->kvm, false,
+						 APICV_INHIBIT_REASON_HYPERV);
+	} else if (!auto_eoi_new && auto_eoi_old) {
+		if (atomic_dec_return(&hv->synic_auto_eoi_used) == 0)
+			kvm_request_apicv_update(vcpu->kvm, true,
+						 APICV_INHIBIT_REASON_HYPERV);
+	}
 }
 
 static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
@@ -931,12 +950,6 @@ int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
 
 	synic = to_hv_synic(vcpu);
 
-	/*
-	 * Hyper-V SynIC auto EOI SINT's are
-	 * not compatible with APICV, so request
-	 * to deactivate APICV permanently.
-	 */
-	kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV);
 	synic->active = true;
 	synic->dont_zero_synic_pages = dont_zero_synic_pages;
 	synic->control = HV_SYNIC_CONTROL_ENABLE;
@@ -2198,6 +2211,8 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
 			if (!cpu_smt_possible())
 				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
+			if (kvm_x86_ops.apicv_supported())
+				ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;
 			/*
 			 * Default number of spinlock retry attempts, matches
 			 * HyperV 2016.
-- 
2.31.1



* Re: [PATCH 1/2] KVM: x86: Invert APICv/AVIC enablement check
  2021-05-13 11:37 ` [PATCH 1/2] KVM: x86: Invert APICv/AVIC enablement check Vitaly Kuznetsov
@ 2021-05-17 21:03   ` Sean Christopherson
  2021-05-17 21:09     ` Jim Mattson
  0 siblings, 1 reply; 7+ messages in thread
From: Sean Christopherson @ 2021-05-17 21:03 UTC (permalink / raw)
  To: Vitaly Kuznetsov
  Cc: kvm, Paolo Bonzini, Wanpeng Li, Jim Mattson, Kechen Lu,
	Maxim Levitsky, linux-kernel

On Thu, May 13, 2021, Vitaly Kuznetsov wrote:
> Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
> for Intel, 'avic' module parameter for AMD) but there's no way to check
> it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
> invert kvm_apicv_init() (which now doesn't need to be called from arch-
> specific code).

Rather than add a new hook, just move the variable to x86.c, and export it so
that VMX and SVM can give it different module names.  The only hiccup is that
avic is off by default, but I don't see why that can't be changed.

On a related topic, the AVIC dependency on CONFIG_X86_LOCAL_APIC is dead code
since commit e42eef4ba388 ("KVM: add X86_LOCAL_APIC dependency").  Ditto for
cpu_has_vmx_posted_intr().


diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55efbacfc244..bf5807d35339 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1422,6 +1422,7 @@ struct kvm_arch_async_pf {
 extern u32 __read_mostly kvm_nr_uret_msrs;
 extern u64 __read_mostly host_efer;
 extern bool __read_mostly allow_smaller_maxphyaddr;
+extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;

 #define KVM_X86_OP(func) \
@@ -1661,7 +1662,6 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

 bool kvm_apicv_activated(struct kvm *kvm);
-void kvm_apicv_init(struct kvm *kvm, bool enable);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
 void kvm_request_apicv_update(struct kvm *kvm, bool activate,
                              unsigned long bit);
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 712b4e0de481..ec4aa804395b 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -28,10 +28,7 @@
 #include "svm.h"

 /* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
+module_param_named(avic, enable_apicv, bool, S_IRUGO);

 #define SVM_AVIC_DOORBELL      0xc001011b

@@ -126,7 +123,7 @@ void avic_vm_destroy(struct kvm *kvm)
        unsigned long flags;
        struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

-       if (!avic)
+       if (!enable_apicv)
                return;

        if (kvm_svm->avic_logical_id_table_page)
@@ -149,7 +146,7 @@ int avic_vm_init(struct kvm *kvm)
        struct page *l_page;
        u32 vm_id;

-       if (!avic)
+       if (!enable_apicv)
                return 0;

        /* Allocating physical APIC ID table (4KB) */
@@ -571,7 +568,7 @@ int avic_init_vcpu(struct vcpu_svm *svm)
        int ret;
        struct kvm_vcpu *vcpu = &svm->vcpu;

-       if (!avic || !irqchip_in_kernel(vcpu->kvm))
+       if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
                return 0;

        ret = avic_init_backing_page(vcpu);
@@ -595,7 +592,7 @@ void avic_post_state_restore(struct kvm_vcpu *vcpu)

 void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
 {
-       if (!avic || !lapic_in_kernel(vcpu))
+       if (!enable_apicv || !lapic_in_kernel(vcpu))
                return;

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -655,7 +652,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
        struct vmcb *vmcb = svm->vmcb;
        bool activated = kvm_vcpu_apicv_active(vcpu);

-       if (!avic)
+       if (!enable_apicv)
                return;

        if (activated) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index dfa351e605de..e650d4c466e1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1009,11 +1009,9 @@ static __init int svm_hardware_setup(void)
                        nrips = false;
        }

-       if (avic) {
-               if (!npt_enabled ||
-                   !boot_cpu_has(X86_FEATURE_AVIC) ||
-                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
-                       avic = false;
+       if (enable_apicv) {
+               if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)) {
+                       enable_apicv = false;
                } else {
                        pr_info("AVIC enabled\n");

@@ -4429,13 +4427,12 @@ static int svm_vm_init(struct kvm *kvm)
        if (!pause_filter_count || !pause_filter_thresh)
                kvm->arch.pause_in_guest = true;

-       if (avic) {
+       if (enable_apicv) {
                int ret = avic_vm_init(kvm);
                if (ret)
                        return ret;
        }

-       kvm_apicv_init(kvm, avic);
        return 0;
 }

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index e44567ceb865..a514b490db4a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -479,8 +479,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;

 #define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL

-extern int avic;
-
 static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
 {
        svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 8dee8a5fbc17..4705ad55abb5 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -12,7 +12,6 @@ extern bool __read_mostly enable_ept;
 extern bool __read_mostly enable_unrestricted_guest;
 extern bool __read_mostly enable_ept_ad_bits;
 extern bool __read_mostly enable_pml;
-extern bool __read_mostly enable_apicv;
 extern int __read_mostly pt_mode;

 #define PT_MODE_SYSTEM         0
@@ -90,8 +89,7 @@ static inline bool cpu_has_vmx_preemption_timer(void)

 static inline bool cpu_has_vmx_posted_intr(void)
 {
-       return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
-               vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+       return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
 }

 static inline bool cpu_has_load_ia32_efer(void)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4bceb5ca3a89..697dd54c7df8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -101,7 +101,6 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);

-bool __read_mostly enable_apicv = 1;
 module_param(enable_apicv, bool, S_IRUGO);

 /*
@@ -7001,7 +7000,6 @@ static int vmx_vm_init(struct kvm *kvm)
                        break;
                }
        }
-       kvm_apicv_init(kvm, enable_apicv);
        return 0;
 }

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9b6bca616929..22a1e2b438c3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -209,6 +209,9 @@ EXPORT_SYMBOL_GPL(host_efer);
 bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

+bool __read_mostly enable_apicv = true;
+EXPORT_SYMBOL_GPL(enable_apicv);
+
 u64 __read_mostly host_xss;
 EXPORT_SYMBOL_GPL(host_xss);
 u64 __read_mostly supported_xss;
@@ -8342,16 +8345,15 @@ bool kvm_apicv_activated(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_apicv_activated);

-void kvm_apicv_init(struct kvm *kvm, bool enable)
+static void kvm_apicv_init(struct kvm *kvm)
 {
-       if (enable)
+       if (enable_apicv)
                clear_bit(APICV_INHIBIT_REASON_DISABLE,
                          &kvm->arch.apicv_inhibit_reasons);
        else
                set_bit(APICV_INHIBIT_REASON_DISABLE,
                        &kvm->arch.apicv_inhibit_reasons);
 }
-EXPORT_SYMBOL_GPL(kvm_apicv_init);

 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 {
@@ -10736,6 +10738,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);

+       kvm_apicv_init(kvm);
        kvm_hv_init_vm(kvm);
        kvm_page_track_init(kvm);
        kvm_mmu_init_vm(kvm);




* Re: [PATCH 1/2] KVM: x86: Invert APICv/AVIC enablement check
  2021-05-17 21:03   ` Sean Christopherson
@ 2021-05-17 21:09     ` Jim Mattson
  2021-05-17 21:26       ` Sean Christopherson
  0 siblings, 1 reply; 7+ messages in thread
From: Jim Mattson @ 2021-05-17 21:09 UTC (permalink / raw)
  To: Sean Christopherson
  Cc: Vitaly Kuznetsov, kvm list, Paolo Bonzini, Wanpeng Li, Kechen Lu,
	Maxim Levitsky, LKML

On Mon, May 17, 2021 at 2:03 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Thu, May 13, 2021, Vitaly Kuznetsov wrote:
> > Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
> > for Intel, 'avic' module parameter for AMD) but there's no way to check
> > it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
> > invert kvm_apicv_init() (which now doesn't need to be called from arch-
> > specific code).
>
> Rather than add a new hook, just move the variable to x86.c, and export it so
> that VMX and SVM can give it different module names.  The only hiccup is that
> avic is off by default, but I don't see why that can't be changed.

See https://www.spinics.net/lists/kvm/msg208722.html.


* Re: [PATCH 1/2] KVM: x86: Invert APICv/AVIC enablement check
  2021-05-17 21:09     ` Jim Mattson
@ 2021-05-17 21:26       ` Sean Christopherson
  2021-05-17 21:56         ` Jim Mattson
  0 siblings, 1 reply; 7+ messages in thread
From: Sean Christopherson @ 2021-05-17 21:26 UTC (permalink / raw)
  To: Jim Mattson
  Cc: Vitaly Kuznetsov, kvm list, Paolo Bonzini, Wanpeng Li, Kechen Lu,
	Maxim Levitsky, LKML

On Mon, May 17, 2021, Jim Mattson wrote:
> On Mon, May 17, 2021 at 2:03 PM Sean Christopherson <seanjc@google.com> wrote:
> >
> > On Thu, May 13, 2021, Vitaly Kuznetsov wrote:
> > > Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
> > > for Intel, 'avic' module parameter for AMD) but there's no way to check
> > > it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
> > > invert kvm_apicv_init() (which now doesn't need to be called from arch-
> > > specific code).
> >
> > Rather than add a new hook, just move the variable to x86.c, and export it so
> > that VMX and SVM can give it different module names.  The only hiccup is that
> > avic is off by default, but I don't see why that can't be changed.
> 
> See https://www.spinics.net/lists/kvm/msg208722.html.

Boo.  A common enable_apicv can still work, SVM just needs an intermediary
between the module param and enable_apicv.

--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -185,6 +185,10 @@ module_param(vls, int, 0444);
 static int vgif = true;
 module_param(vgif, int, 0444);

+/* enable / disable AVIC */
+static bool avic;
+module_param(avic, bool, S_IRUGO);
+
 bool __read_mostly dump_invalid_vmcb;
 module_param(dump_invalid_vmcb, bool, 0644);

@@ -1009,16 +1013,19 @@ static __init int svm_hardware_setup(void)
                        nrips = false;
        }

-       if (avic) {
-               if (!npt_enabled ||
-                   !boot_cpu_has(X86_FEATURE_AVIC) ||
-                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
-                       avic = false;
-               } else {
-                       pr_info("AVIC enabled\n");
+       if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC))
+               avic = false;

-                       amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
-               }
+       /*
+        * Override the common enable_apicv.  AVIC is disabled by default
+        * because Jim said so.
+        */
+       enable_apicv = avic;
+
+       if (enable_apicv) {
+               pr_info("AVIC enabled\n");
+
+               amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
        }

        if (vls) {


* Re: [PATCH 1/2] KVM: x86: Invert APICv/AVIC enablement check
  2021-05-17 21:26       ` Sean Christopherson
@ 2021-05-17 21:56         ` Jim Mattson
  0 siblings, 0 replies; 7+ messages in thread
From: Jim Mattson @ 2021-05-17 21:56 UTC (permalink / raw)
  To: Sean Christopherson
  Cc: Vitaly Kuznetsov, kvm list, Paolo Bonzini, Wanpeng Li, Kechen Lu,
	Maxim Levitsky, LKML

On Mon, May 17, 2021 at 2:26 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Mon, May 17, 2021, Jim Mattson wrote:
> > On Mon, May 17, 2021 at 2:03 PM Sean Christopherson <seanjc@google.com> wrote:
> > >
> > > On Thu, May 13, 2021, Vitaly Kuznetsov wrote:
> > > > Currently, APICv/AVIC enablement is global ('enable_apicv' module parameter
> > > > for Intel, 'avic' module parameter for AMD) but there's no way to check
> > > > it from vendor-neutral code. Add 'apicv_supported()' to kvm_x86_ops and
> > > > invert kvm_apicv_init() (which now doesn't need to be called from arch-
> > > > specific code).
> > >
> > > Rather than add a new hook, just move the variable to x86.c, and export it so
> > > that VMX and SVM can give it different module names.  The only hiccup is that
> > > avic is off by default, but I don't see why that can't be changed.
> >
> > See https://www.spinics.net/lists/kvm/msg208722.html.
>
> Boo.  A common enable_apicv can still work, SVM just needs an intermediary
> between the module param and enable_apicv.
>
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -185,6 +185,10 @@ module_param(vls, int, 0444);
>  static int vgif = true;
>  module_param(vgif, int, 0444);
>
> +/* enable / disable AVIC */
> +static bool avic;
> +module_param(avic, bool, S_IRUGO);
> +
>  bool __read_mostly dump_invalid_vmcb;
>  module_param(dump_invalid_vmcb, bool, 0644);
>
> @@ -1009,16 +1013,19 @@ static __init int svm_hardware_setup(void)
>                         nrips = false;
>         }
>
> -       if (avic) {
> -               if (!npt_enabled ||
> -                   !boot_cpu_has(X86_FEATURE_AVIC) ||
> -                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
> -                       avic = false;
> -               } else {
> -                       pr_info("AVIC enabled\n");
> +       if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC))
> +               avic = false;
>
> -                       amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
> -               }
> +       /*
> +        * Override the common enable_apicv.  AVIC is disabled by default
> +        * because Jim said so.
> +        */

Hey! I'm just the messenger. Wei Huang said so.

> +       enable_apicv = avic;
> +
> +       if (enable_apicv) {
> +               pr_info("AVIC enabled\n");
> +
> +               amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
>         }
>
>         if (vls) {


