From: Maxim Levitsky <mlevitsk@redhat.com>
To: kvm@vger.kernel.org
Cc: Wanpeng Li <wanpengli@tencent.com>,
	David Airlie <airlied@linux.ie>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	dri-devel@lists.freedesktop.org, "H. Peter Anvin" <hpa@zytor.com>,
	Brijesh Singh <brijesh.singh@amd.com>,
	Joerg Roedel <joro@8bytes.org>,
	x86@kernel.org, Maxim Levitsky <mlevitsk@redhat.com>,
	Ingo Molnar <mingo@redhat.com>, Zhi Wang <zhi.a.wang@intel.com>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	intel-gfx@lists.freedesktop.org, Borislav Petkov <bp@alien8.de>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	intel-gvt-dev@lists.freedesktop.org,
	Jim Mattson <jmattson@google.com>,
	Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>,
	Sean Christopherson <seanjc@google.com>,
	linux-kernel@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>
Subject: [RFC PATCH v3 12/19] KVM: x86: nSVM: make nested AVIC physid write tracking be aware of the host scheduling
Date: Wed, 27 Apr 2022 23:03:07 +0300
Message-ID: <20220427200314.276673-13-mlevitsk@redhat.com>
In-Reply-To: <20220427200314.276673-1-mlevitsk@redhat.com>

For each vCPU:
  - Store a linked list of all shadow physical ID entries
    that reference it.

  - Update those entries when this vCPU is scheduled
    in or out.

  - Update this list when physid tables are modified by
    other means (a guest write and/or a table sync).

To avoid races with vCPU scheduling, use a raw spinlock.
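
A minimal sketch of the locking pattern, for illustration only (not
part of the patch): the type and function names below are stand-ins,
and physid_entry_set_apicid is assumed to update the IsRunning/APIC ID
bits of a shadow entry as in the diff that follows.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Stand-in for the real entry descriptor used in the diff below. */
struct entry_descr {
	struct list_head link;	/* node on the owning vCPU's ref list */
	u64 *sentry;		/* points into a shadow physid table */
};

static DEFINE_RAW_SPINLOCK(table_entries_lock);

/* Called on vCPU sched-in (l0_apicid >= 0) or sched-out (-1). */
static void update_ref_entries(struct list_head *ref_entries, int l0_apicid)
{
	struct entry_descr *e;
	unsigned long flags;

	/*
	 * The entry create/remove paths take the same raw spinlock,
	 * so an entry cannot join or leave this list while its
	 * shadow table entry is being rewritten here.
	 */
	raw_spin_lock_irqsave(&table_entries_lock, flags);
	list_for_each_entry(e, ref_entries, link)
		physid_entry_set_apicid(e->sentry, l0_apicid);
	raw_spin_unlock_irqrestore(&table_entries_lock, flags);
}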

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 arch/x86/kvm/svm/avic.c | 113 +++++++++++++++++++++++++++++++++++++---
 arch/x86/kvm/svm/svm.c  |   7 +++
 arch/x86/kvm/svm/svm.h  |  10 ++++
 3 files changed, 122 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index f462b7e48e3ca..34da9fabd5194 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -67,8 +67,12 @@ static void avic_physid_shadow_entry_set_vcpu(struct kvm *kvm,
 	struct avic_physid_entry_descr *e = &t->entries[n];
 	u64 sentry = READ_ONCE(*e->sentry);
 	u64 old_sentry = sentry;
+	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
 	struct kvm_vcpu *new_vcpu = NULL;
 	int l0_apicid = -1;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&kvm_svm->avic.table_entries_lock, flags);
 
 	WARN_ON(!test_bit(n, t->valid_entires));
 
@@ -79,6 +83,9 @@ static void avic_physid_shadow_entry_set_vcpu(struct kvm *kvm,
 		new_vcpu = avic_vcpu_by_l1_apicid(kvm, new_l1_apicid);
 
 	if (new_vcpu)
+		list_add_tail(&e->link, &to_svm(new_vcpu)->nested.physid_ref_entries);
+
+	if (new_vcpu && to_svm(new_vcpu)->nested_avic_active)
 		l0_apicid = kvm_cpu_get_apicid(new_vcpu->cpu);
 
 	physid_entry_set_apicid(&sentry, l0_apicid);
@@ -87,6 +94,8 @@ static void avic_physid_shadow_entry_set_vcpu(struct kvm *kvm,
 
 	if (sentry != old_sentry)
 		WRITE_ONCE(*e->sentry, sentry);
+
+	raw_spin_unlock_irqrestore(&kvm_svm->avic.table_entries_lock, flags);
 }
 
 static void avic_physid_shadow_entry_create(struct kvm *kvm,
@@ -131,7 +140,11 @@ static void avic_physid_shadow_entry_remove(struct kvm *kvm,
 					   int n)
 {
 	struct avic_physid_entry_descr *e = &t->entries[n];
+	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
 	hpa_t backing_page_hpa;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&kvm_svm->avic.table_entries_lock, flags);
 
 	if (!test_and_clear_bit(n, t->valid_entires))
 		WARN_ON(1);
@@ -147,8 +160,49 @@ static void avic_physid_shadow_entry_remove(struct kvm *kvm,
 
 	e->gentry = 0;
 	*e->sentry = 0;
+
+	raw_spin_unlock_irqrestore(&kvm_svm->avic.table_entries_lock, flags);
 }
 
+static void avic_update_peer_physid_entries(struct kvm_vcpu *vcpu, int cpu)
+{
+	/*
+	 * Update all shadow physid tables which contain entries
+	 * which reference this vCPU with its new physical location
+	 */
+	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
+	struct vcpu_svm *vcpu_svm = to_svm(vcpu);
+	struct avic_physid_entry_descr *e;
+	int updated_nentries = 0;
+	int l0_apicid = -1;
+	unsigned long flags;
+	bool new_active = cpu != -1;
+
+	if (cpu != -1)
+		l0_apicid = kvm_cpu_get_apicid(cpu);
+
+	raw_spin_lock_irqsave(&kvm_svm->avic.table_entries_lock, flags);
+
+	list_for_each_entry(e, &vcpu_svm->nested.physid_ref_entries, link) {
+		u64 sentry = READ_ONCE(*e->sentry);
+		u64 old_sentry = sentry;
+
+		physid_entry_set_apicid(&sentry, l0_apicid);
+
+		if (sentry != old_sentry) {
+			updated_nentries++;
+			WRITE_ONCE(*e->sentry, sentry);
+		}
+	}
+
+	if (updated_nentries)
+		trace_kvm_avic_physid_update_vcpu_host(vcpu->vcpu_id,
+						       l0_apicid, updated_nentries);
+
+	vcpu_svm->nested_avic_active = new_active;
+
+	raw_spin_unlock_irqrestore(&kvm_svm->avic.table_entries_lock, flags);
+}
 
 static bool
 avic_physid_shadow_table_setup_write_tracking(struct kvm *kvm,
@@ -603,6 +657,7 @@ int avic_vm_init(struct kvm *kvm)
 	hash_add(svm_vm_data_hash, &avic->hnode, avic->vm_id);
 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
 
+	raw_spin_lock_init(&avic->table_entries_lock);
 	mutex_init(&avic->tables_lock);
 	INIT_LIST_HEAD(&avic->physid_tables);
 
@@ -1428,9 +1483,51 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu)
 static void avic_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
-
 	__avic_vcpu_put(vcpu);
+	preempt_enable();
+}
+
 
+void __nested_avic_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	lockdep_assert_preemption_disabled();
+
+	/*
+	 * For the same reason as in __avic_vcpu_load there is no
+	 * need to load nested AVIC when this vCPU is blocking
+	 */
+	if (kvm_vcpu_is_blocking(vcpu))
+		return;
+
+	if (svm->nested.initialized)
+		avic_update_peer_physid_entries(vcpu, cpu);
+}
+
+void __nested_avic_put(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	lockdep_assert_preemption_disabled();
+
+	if (svm->nested.initialized)
+		avic_update_peer_physid_entries(vcpu, -1);
+}
+
+void nested_avic_load(struct kvm_vcpu *vcpu)
+{
+	int cpu = get_cpu();
+
+	WARN_ON(cpu != vcpu->cpu);
+	__nested_avic_load(vcpu, cpu);
+	put_cpu();
+}
+
+void nested_avic_put(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	__nested_avic_put(vcpu);
 	preempt_enable();
 }
 
@@ -1468,9 +1565,6 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_vcpu_apicv_active(vcpu))
-		return;
-
        /*
         * Unload the AVIC when the vCPU is about to block, _before_
         * the vCPU actually blocks.
@@ -1484,13 +1578,16 @@ void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
         * IRR and reading IsRunning; the lack of this barrier might be
         * the cause of errata #1235).
         */
-	avic_vcpu_put(vcpu);
+	if (kvm_vcpu_apicv_active(vcpu))
+		avic_vcpu_put(vcpu);
+
+	nested_avic_put(vcpu);
 }
 
 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_vcpu_apicv_active(vcpu))
-		return;
+	if (kvm_vcpu_apicv_active(vcpu))
+		avic_vcpu_load(vcpu);
 
-	avic_vcpu_load(vcpu);
+	nested_avic_load(vcpu);
 }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 75b4f3ac8b1a0..76fbee2c8c5d7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1302,6 +1302,8 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 
 	svm->guest_state_loaded = false;
 
+	INIT_LIST_HEAD(&svm->nested.physid_ref_entries);
+
 	return 0;
 
 error_free_vmsa_page:
@@ -1391,8 +1393,11 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		sd->current_vmcb = svm->vmcb;
 		indirect_branch_prediction_barrier();
 	}
+
 	if (kvm_vcpu_apicv_active(vcpu))
 		__avic_vcpu_load(vcpu, cpu);
+
+	__nested_avic_load(vcpu, cpu);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -1400,6 +1405,8 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	if (kvm_vcpu_apicv_active(vcpu))
 		__avic_vcpu_put(vcpu);
 
+	__nested_avic_put(vcpu);
+
 	svm_prepare_host_switch(vcpu);
 
 	++vcpu->stat.host_state_reload;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index fc15e1f938793..401449dbce65d 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -99,6 +99,7 @@ struct kvm_svm_avic {
 	struct page *physical_id_table_page;
 	struct hlist_node hnode;
 
+	raw_spinlock_t table_entries_lock;
 	struct mutex tables_lock;
 
 	/* List of all shadow tables */
@@ -244,6 +245,9 @@ struct svm_nested_state {
 	 * on its side.
 	 */
 	bool force_msr_bitmap_recalc;
+
+	/* All AVIC shadow PID table entry descriptors that reference this vCPU */
+	struct list_head physid_ref_entries;
 };
 
 struct vcpu_sev_es_state {
@@ -311,6 +315,7 @@ struct vcpu_svm {
 	u32 dfr_reg;
 	struct page *avic_backing_page;
 	u64 *avic_physical_id_cache;
+	bool nested_avic_active;
 
 	/*
 	 * Per-vcpu list of struct amd_svm_iommu_ir:
@@ -678,6 +683,11 @@ int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
 int avic_init_vcpu(struct vcpu_svm *svm);
 void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void __avic_vcpu_put(struct kvm_vcpu *vcpu);
+void __nested_avic_load(struct kvm_vcpu *vcpu, int cpu);
+void __nested_avic_put(struct kvm_vcpu *vcpu);
+void nested_avic_load(struct kvm_vcpu *vcpu);
+void nested_avic_put(struct kvm_vcpu *vcpu);
+
 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
 void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
-- 
2.26.3

