From: Vitaly Kuznetsov <vkuznets@redhat.com>
To: kvm@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <seanjc@google.com>
Cc: Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>,
	Michael Kelley <mikelley@microsoft.com>,
	Siddharth Chandrasekaran <sidcha@amazon.de>,
	Yuan Yao <yuan.yao@linux.intel.com>,
	Maxim Levitsky <mlevitsk@redhat.com>,
	linux-hyperv@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v12 09/46] KVM: x86: hyper-v: Introduce TLB flush fifo
Date: Fri, 21 Oct 2022 17:34:44 +0200
Message-ID: <20221021153521.1216911-10-vkuznets@redhat.com>
In-Reply-To: <20221021153521.1216911-1-vkuznets@redhat.com>

To allow flushing individual GVAs instead of always flushing the whole
VPID, a per-vCPU structure is needed to pass the requests. Use a standard
'kfifo' to queue two types of entries: individual GVA (GFN + up to 4095
following GFNs in the lower 12 bits) and 'flush all'.
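
For illustration only, a minimal userspace sketch of the entry encoding
described above; make_entry() and FLUSH_ALL_ENTRY are hypothetical names
made up for this example and are not part of the patch:

  #include <stdint.h>
  #include <stdio.h>

  /* Sentinel meaning "flush everything" (an all-ones entry). */
  #define FLUSH_ALL_ENTRY ((uint64_t)-1)

  /* Pack a GFN and a count of additional pages (0..4095) into one u64. */
  static uint64_t make_entry(uint64_t gfn, unsigned int extra_pages)
  {
      return (gfn << 12) | (extra_pages & 0xfff);
  }

  int main(void)
  {
      uint64_t e = make_entry(0x1234, 7); /* GFN 0x1234 + 7 following GFNs */

      if (e == FLUSH_ALL_ENTRY)
          printf("flush everything\n");
      else
          printf("flush GFN 0x%llx + %llu following GFNs\n",
                 (unsigned long long)(e >> 12),
                 (unsigned long long)(e & 0xfff));
      return 0;
  }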

The size of the fifo is arbitrarily set to '16'.

Note, kvm_hv_flush_tlb() only queues 'flush all' entries for now, and
kvm_hv_vcpu_flush_tlb() doesn't actually read the fifo; it just resets the
queue before returning -EOPNOTSUPP (which triggers a full TLB flush). The
functional change is therefore very small, but the infrastructure is now
prepared to handle individual GVA flush requests.
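
Also for illustration only (not kernel code): a sketch of the consumer-side
fallback this enables in vcpu_enter_guest(), using hypothetical stand-in
helpers in place of the real KVM functions:

  #include <errno.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-ins for the real KVM helpers. */
  static bool hv_tlb_flush_requested(void)
  {
      return true; /* pretend KVM_REQ_HV_TLB_FLUSH is pending */
  }

  static int hv_vcpu_flush_tlb(void)
  {
      return -EOPNOTSUPP; /* precise flushing isn't implemented yet */
  }

  static void flush_tlb_guest(void)
  {
      printf("full guest TLB flush\n");
  }

  int main(void)
  {
      /* Try the precise Hyper-V flush first, fall back to a full flush. */
      if (hv_tlb_flush_requested() && hv_vcpu_flush_tlb())
          flush_tlb_guest();
      return 0;
  }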

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 20 ++++++++++++++
 arch/x86/kvm/hyperv.c           | 47 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/hyperv.h           | 15 +++++++++++
 arch/x86/kvm/svm/svm.c          |  2 +-
 arch/x86/kvm/x86.c              | 11 ++++++--
 5 files changed, 92 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 05d90e11e535..f8ee72dd8660 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
 #include <linux/clocksource.h>
 #include <linux/irqbypass.h>
 #include <linux/hyperv.h>
+#include <linux/kfifo.h>
 
 #include <asm/apic.h>
 #include <asm/pvclock-abi.h>
@@ -599,6 +600,23 @@ struct kvm_vcpu_hv_synic {
 	bool dont_zero_synic_pages;
 };
 
+/* The maximum number of entries on the TLB flush fifo. */
+#define KVM_HV_TLB_FLUSH_FIFO_SIZE (16)
+/*
+ * Note: the following 'magic' entry is made up by KVM to avoid putting
+ * anything besides GVA on the TLB flush fifo. It is theoretically possible
+ * to observe a request to flush 4095 PFNs starting from 0xfffffffffffff000
+ * which will look identical. KVM's action to 'flush everything' instead of
+ * flushing these particular addresses is, however, fully legitimate as
+ * flushing more than requested is always OK.
+ */
+#define KVM_HV_TLB_FLUSHALL_ENTRY  ((u64)-1)
+
+struct kvm_vcpu_hv_tlb_flush_fifo {
+	spinlock_t write_lock;
+	DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE);
+};
+
 /* Hyper-V per vcpu emulation context */
 struct kvm_vcpu_hv {
 	struct kvm_vcpu *vcpu;
@@ -620,6 +638,8 @@ struct kvm_vcpu_hv {
 		u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */
 		u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */
 	} cpuid_cache;
+
+	struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo;
 };
 
 /* Xen HVM per vcpu emulation context */
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 3c0f639f6a05..9d9a5ff2d54b 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -29,6 +29,7 @@
 #include <linux/kvm_host.h>
 #include <linux/highmem.h>
 #include <linux/sched/cputime.h>
+#include <linux/spinlock.h>
 #include <linux/eventfd.h>
 
 #include <asm/apicdef.h>
@@ -954,6 +955,9 @@ int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
 
 	hv_vcpu->vp_index = vcpu->vcpu_idx;
 
+	INIT_KFIFO(hv_vcpu->tlb_flush_fifo.entries);
+	spin_lock_init(&hv_vcpu->tlb_flush_fifo.write_lock);
+
 	return 0;
 }
 
@@ -1783,6 +1787,37 @@ static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
 			      var_cnt * sizeof(*sparse_banks));
 }
 
+static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+	u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY;
+
+	if (!hv_vcpu)
+		return;
+
+	tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
+
+	kfifo_in_spinlocked_noirqsave(&tlb_flush_fifo->entries, &flush_all_entry,
+				      1, &tlb_flush_fifo->write_lock);
+}
+
+int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+	if (!hv_vcpu)
+		return -EINVAL;
+
+	tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
+
+	kfifo_reset_out(&tlb_flush_fifo->entries);
+
+	/* Precise flushing isn't implemented yet. */
+	return -EOPNOTSUPP;
+}
+
 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -1791,6 +1826,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
 	u64 valid_bank_mask;
 	u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
+	struct kvm_vcpu *v;
+	unsigned long i;
 	bool all_cpus;
 
 	/*
@@ -1870,10 +1907,20 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 	 * analyze it here, flush TLB regardless of the specified address space.
 	 */
 	if (all_cpus) {
+		kvm_for_each_vcpu(i, v, kvm)
+			hv_tlb_flush_enqueue(v);
+
 		kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
 	} else {
 		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
 
+		for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
+			v = kvm_get_vcpu(kvm, i);
+			if (!v)
+				continue;
+			hv_tlb_flush_enqueue(v);
+		}
+
 		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
 	}
 
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 1030b1b50552..f79edf9234cd 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -151,4 +151,19 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
 int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 		     struct kvm_cpuid_entry2 __user *entries);
 
+static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+	if (!hv_vcpu || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+		return;
+
+	tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
+
+	kfifo_reset_out(&tlb_flush_fifo->entries);
+}
+
+int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
+
 #endif
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9210a8840224..25dc38a94fa0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3743,7 +3743,7 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
 	 * A TLB flush for the current ASID flushes both "host" and "guest" TLB
 	 * entries, and thus is a superset of Hyper-V's fine grained flushing.
 	 */
-	kvm_clear_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+	kvm_hv_vcpu_purge_flush_tlb(vcpu);
 
 	/*
 	 * Flush only the current ASID even if the TLB flush was invoked via
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bf2aa77b7a2f..ed8b7802ba88 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3422,7 +3422,7 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
 	 * Flushing all "guest" TLB is always a superset of Hyper-V's fine
 	 * grained flushing.
 	 */
-	kvm_clear_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+	kvm_hv_vcpu_purge_flush_tlb(vcpu);
 }
 
 
@@ -10497,7 +10497,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 		kvm_service_local_tlb_flush_requests(vcpu);
 
-		if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+		/*
+		 * Fall back to a "full" guest flush if Hyper-V's precise
+		 * flushing fails.  Note, Hyper-V's flushing is per-vCPU, but
+		 * the flushes are considered "remote" and not "local" because
+		 * the requests can be initiated from other vCPUs.
+		 */
+		if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) &&
+		    kvm_hv_vcpu_flush_tlb(vcpu))
 			kvm_vcpu_flush_tlb_guest(vcpu);
 
 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
-- 
2.37.3


Thread overview: 58+ messages
2022-10-21 15:34 [PATCH v12 00/46] KVM: x86: hyper-v: Fine-grained TLB flush + L2 TLB flush features Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 01/46] x86/hyperv: Move VMCB enlightenment definitions to hyperv-tlfs.h Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 02/46] KVM: selftests: Move "struct hv_enlightenments" to x86_64/svm.h Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 03/46] KVM: SVM: Add a proper field for Hyper-V VMCB enlightenments Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 04/46] x86/hyperv: KVM: Rename "hv_enlightenments" to "hv_vmcb_enlightenments" Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 05/46] KVM: x86: Rename 'enable_direct_tlbflush' to 'enable_l2_tlb_flush' Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 06/46] KVM: VMX: Rename "vmx/evmcs.{ch}" to "vmx/hyperv.{ch}" Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 07/46] KVM: x86: Move clearing of TLB_FLUSH_CURRENT to kvm_vcpu_flush_tlb_all() Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 08/46] KVM: x86: hyper-v: Resurrect dedicated KVM_REQ_HV_TLB_FLUSH flag Vitaly Kuznetsov
2022-10-21 15:34 ` Vitaly Kuznetsov [this message]
2022-10-21 15:34 ` [PATCH v12 10/46] KVM: x86: hyper-v: Add helper to read hypercall data for array Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 11/46] KVM: x86: hyper-v: Handle HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,EX} calls gently Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 12/46] KVM: x86: hyper-v: Expose support for extended gva ranges for flush hypercalls Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 13/46] KVM: x86: Prepare kvm_hv_flush_tlb() to handle L2's GPAs Vitaly Kuznetsov
2022-10-26 22:26   ` Sean Christopherson
2022-10-27  8:18     ` Vitaly Kuznetsov
2022-10-27 21:39       ` Sean Christopherson
2022-10-21 15:34 ` [PATCH v12 14/46] x86/hyperv: Introduce HV_MAX_SPARSE_VCPU_BANKS/HV_VCPUS_PER_SPARSE_BANK constants Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 15/46] KVM: x86: hyper-v: Use HV_MAX_SPARSE_VCPU_BANKS/HV_VCPUS_PER_SPARSE_BANK instead of raw '64' Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 16/46] KVM: x86: hyper-v: Don't use sparse_set_to_vcpu_mask() in kvm_hv_send_ipi() Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 17/46] KVM: x86: hyper-v: Create a separate fifo for L2 TLB flush Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 18/46] KVM: x86: hyper-v: Use preallocated buffer in 'struct kvm_vcpu_hv' instead of on-stack 'sparse_banks' Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 19/46] KVM: nVMX: Keep track of hv_vm_id/hv_vp_id when eVMCS is in use Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 20/46] KVM: nSVM: Keep track of Hyper-V hv_vm_id/hv_vp_id Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 21/46] KVM: x86: Introduce .hv_inject_synthetic_vmexit_post_tlb_flush() nested hook Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 22/46] KVM: x86: hyper-v: Introduce kvm_hv_is_tlb_flush_hcall() Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 23/46] KVM: x86: hyper-v: L2 TLB flush Vitaly Kuznetsov
2022-10-21 15:34 ` [PATCH v12 24/46] KVM: x86: hyper-v: Introduce fast guest_hv_cpuid_has_l2_tlb_flush() check Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 25/46] KVM: nVMX: hyper-v: Cache VP assist page in 'struct kvm_vcpu_hv' Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 26/46] KVM: nVMX: hyper-v: Enable L2 TLB flush Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 27/46] KVM: x86: Make kvm_hv_get_assist_page() return 0/-errno Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 28/46] KVM: nSVM: hyper-v: Enable L2 TLB flush Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 29/46] KVM: x86: Expose Hyper-V L2 TLB flush feature Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 30/46] KVM: selftests: Better XMM read/write helpers Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 31/46] KVM: selftests: Move HYPERV_LINUX_OS_ID definition to a common header Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 32/46] KVM: selftests: Move the function doing Hyper-V hypercall " Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 33/46] KVM: selftests: Hyper-V PV IPI selftest Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 34/46] KVM: selftests: Fill in vm->vpages_mapped bitmap in virt_map() too Vitaly Kuznetsov
2022-10-26 21:57   ` Sean Christopherson
2022-10-21 15:35 ` [PATCH v12 35/46] KVM: selftests: Export vm_vaddr_unused_gap() to make it possible to request unmapped ranges Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 36/46] KVM: selftests: Drop helpers to read/write page table entries Vitaly Kuznetsov
2022-10-26 21:59   ` Sean Christopherson
2022-10-21 15:35 ` [PATCH v12 37/46] KVM: selftests: Hyper-V PV TLB flush selftest Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 38/46] KVM: selftests: Sync 'struct hv_enlightened_vmcs' definition with hyperv-tlfs.h Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 39/46] KVM: selftests: Sync 'struct hv_vp_assist_page' " Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 40/46] KVM: selftests: Move Hyper-V VP assist page enablement out of evmcs.h Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 41/46] KVM: selftests: Split off load_evmcs() from load_vmcs() Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 42/46] KVM: selftests: Create a vendor independent helper to allocate Hyper-V specific test pages Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 43/46] KVM: selftests: Allocate Hyper-V partition assist page Vitaly Kuznetsov
2022-10-21 15:35 ` [PATCH v12 44/46] KVM: selftests: evmcs_test: Introduce L2 TLB flush test Vitaly Kuznetsov
2022-10-26 22:21   ` Sean Christopherson
2022-10-21 15:35 ` [PATCH v12 45/46] KVM: selftests: hyperv_svm_test: " Vitaly Kuznetsov
2022-10-26 22:23   ` Sean Christopherson
2022-10-21 15:35 ` [PATCH v12 46/46] KVM: selftests: Rename 'evmcs_test' to 'hyperv_evmcs' Vitaly Kuznetsov
2022-10-26 22:28 ` [PATCH v12 00/46] KVM: x86: hyper-v: Fine-grained TLB flush + L2 TLB flush features Sean Christopherson
2022-10-26 22:31   ` Sean Christopherson
2022-10-31  9:18     ` Vitaly Kuznetsov
2022-11-01 14:54       ` Vitaly Kuznetsov
