From: Vitaly Kuznetsov <vkuznets@redhat.com>
To: kvm@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>,
Sean Christopherson <seanjc@google.com>
Cc: Wanpeng Li <wanpengli@tencent.com>,
Jim Mattson <jmattson@google.com>,
Michael Kelley <mikelley@microsoft.com>,
Siddharth Chandrasekaran <sidcha@amazon.de>,
Yuan Yao <yuan.yao@linux.intel.com>,
Maxim Levitsky <mlevitsk@redhat.com>,
linux-hyperv@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v9 17/40] KVM: x86: hyper-v: L2 TLB flush
Date: Wed, 3 Aug 2022 15:45:57 +0200
Message-ID: <20220803134557.399286-1-vkuznets@redhat.com>
In-Reply-To: <20220803134110.397885-1-vkuznets@redhat.com>
Handle L2 TLB flush requests by going through all vCPUs and checking
whether there are vCPUs running the same VM_ID with a VP_ID specified
in the request. Perform a synthetic exit to L2 upon finish when the
guest requested it (non-zero TlbLockCount in the partition assist page).
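To illustrate, the matching logic is essentially the following (a condensed
sketch of the loop added to kvm_hv_flush_tlb() in the hunk below; FIFO
locking and flush entry handling are omitted):

  kvm_for_each_vcpu(i, v, kvm) {
          struct kvm_vcpu_hv *hv_v = to_hv_vcpu(v);

          /* Skip vCPUs which never ran a nested guest or run a different L2 VM */
          if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
                  continue;

          /* Honor the requested VP set unless the flush targets all VPs */
          if (!all_cpus &&
              !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
                                      sparse_banks))
                  continue;

          /* Queue the flush to the vCPU's L2 FIFO and kick it */
          __set_bit(i, vcpu_mask);
          hv_tlb_flush_enqueue(v, kvm_hv_get_tlb_flush_fifo(v, true),
                               tlb_flush_entries, hc->rep_cnt);
  }
  kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);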
Note, while checking VM_ID/VP_ID of running vCPUs seems to be a bit
racy, we count on the fact that KVM flushes the whole L2 VPID upon
transition. Also, the KVM_REQ_HV_TLB_FLUSH request needs to be processed
upon each transition between L1 and L2 to make sure all pending flush
requests are always handled.
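The transition side of this is added by the nVMX/nSVM enablement patches
later in the series; conceptually it boils down to something like the
following sketch (the function name and exact placement are illustrative
assumptions, not the actual hunks from those patches):

  /*
   * Illustrative sketch only: on every transition between L1 and L2,
   * re-raise KVM_REQ_HV_TLB_FLUSH so the FIFO of the context KVM is
   * about to run gets checked. Entries are queued asynchronously by
   * other vCPUs, so the check cannot be skipped.
   */
  static void hv_nested_transition_tlb_flush_sketch(struct kvm_vcpu *vcpu)
  {
          if (to_hv_vcpu(vcpu))
                  kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
  }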
For reference, the Hyper-V TLFS refers to the feature as "Direct
Virtual Flush".
Note, nVMX/nSVM code does not handle VMCALL/VMMCALL from L2 yet.
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
arch/x86/kvm/hyperv.c | 82 ++++++++++++++++++++++++++++++++++++-------
arch/x86/kvm/hyperv.h | 3 --
arch/x86/kvm/trace.h | 21 ++++++-----
3 files changed, 82 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 401649a9d288..be25a8ddab9d 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -34,6 +34,7 @@
#include <linux/eventfd.h>
#include <asm/apicdef.h>
+#include <asm/mshyperv.h>
#include <trace/events/kvm.h>
#include "trace.h"
@@ -1835,9 +1836,10 @@ static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc
entries, consumed_xmm_halves, offset);
}
-static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu, u64 *entries, int count)
+static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo,
+ u64 *entries, int count)
{
- struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY;
unsigned long flags;
@@ -1845,9 +1847,6 @@ static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu, u64 *entries, int count)
if (!hv_vcpu)
return;
- /* kvm_hv_flush_tlb() is not ready to handle requests for L2s yet */
- tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo[HV_L1_TLB_FLUSH_FIFO];
-
spin_lock_irqsave(&tlb_flush_fifo->write_lock, flags);
/*
@@ -1916,6 +1915,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
struct hv_tlb_flush_ex flush_ex;
struct hv_tlb_flush flush;
DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
+ struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
/*
* Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
* entries on the TLB flush fifo. The last entry, however, needs to be
@@ -1959,7 +1959,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
}
trace_kvm_hv_flush_tlb(flush.processor_mask,
- flush.address_space, flush.flags);
+ flush.address_space, flush.flags,
+ is_guest_mode(vcpu));
valid_bank_mask = BIT_ULL(0);
sparse_banks[0] = flush.processor_mask;
@@ -1990,7 +1991,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
flush_ex.hv_vp_set.format,
flush_ex.address_space,
- flush_ex.flags);
+ flush_ex.flags, is_guest_mode(vcpu));
valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
all_cpus = flush_ex.hv_vp_set.format !=
@@ -2028,19 +2029,57 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
* vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
* analyze it here, flush TLB regardless of the specified address space.
*/
- if (all_cpus) {
- kvm_for_each_vcpu(i, v, kvm)
- hv_tlb_flush_enqueue(v, tlb_flush_entries, hc->rep_cnt);
+ if (all_cpus && !is_guest_mode(vcpu)) {
+ kvm_for_each_vcpu(i, v, kvm) {
+ tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
+ hv_tlb_flush_enqueue(v, tlb_flush_fifo,
+ tlb_flush_entries, hc->rep_cnt);
+ }
kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
- } else {
+ } else if (!is_guest_mode(vcpu)) {
sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
v = kvm_get_vcpu(kvm, i);
if (!v)
continue;
- hv_tlb_flush_enqueue(v, tlb_flush_entries, hc->rep_cnt);
+ tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
+ hv_tlb_flush_enqueue(v, tlb_flush_fifo,
+ tlb_flush_entries, hc->rep_cnt);
+ }
+
+ kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
+ } else {
+ struct kvm_vcpu_hv *hv_v;
+
+ bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
+
+ kvm_for_each_vcpu(i, v, kvm) {
+ hv_v = to_hv_vcpu(v);
+
+ /*
+ * The following check races with nested vCPUs entering/exiting
+ * and/or migrating between L1's vCPUs, however the only case when
+ * KVM *must* flush the TLB is when the target L2 vCPU keeps
+ * running on the same L1 vCPU from the moment of the request until
+ * kvm_hv_flush_tlb() returns. TLB is fully flushed in all other
+ * cases, e.g. when the target L2 vCPU migrates to a different L1
+ * vCPU or when the corresponding L1 vCPU temporary switches to a
+ * different L2 vCPU while the request is being processed.
+ */
+ if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
+ continue;
+
+ if (!all_cpus &&
+ !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
+ sparse_banks))
+ continue;
+
+ __set_bit(i, vcpu_mask);
+ tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true);
+ hv_tlb_flush_enqueue(v, tlb_flush_fifo,
+ tlb_flush_entries, hc->rep_cnt);
}
kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
@@ -2228,10 +2267,27 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
+ int ret;
+
trace_kvm_hv_hypercall_done(result);
kvm_hv_hypercall_set_result(vcpu, result);
++vcpu->stat.hypercalls;
- return kvm_skip_emulated_instruction(vcpu);
+ ret = kvm_skip_emulated_instruction(vcpu);
+
+ if (unlikely(hv_result_success(result) && is_guest_mode(vcpu)
+ && kvm_hv_is_tlb_flush_hcall(vcpu))) {
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ u32 tlb_lock_count;
+
+ if (unlikely(kvm_read_guest(vcpu->kvm, hv_vcpu->nested.pa_page_gpa,
+ &tlb_lock_count, sizeof(tlb_lock_count))))
+ kvm_inject_gp(vcpu, 0);
+
+ if (tlb_lock_count)
+ kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu);
+ }
+
+ return ret;
}
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 239946ea483b..892c252b9bc3 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -155,9 +155,6 @@ static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struc
int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
HV_L1_TLB_FLUSH_FIFO;
- /* KVM does not handle L2 TLB flush requests yet */
- WARN_ON_ONCE(i != HV_L1_TLB_FLUSH_FIFO);
-
return &hv_vcpu->tlb_flush_fifo[i];
}
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 2120d7c060a9..44471d9c8190 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1528,38 +1528,41 @@ TRACE_EVENT(kvm_hv_timer_state,
* Tracepoint for kvm_hv_flush_tlb.
*/
TRACE_EVENT(kvm_hv_flush_tlb,
- TP_PROTO(u64 processor_mask, u64 address_space, u64 flags),
- TP_ARGS(processor_mask, address_space, flags),
+ TP_PROTO(u64 processor_mask, u64 address_space, u64 flags, bool guest_mode),
+ TP_ARGS(processor_mask, address_space, flags, guest_mode),
TP_STRUCT__entry(
__field(u64, processor_mask)
__field(u64, address_space)
__field(u64, flags)
+ __field(bool, guest_mode)
),
TP_fast_assign(
__entry->processor_mask = processor_mask;
__entry->address_space = address_space;
__entry->flags = flags;
+ __entry->guest_mode = guest_mode;
),
- TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx",
+ TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx %s",
__entry->processor_mask, __entry->address_space,
- __entry->flags)
+ __entry->flags, __entry->guest_mode ? "(L2)" : "")
);
/*
* Tracepoint for kvm_hv_flush_tlb_ex.
*/
TRACE_EVENT(kvm_hv_flush_tlb_ex,
- TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags),
- TP_ARGS(valid_bank_mask, format, address_space, flags),
+ TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags, bool guest_mode),
+ TP_ARGS(valid_bank_mask, format, address_space, flags, guest_mode),
TP_STRUCT__entry(
__field(u64, valid_bank_mask)
__field(u64, format)
__field(u64, address_space)
__field(u64, flags)
+ __field(bool, guest_mode)
),
TP_fast_assign(
@@ -1567,12 +1570,14 @@ TRACE_EVENT(kvm_hv_flush_tlb_ex,
__entry->format = format;
__entry->address_space = address_space;
__entry->flags = flags;
+ __entry->guest_mode = guest_mode;
),
TP_printk("valid_bank_mask 0x%llx format 0x%llx "
- "address_space 0x%llx flags 0x%llx",
+ "address_space 0x%llx flags 0x%llx %s",
__entry->valid_bank_mask, __entry->format,
- __entry->address_space, __entry->flags)
+ __entry->address_space, __entry->flags,
+ __entry->guest_mode ? "(L2)" : "")
);
/*
--
2.35.3