From: Vitaly Kuznetsov <vkuznets@redhat.com>
To: kvm@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>,
Wanpeng Li <wanpengli@tencent.com>,
Jim Mattson <jmattson@google.com>,
Michael Kelley <mikelley@microsoft.com>,
Siddharth Chandrasekaran <sidcha@amazon.de>,
Yuan Yao <yuan.yao@linux.intel.com>,
Maxim Levitsky <mlevitsk@redhat.com>,
linux-hyperv@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v10 05/39] KVM: x86: hyper-v: Handle HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,EX} calls gently
Date: Wed, 21 Sep 2022 17:24:02 +0200
Message-ID: <20220921152436.3673454-6-vkuznets@redhat.com>
In-Reply-To: <20220921152436.3673454-1-vkuznets@redhat.com>
Currently, HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,EX} calls are handled in
exactly the same way as HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE{,EX}: the
whole VPID is flushed, which is sub-optimal. Switch to handling these
requests with the 'flush_tlb_gva()' hook instead, using the newly
introduced TLB flush fifo to queue the requests.
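As an illustration of the entry format (not part of the patch; the
helper names below are hypothetical, and the kernel's u64/PAGE_MASK
definitions are assumed), each 64-bit fifo entry packs a page-aligned
GVA together with a count of additional pages to flush in its low 12
bits:

    /* Sketch only: encode a GVA plus N extra pages into one fifo entry. */
    static inline u64 make_flush_entry(u64 gva, u64 extra_pages)
    {
            /* Assumes extra_pages < PAGE_SIZE, i.e. fits in 12 bits. */
            return (gva & PAGE_MASK) | (extra_pages & ~PAGE_MASK);
    }

    /* Sketch only: the inverse, matching the decode loop in the patch. */
    static inline void decode_flush_entry(u64 entry, u64 *gva, u64 *npages)
    {
            *gva = entry & PAGE_MASK;
            *npages = (entry & ~PAGE_MASK) + 1;     /* N extra => N + 1 total */
    }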
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
arch/x86/kvm/hyperv.c | 101 ++++++++++++++++++++++++++++++++++++------
1 file changed, 88 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index fb0f7342fccf..d5a329cebcc6 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1800,33 +1800,82 @@ static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
sparse_banks, consumed_xmm_halves, offset);
}
-static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu)
+static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[],
+ int consumed_xmm_halves, gpa_t offset)
+{
+ return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt,
+ entries, consumed_xmm_halves, offset);
+}
+
+static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu, u64 *entries, int count)
{
struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY;
+ unsigned long flags;
if (!hv_vcpu)
return;
tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
- kfifo_in_spinlocked(&tlb_flush_fifo->entries, &flush_all_entry,
- 1, &tlb_flush_fifo->write_lock);
+ spin_lock_irqsave(&tlb_flush_fifo->write_lock, flags);
+
+ /*
+ * All entries should fit in the fifo while leaving one slot free for
+ * a 'flush all' entry in case another request comes in. If there is
+ * not enough space, just put a 'flush all' entry there instead.
+ */
+ if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) {
+ WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count);
+ goto out_unlock;
+ }
+
+ /*
+ * Note: a full fifo always contains a 'flush all' entry, so there is no
+ * need to check the return value.
+ */
+ kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1);
+
+out_unlock:
+ spin_unlock_irqrestore(&tlb_flush_fifo->write_lock, flags);
}
void kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
+ int i, j, count;
+ gva_t gva;
- kvm_vcpu_flush_tlb_guest(vcpu);
-
- if (!hv_vcpu)
+ if (!tdp_enabled || !hv_vcpu) {
+ kvm_vcpu_flush_tlb_guest(vcpu);
return;
+ }
tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
+ count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);
+
+ for (i = 0; i < count; i++) {
+ if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
+ goto out_flush_all;
+
+ /*
+ * Lower 12 bits of 'address' encode the number of additional
+ * pages to flush.
+ */
+ gva = entries[i] & PAGE_MASK;
+ for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
+ static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
+
+ ++vcpu->stat.tlb_flush;
+ }
+ return;
+
+out_flush_all:
+ kvm_vcpu_flush_tlb_guest(vcpu);
kfifo_reset_out(&tlb_flush_fifo->entries);
}
@@ -1836,11 +1885,21 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
struct hv_tlb_flush_ex flush_ex;
struct hv_tlb_flush flush;
DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
+ /*
+ * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
+ * entries in the TLB flush fifo. One slot, however, must always be
+ * left free for the 'flush all' entry, which is queued when there is
+ * not enough space for all the requested entries.
+ */
+ u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1];
+ u64 *tlb_flush_entries;
u64 valid_bank_mask;
u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
struct kvm_vcpu *v;
unsigned long i;
bool all_cpus;
+ int consumed_xmm_halves = 0;
+ gpa_t data_offset;
/*
* The Hyper-V TLFS doesn't allow more than 64 sparse banks, e.g. the
@@ -1856,10 +1915,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
flush.address_space = hc->ingpa;
flush.flags = hc->outgpa;
flush.processor_mask = sse128_lo(hc->xmm[0]);
+ consumed_xmm_halves = 1;
} else {
if (unlikely(kvm_read_guest(kvm, hc->ingpa,
&flush, sizeof(flush))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ data_offset = sizeof(flush);
}
trace_kvm_hv_flush_tlb(flush.processor_mask,
@@ -1883,10 +1944,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
flush_ex.flags = hc->outgpa;
memcpy(&flush_ex.hv_vp_set,
&hc->xmm[0], sizeof(hc->xmm[0]));
+ consumed_xmm_halves = 2;
} else {
if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
sizeof(flush_ex))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ data_offset = sizeof(flush_ex);
}
trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
@@ -1902,25 +1965,37 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
return HV_STATUS_INVALID_HYPERCALL_INPUT;
if (all_cpus)
- goto do_flush;
+ goto read_flush_entries;
if (!hc->var_cnt)
goto ret_success;
- if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, 2,
- offsetof(struct hv_tlb_flush_ex,
- hv_vp_set.bank_contents)))
+ if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, consumed_xmm_halves,
+ data_offset))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
+ consumed_xmm_halves += hc->var_cnt;
+ }
+
+read_flush_entries:
+ if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
+ hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
+ hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
+ tlb_flush_entries = NULL;
+ } else {
+ if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries,
+ consumed_xmm_halves, data_offset))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ tlb_flush_entries = __tlb_flush_entries;
}
-do_flush:
/*
* vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
* analyze it here, flush TLB regardless of the specified address space.
*/
if (all_cpus) {
kvm_for_each_vcpu(i, v, kvm)
- hv_tlb_flush_enqueue(v);
+ hv_tlb_flush_enqueue(v, tlb_flush_entries, hc->rep_cnt);
kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
} else {
@@ -1930,7 +2005,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
v = kvm_get_vcpu(kvm, i);
if (!v)
continue;
- hv_tlb_flush_enqueue(v);
+ hv_tlb_flush_enqueue(v, tlb_flush_entries, hc->rep_cnt);
}
kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
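
A note on the queueing invariant above, as a self-contained sketch
(locking omitted; FLUSH_FIFO_SIZE and the flush-all marker below are
stand-ins for KVM_HV_TLB_FLUSH_FIFO_SIZE and KVM_HV_TLB_FLUSHALL_ENTRY).
Because callers pass at most KVM_HV_TLB_FLUSH_FIFO_SIZE - 1 precise
entries and the enqueue path compares with a strict '<' against
kfifo_avail(), one slot always remains for the 'flush all' fallback, so
a full fifo is guaranteed to contain a 'flush all' entry:

    #include <linux/kfifo.h>

    #define FLUSH_FIFO_SIZE 16      /* stand-in; must be a power of two */

    static DEFINE_KFIFO(flush_fifo, u64, FLUSH_FIFO_SIZE);
    static u64 flush_all_entry = ~0ULL;     /* stand-in 'flush all' marker */

    /* Queue 'count' precise entries, or degrade to 'flush all'. */
    static void enqueue_sketch(u64 *entries, int count)
    {
            /* Strict '<' keeps one slot free for a later 'flush all'. */
            if (count && entries && count < kfifo_avail(&flush_fifo)) {
                    kfifo_in(&flush_fifo, entries, count);
                    return;
            }
            /*
             * Nothing precise was passed, or it would not fit: fall back
             * to a single 'flush all' entry. If the fifo is already full
             * it must contain 'flush all', so a failed put is harmless.
             */
            kfifo_in(&flush_fifo, &flush_all_entry, 1);
    }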
--
2.37.3
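
For reference, the 'data_offset' arithmetic in kvm_hv_flush_tlb() walks
the guest's hypercall input page: the rep list of GVA entries follows
the fixed 'struct hv_tlb_flush' header and, for the EX variant, also
the variable-size sparse bank array. A sketch of that computation (the
helper is hypothetical; the struct names are the hyperv-tlfs.h
definitions the code reads into):

    /* Sketch only: where the GVA rep list starts in the input page. */
    static gpa_t gva_list_offset(bool ex, u64 var_cnt)
    {
            if (!ex)
                    return sizeof(struct hv_tlb_flush);
            /* EX input: fixed header + var_cnt sparse bank entries. */
            return sizeof(struct hv_tlb_flush_ex) + var_cnt * sizeof(u64);
    }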