* [PATCH V5 4/10] KVM/VMX: Add hv tlb range flush support
[not found] <20181108091447.8275-1-Tianyu.Lan@microsoft.com>
@ 2018-11-08 9:14 ` ltykernel
2018-11-08 9:14 ` [PATCH V5 8/10] KVM/MMU: Move tlb flush in kvm_set_pte_rmapp() to kvm_mmu_notifier_change_pte() ltykernel
` (2 subsequent siblings)
3 siblings, 0 replies; 4+ messages in thread
From: ltykernel @ 2018-11-08 9:14 UTC (permalink / raw)
Cc: Lan Tianyu, christoffer.dall, marc.zyngier, linux,
catalin.marinas, will.deacon, jhogan, ralf, paul.burton, paulus,
benh, mpe, pbonzini, rkrcmar, tglx, mingo, bp, hpa, x86,
linux-arm-kernel, kvmarm, linux-kernel, linux-mips, kvm-ppc,
linuxppc-dev, kvm, michael.h.kelley, kys, vkuznets
From: Lan Tianyu <Tianyu.Lan@microsoft.com>
This patch registers the tlb_remote_flush_with_range callback with
the hv tlb range flush interface.
Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
---
Change since v4:
- Use new function kvm_fill_hv_flush_list_func() to fill flush
request.
Change since v3:
- Merge Vitaly's don't pass EPT configuration info to
vmx_hv_remote_flush_tlb() fix.
Change since v1:
- Pass flush range with new hyper-v tlb flush struct rather
than KVM tlb flush struct.
---
arch/x86/kvm/vmx.c | 69 ++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 49 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index edbc96cb990a..405dfbde70b2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1567,7 +1567,38 @@ static void check_ept_pointer_match(struct kvm *kvm)
to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
}
-static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
+int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+ void *data)
+{
+ struct kvm_tlb_range *range = data;
+
+ return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
+ range->pages);
+}
+
+static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
+{
+ u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
+
+ /* If ept_pointer is invalid pointer, bypass flush request. */
+ if (!VALID_PAGE(ept_pointer))
+ return 0;
+
+ /*
+ * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address
+ * of the base of EPT PML4 table, strip off EPT configuration
+ * information.
+ */
+ if (range)
+ return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
+ kvm_fill_hv_flush_list_func, (void *)range);
+ else
+ return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
+}
+
+static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range)
{
struct kvm_vcpu *vcpu;
int ret = -ENOTSUPP, i;
@@ -1577,30 +1608,23 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
check_ept_pointer_match(kvm);
- /*
- * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the
- * base of EPT PML4 table, strip off EPT configuration information.
- * If ept_pointer is invalid pointer, bypass the flush request.
- */
if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!VALID_PAGE(to_vmx(vcpu)->ept_pointer))
- return 0;
-
- ret |= hyperv_flush_guest_mapping(
- to_vmx(vcpu)->ept_pointer & PAGE_MASK);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ ret |= __hv_remote_flush_tlb_with_range(
+ kvm, vcpu, range);
} else {
- if (!VALID_PAGE(to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer))
- return 0;
-
- ret = hyperv_flush_guest_mapping(
- to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
+ ret = __hv_remote_flush_tlb_with_range(kvm,
+ kvm_get_vcpu(kvm, 0), range);
}
spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
return ret;
}
+
+static int hv_remote_flush_tlb(struct kvm *kvm)
+{
+ return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
@@ -7957,8 +7981,11 @@ static __init int hardware_setup(void)
#if IS_ENABLED(CONFIG_HYPERV)
if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
- && enable_ept)
- kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
+ && enable_ept) {
+ kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
+ kvm_x86_ops->tlb_remote_flush_with_range =
+ hv_remote_flush_tlb_with_range;
+ }
#endif
if (!cpu_has_vmx_ple()) {
@@ -11567,6 +11594,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
+ vmx->ept_pointer = INVALID_PAGE;
+
vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
/*
--
2.14.4
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH V5 8/10] KVM/MMU: Move tlb flush in kvm_set_pte_rmapp() to kvm_mmu_notifier_change_pte()
[not found] <20181108091447.8275-1-Tianyu.Lan@microsoft.com>
2018-11-08 9:14 ` [PATCH V5 4/10] KVM/VMX: Add hv tlb range flush support ltykernel
@ 2018-11-08 9:14 ` ltykernel
2018-11-08 9:14 ` [PATCH V5 9/10] KVM/MMU: Flush tlb directly in the kvm_set_pte_rmapp() ltykernel
2018-11-08 9:14 ` [PATCH V5 10/10] KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range() ltykernel
3 siblings, 0 replies; 4+ messages in thread
From: ltykernel @ 2018-11-08 9:14 UTC (permalink / raw)
Cc: Lan Tianyu, pbonzini, rkrcmar, tglx, mingo, bp, hpa, x86, kvm,
linux-kernel, michael.h.kelley, kys, vkuznets, linux
From: Lan Tianyu <Tianyu.Lan@microsoft.com>
This patch moves the tlb flush in kvm_set_pte_rmapp() to
kvm_mmu_notifier_change_pte() in order to avoid a redundant tlb flush.
Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
---
arch/x86/kvm/mmu.c | 8 ++------
virt/kvm/kvm_main.c | 5 ++++-
2 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 06bfcd327ef6..b13b419166c4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1776,10 +1776,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
}
}
- if (need_flush)
- kvm_flush_remote_tlbs(kvm);
-
- return 0;
+ return need_flush;
}
struct slot_rmap_walk_iterator {
@@ -1915,8 +1912,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
- kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
- return 0;
+ return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2679e476b6c3..dac4b0446aed 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -354,7 +354,10 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
- kvm_set_spte_hva(kvm, address, pte);
+
+ if (kvm_set_spte_hva(kvm, address, pte))
+ kvm_flush_remote_tlbs(kvm);
+
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
--
2.14.4
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH V5 9/10] KVM/MMU: Flush tlb directly in the kvm_set_pte_rmapp()
[not found] <20181108091447.8275-1-Tianyu.Lan@microsoft.com>
2018-11-08 9:14 ` [PATCH V5 4/10] KVM/VMX: Add hv tlb range flush support ltykernel
2018-11-08 9:14 ` [PATCH V5 8/10] KVM/MMU: Move tlb flush in kvm_set_pte_rmapp() to kvm_mmu_notifier_change_pte() ltykernel
@ 2018-11-08 9:14 ` ltykernel
2018-11-08 9:14 ` [PATCH V5 10/10] KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range() ltykernel
3 siblings, 0 replies; 4+ messages in thread
From: ltykernel @ 2018-11-08 9:14 UTC (permalink / raw)
Cc: Lan Tianyu, pbonzini, rkrcmar, tglx, mingo, bp, hpa, x86, kvm,
linux-kernel, michael.h.kelley, kys, vkuznets, linux
From: Lan Tianyu <Tianyu.Lan@microsoft.com>
This patch flushes the tlb directly in kvm_set_pte_rmapp()
and returns 0.
Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
---
arch/x86/kvm/mmu.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b13b419166c4..39e0e2572710 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1776,6 +1776,11 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
}
}
+ if (need_flush && kvm_available_flush_tlb_with_range()) {
+ kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
+ return 0;
+ }
+
return need_flush;
}
--
2.14.4
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH V5 10/10] KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range()
[not found] <20181108091447.8275-1-Tianyu.Lan@microsoft.com>
` (2 preceding siblings ...)
2018-11-08 9:14 ` [PATCH V5 9/10] KVM/MMU: Flush tlb directly in the kvm_set_pte_rmapp() ltykernel
@ 2018-11-08 9:14 ` ltykernel
3 siblings, 0 replies; 4+ messages in thread
From: ltykernel @ 2018-11-08 9:14 UTC (permalink / raw)
Cc: Lan Tianyu, pbonzini, rkrcmar, tglx, mingo, bp, hpa, x86, kvm,
linux-kernel, michael.h.kelley, kys, vkuznets, linux
From: Lan Tianyu <Tianyu.Lan@microsoft.com>
Originally, the tlb flush is done by slot_handle_level_range(). This patch
flushes the tlb directly in kvm_zap_gfn_range() when a range
flush is available.
Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
---
Change since v4:
Move the operation of setting flush_tlb out of the for loop.
---
arch/x86/kvm/mmu.c | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 39e0e2572710..898560b0807c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5633,8 +5633,13 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
+ bool flush_tlb = true;
+ bool flush = false;
int i;
+ if (kvm_available_flush_tlb_with_range())
+ flush_tlb = false;
+
spin_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
@@ -5646,12 +5651,17 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
if (start >= end)
continue;
- slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
- PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true);
+ flush |= slot_handle_level_range(kvm, memslot,
+ kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
+ PT_MAX_HUGEPAGE_LEVEL, start,
+ end - 1, flush_tlb);
}
}
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+ gfn_end - gfn_start + 1);
+
spin_unlock(&kvm->mmu_lock);
}
--
2.14.4
^ permalink raw reply related [flat|nested] 4+ messages in thread
end of thread, other threads:[~2018-11-08 9:14 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <20181108091447.8275-1-Tianyu.Lan@microsoft.com>
2018-11-08 9:14 ` [PATCH V5 4/10] KVM/VMX: Add hv tlb range flush support ltykernel
2018-11-08 9:14 ` [PATCH V5 8/10] KVM/MMU: Move tlb flush in kvm_set_pte_rmapp() to kvm_mmu_notifier_change_pte() ltykernel
2018-11-08 9:14 ` [PATCH V5 9/10] KVM/MMU: Flush tlb directly in the kvm_set_pte_rmapp() ltykernel
2018-11-08 9:14 ` [PATCH V5 10/10] KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range() ltykernel
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).