From: Marc Zyngier <maz@kernel.org>
To: unlisted-recipients:; (no To-header on input)
Cc: "James Morse" <james.morse@arm.com>,
"Julien Thierry" <julien.thierry.kdev@gmail.com>,
"Suzuki K Poulose" <suzuki.poulose@arm.com>,
"James Hogan" <jhogan@kernel.org>,
"Paul Mackerras" <paulus@ozlabs.org>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Radim Krčmář" <rkrcmar@redhat.com>,
"Sean Christopherson" <sean.j.christopherson@intel.com>,
"Vitaly Kuznetsov" <vkuznets@redhat.com>,
"Wanpeng Li" <wanpengli@tencent.com>,
"Jim Mattson" <jmattson@google.com>,
"Joerg Roedel" <joro@8bytes.org>,
linux-arm-kernel@lists.infradead.org,
kvmarm@lists.cs.columbia.edu, linux-mips@vger.kernel.org,
kvm-ppc@vger.kernel.org, kvm@vger.kernel.org
Subject: [PATCH 1/7] KVM: Pass mmu_notifier_range down to kvm_unmap_hva_range()
Date: Fri, 13 Dec 2019 18:24:57 +0000
Message-ID: <20191213182503.14460-2-maz@kernel.org>
In-Reply-To: <20191213182503.14460-1-maz@kernel.org>
kvm_unmap_hva_range() is currently passed both the start and end
fields of the mmu_notifier_range structure. As this structure now
carries important information about the reason for the unmap (the
event field), replace the start/end parameters with the range
structure itself, and update all architectures.
No functional change.
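As an illustration of what the extra context enables (not part of this
patch; the event check below is purely hypothetical), an architecture
callback could eventually do something along these lines:

	int kvm_unmap_hva_range(struct kvm *kvm,
				const struct mmu_notifier_range *range)
	{
		/*
		 * Hypothetical: range->event says *why* the unmap is
		 * happening (e.g. MMU_NOTIFY_UNMAP for munmap/mremap
		 * vs MMU_NOTIFY_CLEAR), which follow-up patches could
		 * use to decide how much cache and TLB maintenance is
		 * really needed.
		 */
		if (range->event == MMU_NOTIFY_UNMAP)
			pr_debug("unmap %lx-%lx due to munmap/mremap\n",
				 range->start, range->end);

		handle_hva_to_gpa(kvm, range->start, range->end,
				  &kvm_unmap_hva_handler, NULL);
		return 0;
	}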
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm/include/asm/kvm_host.h | 2 +-
arch/arm64/include/asm/kvm_host.h | 2 +-
arch/mips/include/asm/kvm_host.h | 2 +-
arch/mips/kvm/mmu.c | 6 ++++--
arch/powerpc/include/asm/kvm_host.h | 2 +-
arch/powerpc/kvm/book3s.c | 5 +++--
arch/powerpc/kvm/e500_mmu_host.c | 4 ++--
arch/x86/include/asm/kvm_host.h | 3 ++-
arch/x86/kvm/mmu/mmu.c | 5 +++--
arch/x86/kvm/x86.c | 4 ++--
include/linux/kvm_host.h | 2 +-
virt/kvm/arm/mmu.c | 8 ++++----
virt/kvm/kvm_main.c | 7 +++----
13 files changed, 28 insertions(+), 24 deletions(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 556cd818eccf..621c71594499 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -276,7 +276,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ const struct mmu_notifier_range *range);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c61260cf63c5..dd850f5e81e3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -441,7 +441,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ const struct mmu_notifier_range *range);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 41204a49cf95..0ed065870f1b 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -935,7 +935,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ const struct mmu_notifier_range *range);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7dad7a293eae..32ef868258b9 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -518,9 +518,11 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm,
+ const struct mmu_notifier_range *range)
{
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+ handle_hva_to_gpa(kvm, range->start, range->end,
+ &kvm_unmap_hva_handler, NULL);
kvm_mips_callbacks->flush_shadow_all(kvm);
return 0;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0a398f2321c2..8cef585e0abe 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -58,7 +58,7 @@
#define KVM_ARCH_WANT_MMU_NOTIFIER
extern int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ const struct mmu_notifier_range *range);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 58a59ee998e2..a1529a0dd656 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -842,9 +842,10 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm,
+ const struct mmu_notifier_range *range)
{
- return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+ return kvm->arch.kvm_ops->unmap_hva_range(kvm, range->start, range->end);
}
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 425d13806645..5a7211901063 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -734,10 +734,10 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
return 0;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, const struct mmu_notifier_range *range)
{
/* kvm_unmap_hva flushes everything anyways */
- kvm_unmap_hva(kvm, start);
+ kvm_unmap_hva(kvm, range->start);
return 0;
}
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b79cd6aa4075..c479fa845d72 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1569,7 +1569,8 @@ asmlinkage void kvm_spurious_fault(void);
_ASM_EXTABLE(666b, 667b)
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm,
+ const struct mmu_notifier_range *range);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6f92b40d798c..86831be07c17 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2040,9 +2040,10 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm,
+ const struct mmu_notifier_range *range)
{
- return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+ return kvm_handle_hva_range(kvm, range->start, range->end, 0, kvm_unmap_rmapp);
}
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cf917139de6b..c1a238f4ee35 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7942,7 +7942,7 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
}
int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end,
+ const struct mmu_notifier_range *range,
bool blockable)
{
unsigned long apic_address;
@@ -7952,7 +7952,7 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
* Update it when it becomes invalid.
*/
apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
- if (start <= apic_address && apic_address < end)
+ if (range->start <= apic_address && apic_address < range->end)
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
return 0;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7ed1e2f8641e..d6e2ae2accc4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1391,7 +1391,7 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end, bool blockable);
+ const struct mmu_notifier_range *range, bool blockable);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 38b4c910b6c3..078e10c5650e 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -2035,14 +2035,14 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
return 0;
}
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, const struct mmu_notifier_range *range)
{
if (!kvm->arch.pgd)
return 0;
- trace_kvm_unmap_hva_range(start, end);
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+ trace_kvm_unmap_hva_range(range->start, range->end);
+ handle_hva_to_gpa(kvm, range->start, range->end,
+ &kvm_unmap_hva_handler, NULL);
return 0;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 00268290dcbd..7c3665ad1035 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -158,7 +158,7 @@ static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end, bool blockable)
+ const struct mmu_notifier_range *range, bool blockable)
{
return 0;
}
@@ -415,7 +415,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
- need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
+ need_tlb_flush = kvm_unmap_hva_range(kvm, range);
need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
@@ -423,8 +423,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
spin_unlock(&kvm->mmu_lock);
- ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
- range->end,
+ ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range,
mmu_notifier_range_blockable(range));
srcu_read_unlock(&kvm->srcu, idx);
--
2.20.1
Thread overview: 15+ messages
2019-12-13 18:24 [PATCH 0/7] KVM: arm/arm64: Help VMs dying quicker Marc Zyngier
2019-12-13 18:24 ` Marc Zyngier [this message]
2019-12-13 18:59 ` [PATCH 1/7] KVM: Pass mmu_notifier_range down to kvm_unmap_hva_range() Suzuki Kuruppassery Poulose
2019-12-14 10:37 ` Marc Zyngier
2019-12-15 10:27 ` Marc Zyngier
2020-01-15 18:10 ` Paolo Bonzini
2019-12-13 18:24 ` [PATCH 2/7] KVM: arm/arm64: Pass flags along Stage-2 unmapping functions Marc Zyngier
2019-12-13 18:24 ` [PATCH 3/7] KVM: arm/arm64: Condition cache maintenance on unmap with a flag Marc Zyngier
2019-12-13 18:25 ` [PATCH 4/7] KVM: arm/arm64: Condition TLB " Marc Zyngier
2019-12-13 18:25 ` [PATCH 5/7] KVM: arm/arm64: Elide both CMOs and TBLIs on freeing the whole Stage-2 Marc Zyngier
2019-12-13 18:25 ` [PATCH 6/7] KVM: arm/arm64: Elide CMOs when retrying a block mapping Marc Zyngier
2019-12-13 18:25 ` [PATCH 7/7] KVM: arm/arm64: Elide CMOs when unmapping a range Marc Zyngier
2019-12-18 15:07 ` James Morse
2019-12-18 15:30 ` Marc Zyngier
2019-12-19 13:46 ` James Morse