From: Marc Zyngier <maz@kernel.org>
Cc: kvm-ppc@vger.kernel.org, Wanpeng Li <wanpengli@tencent.com>,
kvm@vger.kernel.org, James Hogan <jhogan@kernel.org>,
Joerg Roedel <joro@8bytes.org>,
linux-mips@vger.kernel.org,
Sean Christopherson <sean.j.christopherson@intel.com>,
Paul Mackerras <paulus@ozlabs.org>,
linux-arm-kernel@lists.infradead.org,
Paolo Bonzini <pbonzini@redhat.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>,
kvmarm@lists.cs.columbia.edu, Jim Mattson <jmattson@google.com>
Subject: [PATCH 2/7] KVM: arm/arm64: Pass flags along Stage-2 unmapping functions
Date: Fri, 13 Dec 2019 18:24:58 +0000
Message-ID: <20191213182503.14460-3-maz@kernel.org>
In-Reply-To: <20191213182503.14460-1-maz@kernel.org>
In preparation for eliding unnecessary cache and TLB maintenance on unmap
(see the rest of this series), pass a set of flags down to all the Stage-2
unmapping functions. The only value passed for now is zero, and no function
evaluates it yet.
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
virt/kvm/arm/mmu.c | 47 ++++++++++++++++++++++++++--------------------
1 file changed, 27 insertions(+), 20 deletions(-)
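To illustrate the shape of the change for reviewers: each level of the
walk simply forwards the flags it receives, and later patches in this
series test them to decide whether maintenance can be elided. Below is
a standalone toy sketch of that pattern (not kernel code; the flag name
is hypothetical, since this patch defines no flag values and all callers
pass 0):

#include <stdio.h>

/* Hypothetical flag bit; nothing in this patch defines or tests one. */
#define UNMAP_SKIP_CMO	(1UL << 0)

/* Leaf level: maintenance runs unless a flag elides it. */
static void unmap_ptes(unsigned long addr, unsigned long end,
		       unsigned long flags)
{
	for (; addr != end; addr += 4096)
		if (!(flags & UNMAP_SKIP_CMO))
			printf("dcache clean at %#lx\n", addr);
}

/* Upper levels just thread flags through, as the diff below does. */
static void unmap_range(unsigned long start, unsigned long size,
			unsigned long flags)
{
	unmap_ptes(start, start + size, flags);
}

int main(void)
{
	unmap_range(0x80000000UL, 2 * 4096, 0);	/* today: flags == 0 */
	unmap_range(0x80000000UL, 2 * 4096, UNMAP_SKIP_CMO); /* future */
	return 0;
}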
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 078e10c5650e..0fed7c19c6d5 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -152,7 +152,8 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
return p;
}
-static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
+static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr,
+ unsigned long flags)
{
pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
stage2_pgd_clear(kvm, pgd);
@@ -161,7 +162,8 @@ static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr
put_page(virt_to_page(pgd));
}
-static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
+static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr,
+ unsigned long flags)
{
pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
VM_BUG_ON(stage2_pud_huge(kvm, *pud));
@@ -171,7 +173,8 @@ static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr
put_page(virt_to_page(pud));
}
-static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
+static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr,
+ unsigned long flags)
{
pte_t *pte_table = pte_offset_kernel(pmd, 0);
VM_BUG_ON(pmd_thp_or_huge(*pmd));
@@ -235,7 +238,8 @@ static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
* does.
*/
static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
- phys_addr_t addr, phys_addr_t end)
+ phys_addr_t addr, phys_addr_t end,
+ unsigned long flags)
{
phys_addr_t start_addr = addr;
pte_t *pte, *start_pte;
@@ -257,11 +261,12 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
} while (pte++, addr += PAGE_SIZE, addr != end);
if (stage2_pte_table_empty(kvm, start_pte))
- clear_stage2_pmd_entry(kvm, pmd, start_addr);
+ clear_stage2_pmd_entry(kvm, pmd, start_addr, flags);
}
static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
- phys_addr_t addr, phys_addr_t end)
+ phys_addr_t addr, phys_addr_t end,
+ unsigned long flags)
{
phys_addr_t next, start_addr = addr;
pmd_t *pmd, *start_pmd;
@@ -280,17 +285,18 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
put_page(virt_to_page(pmd));
} else {
- unmap_stage2_ptes(kvm, pmd, addr, next);
+ unmap_stage2_ptes(kvm, pmd, addr, next, flags);
}
}
} while (pmd++, addr = next, addr != end);
if (stage2_pmd_table_empty(kvm, start_pmd))
- clear_stage2_pud_entry(kvm, pud, start_addr);
+ clear_stage2_pud_entry(kvm, pud, start_addr, flags);
}
static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
- phys_addr_t addr, phys_addr_t end)
+ phys_addr_t addr, phys_addr_t end,
+ unsigned long flags)
{
phys_addr_t next, start_addr = addr;
pud_t *pud, *start_pud;
@@ -307,13 +313,13 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
kvm_flush_dcache_pud(old_pud);
put_page(virt_to_page(pud));
} else {
- unmap_stage2_pmds(kvm, pud, addr, next);
+ unmap_stage2_pmds(kvm, pud, addr, next, flags);
}
}
} while (pud++, addr = next, addr != end);
if (stage2_pud_table_empty(kvm, start_pud))
- clear_stage2_pgd_entry(kvm, pgd, start_addr);
+ clear_stage2_pgd_entry(kvm, pgd, start_addr, flags);
}
/**
@@ -327,7 +333,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
* destroying the VM), otherwise another faulting VCPU may come in and mess
* with things behind our backs.
*/
-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
+ unsigned long flags)
{
pgd_t *pgd;
phys_addr_t addr = start, end = start + size;
@@ -347,7 +354,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
break;
next = stage2_pgd_addr_end(kvm, addr, end);
if (!stage2_pgd_none(kvm, *pgd))
- unmap_stage2_puds(kvm, pgd, addr, next);
+ unmap_stage2_puds(kvm, pgd, addr, next, flags);
/*
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
@@ -950,7 +957,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
if (!(vma->vm_flags & VM_PFNMAP)) {
gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
- unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+ unmap_stage2_range(kvm, gpa, vm_end - vm_start, 0);
}
hva = vm_end;
} while (hva < reg_end);
@@ -996,7 +1003,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
spin_lock(&kvm->mmu_lock);
if (kvm->arch.pgd) {
- unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
+ unmap_stage2_range(kvm, 0, kvm_phys_size(kvm), 0);
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
kvm->arch.pgd_phys = 0;
@@ -1086,7 +1093,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
* get handled accordingly.
*/
if (!pmd_thp_or_huge(old_pmd)) {
- unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
+ unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE, 0);
goto retry;
}
/*
@@ -1136,7 +1143,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
* the range for this block and retry.
*/
if (!stage2_pud_huge(kvm, old_pud)) {
- unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
+ unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE, 0);
goto retry;
}
@@ -2031,7 +2038,7 @@ static int handle_hva_to_gpa(struct kvm *kvm,
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
- unmap_stage2_range(kvm, gpa, size);
+ unmap_stage2_range(kvm, gpa, size, 0);
return 0;
}
@@ -2344,7 +2351,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
spin_lock(&kvm->mmu_lock);
if (ret)
- unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
+ unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size, 0);
else
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
@@ -2380,7 +2387,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
phys_addr_t size = slot->npages << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(kvm, gpa, size);
+ unmap_stage2_range(kvm, gpa, size, 0);
spin_unlock(&kvm->mmu_lock);
}
--
2.20.1
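Note the locking rule preserved in the unmap_stage2_range() hunk above:
the function must be called with kvm->mmu_lock held (except when the VM
is being destroyed), otherwise another faulting VCPU may race with the
unmap. Every call site touched by this patch follows the same shape; a
minimal sketch of the calling convention, mirroring the
kvm_arch_flush_shadow_memslot() hunk (gpa and size being whatever the
caller computed):

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(kvm, gpa, size, 0);
	spin_unlock(&kvm->mmu_lock);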
Thread overview: 15+ messages
2019-12-13 18:24 [PATCH 0/7] KVM: arm/arm64: Help VMs dying quicker Marc Zyngier
2019-12-13 18:24 ` [PATCH 1/7] KVM: Pass mmu_notifier_range down to kvm_unmap_hva_range() Marc Zyngier
2019-12-13 18:59 ` Suzuki Kuruppassery Poulose
2019-12-14 10:37 ` Marc Zyngier
2019-12-15 10:27 ` Marc Zyngier
2020-01-15 18:10 ` Paolo Bonzini
2019-12-13 18:24 ` [PATCH 2/7] KVM: arm/arm64: Pass flags along Stage-2 unmapping functions Marc Zyngier [this message]
2019-12-13 18:24 ` [PATCH 3/7] KVM: arm/arm64: Condition cache maintenance on unmap with a flag Marc Zyngier
2019-12-13 18:25 ` [PATCH 4/7] KVM: arm/arm64: Condition TLB maintenance on unmap with a flag Marc Zyngier
2019-12-13 18:25 ` [PATCH 5/7] KVM: arm/arm64: Elide both CMOs and TLBIs on freeing the whole Stage-2 Marc Zyngier
2019-12-13 18:25 ` [PATCH 6/7] KVM: arm/arm64: Elide CMOs when retrying a block mapping Marc Zyngier
2019-12-13 18:25 ` [PATCH 7/7] KVM: arm/arm64: Elide CMOs when unmapping a range Marc Zyngier
2019-12-18 15:07 ` James Morse
2019-12-18 15:30 ` Marc Zyngier
2019-12-19 13:46 ` James Morse