From: Anup Patel <anup.patel@wdc.com>
To: Palmer Dabbelt <palmer@dabbelt.com>,
Palmer Dabbelt <palmerdabbelt@google.com>,
Paul Walmsley <paul.walmsley@sifive.com>,
Albert Ou <aou@eecs.berkeley.edu>,
Paolo Bonzini <pbonzini@redhat.com>
Cc: Alexander Graf <graf@amazon.com>,
Atish Patra <atish.patra@wdc.com>,
Alistair Francis <Alistair.Francis@wdc.com>,
Damien Le Moal <damien.lemoal@wdc.com>,
Anup Patel <anup@brainfault.org>,
kvm@vger.kernel.org, kvm-riscv@lists.infradead.org,
linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org,
Anup Patel <anup.patel@wdc.com>
Subject: [PATCH v16 11/17] RISC-V: KVM: Implement MMU notifiers
Date: Fri, 15 Jan 2021 17:48:40 +0530
Message-ID: <20210115121846.114528-12-anup.patel@wdc.com>
In-Reply-To: <20210115121846.114528-1-anup.patel@wdc.com>
This patch implements MMU notifiers for KVM RISC-V so that the guest
physical address space stays in sync with the host MM state backing it:
when the host unmaps, migrates, or ages a page, the corresponding stage2
mapping is updated as well. This allows swapping, page migration, and
similar host memory-management features to work transparently with
KVM RISC-V.
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alexander Graf <graf@amazon.com>
---
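Note for reviewers: the hva -> gpa dispatch below follows the
handle_hva_to_gpa() pattern used by KVM ARM. The following is a minimal,
self-contained userspace sketch of the windowing arithmetic it performs;
every structure, name, and number in it is an illustrative stand-in for
this sketch, not a kernel API.

  #include <stdio.h>
  #include <stdint.h>

  #define PAGE_SHIFT_SIM 12

  struct slot {                    /* stand-in for struct kvm_memory_slot */
          uint64_t userspace_addr; /* hva where the slot is mapped */
          uint64_t base_gfn;       /* first guest frame number */
          uint64_t npages;
  };

  static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
  static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

  int main(void)
  {
          struct slot slots[] = {
                  { 0x7f0000000000ULL, 0x80000, 256 },
                  { 0x7f0000200000ULL, 0x90000, 256 },
          };
          uint64_t start = 0x7f00000f0000ULL, end = 0x7f0000210000ULL;

          for (unsigned i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
                  struct slot *s = &slots[i];
                  uint64_t slot_end = s->userspace_addr +
                                      (s->npages << PAGE_SHIFT_SIM);
                  /* clip the notifier range against this slot's hva window */
                  uint64_t hva_start = max_u64(start, s->userspace_addr);
                  uint64_t hva_end = min_u64(end, slot_end);

                  if (hva_start >= hva_end)
                          continue; /* no overlap with this slot */

                  /* hva -> gfn: offset into the slot plus the slot's base gfn */
                  uint64_t gfn = s->base_gfn +
                                 ((hva_start - s->userspace_addr) >> PAGE_SHIFT_SIM);
                  uint64_t gpa = gfn << PAGE_SHIFT_SIM;

                  printf("slot %u: gpa 0x%llx, size 0x%llx\n", i,
                         (unsigned long long)gpa,
                         (unsigned long long)(hva_end - hva_start));
          }
          return 0;
  }

Running it prints one (gpa, size) pair per memslot overlapping the
queried hva range, which is exactly the shape of the calls that the
notifier handlers in this patch receive.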
arch/riscv/include/asm/kvm_host.h | 7 ++
arch/riscv/kvm/Kconfig | 1 +
arch/riscv/kvm/mmu.c | 144 +++++++++++++++++++++++++++++-
arch/riscv/kvm/vm.c | 1 +
4 files changed, 149 insertions(+), 4 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 5d70c8cf3733..d25f181c3433 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -203,6 +203,13 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start,
+ unsigned long end, unsigned int flags);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+
void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa, unsigned long vmid);
void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa);
diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
index 633063edaee8..a712bb910cda 100644
--- a/arch/riscv/kvm/Kconfig
+++ b/arch/riscv/kvm/Kconfig
@@ -20,6 +20,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)"
depends on RISCV_SBI && MMU
+ select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select ANON_INODES
select KVM_MMIO
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index e311c4bb0b8a..56fda9ef70fd 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -296,7 +296,8 @@ static void stage2_op_pte(struct kvm *kvm, gpa_t addr,
}
}
-static void stage2_unmap_range(struct kvm *kvm, gpa_t start, gpa_t size)
+static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
+ gpa_t size, bool may_block)
{
int ret;
pte_t *ptep;
@@ -321,6 +322,13 @@ static void stage2_unmap_range(struct kvm *kvm, gpa_t start, gpa_t size)
next:
addr += page_size;
+
+ /*
+ * If the range is too large, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector warnings.
+ */
+ if (may_block && addr < end)
+ cond_resched_lock(&kvm->mmu_lock);
}
}
@@ -404,6 +412,38 @@ int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
}
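+/*
+ * Walk all memslots, clip the [start, end) hva range against each
+ * slot's userspace mapping, and invoke the handler on the gpa range
+ * corresponding to each overlap.
+ */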
+static int handle_hva_to_gpa(struct kvm *kvm,
+ unsigned long start,
+ unsigned long end,
+ int (*handler)(struct kvm *kvm,
+ gpa_t gpa, u64 size,
+ void *data),
+ void *data)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int ret = 0;
+
+ slots = kvm_memslots(kvm);
+
+ /* we only care about the pages that the guest sees */
+ kvm_for_each_memslot(memslot, slots) {
+ unsigned long hva_start, hva_end;
+ gfn_t gpa;
+
+ hva_start = max(start, memslot->userspace_addr);
+ hva_end = min(end, memslot->userspace_addr +
+ (memslot->npages << PAGE_SHIFT));
+ if (hva_start >= hva_end)
+ continue;
+
+ gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
+ ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
+ }
+
+ return ret;
+}
+
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
@@ -549,7 +589,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
spin_lock(&kvm->mmu_lock);
if (ret)
stage2_unmap_range(kvm, mem->guest_phys_addr,
- mem->memory_size);
+ mem->memory_size, false);
spin_unlock(&kvm->mmu_lock);
out:
@@ -557,6 +597,96 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
return ret;
}
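+/*
+ * Callback for the unmap/invalidate notifiers: tear down the stage2
+ * mapping for this gpa range. may_block tells stage2_unmap_range()
+ * whether it is allowed to drop mmu_lock and reschedule.
+ */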
+static int kvm_unmap_hva_handler(struct kvm *kvm,
+ gpa_t gpa, u64 size, void *data)
+{
+ unsigned int flags = *(unsigned int *)data;
+ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+ stage2_unmap_range(kvm, gpa, size, may_block);
+ return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start,
+ unsigned long end, unsigned int flags)
+{
+ if (!kvm->arch.pgd)
+ return 0;
+
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
+ return 0;
+}
+
+static int kvm_set_spte_handler(struct kvm *kvm,
+ gpa_t gpa, u64 size, void *data)
+{
+ pte_t *pte = (pte_t *)data;
+
+ WARN_ON(size != PAGE_SIZE);
+ stage2_set_pte(kvm, 0, NULL, gpa, pte);
+
+ return 0;
+}
+
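+/*
+ * Invoked from the change_pte MMU notifier, which operates on a
+ * single page: install the new host pte's pfn into stage2 for the
+ * corresponding gfn.
+ */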
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+ unsigned long end = hva + PAGE_SIZE;
+ kvm_pfn_t pfn = pte_pfn(pte);
+ pte_t stage2_pte;
+
+ if (!kvm->arch.pgd)
+ return 0;
+
+ stage2_pte = pfn_pte(pfn, PAGE_WRITE_EXEC);
+ handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+
+ return 0;
+}
+
+static int kvm_age_hva_handler(struct kvm *kvm,
+ gpa_t gpa, u64 size, void *data)
+{
+ pte_t *ptep;
+ u32 ptep_level = 0;
+
+ WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);
+
+ if (!stage2_get_leaf_entry(kvm, gpa, &ptep, &ptep_level))
+ return 0;
+
+ return ptep_test_and_clear_young(NULL, 0, ptep);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ if (!kvm->arch.pgd)
+ return 0;
+
+ return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm,
+ gpa_t gpa, u64 size, void *data)
+{
+ pte_t *ptep;
+ u32 ptep_level = 0;
+
+ WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
+ if (!stage2_get_leaf_entry(kvm, gpa, &ptep, &ptep_level))
+ return 0;
+
+ return pte_young(*ptep);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ if (!kvm->arch.pgd)
+ return 0;
+
+ return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
+ kvm_test_age_hva_handler, NULL);
+}
+
int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot,
gpa_t gpa, unsigned long hva, bool is_write)
@@ -571,7 +701,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache;
bool logging = (memslot->dirty_bitmap &&
!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
- unsigned long vma_pagesize;
+ unsigned long vma_pagesize, mmu_seq;
mmap_read_lock(current->mm);
@@ -610,6 +740,8 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
return ret;
}
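+ /*
+ * Snapshot the MMU notifier sequence count before the hva->pfn
+ * translation, so that a notifier invalidation racing with it can
+ * be detected once mmu_lock is taken below.
+ */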
+ mmu_seq = kvm->mmu_notifier_seq;
+
hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
@@ -628,6 +760,9 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
spin_lock(&kvm->mmu_lock);
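+ /*
+ * If a notifier invalidation ran after the sequence count was read,
+ * the pfn obtained above may be stale; bail out and let the guest
+ * retry the faulting access.
+ */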
+ if (mmu_notifier_retry(kvm, mmu_seq))
+ goto out_unlock;
+
if (writeable) {
kvm_set_pfn_dirty(hfn);
mark_page_dirty(kvm, gfn);
@@ -641,6 +776,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
if (ret)
kvm_err("Failed to map in stage2\n");
+out_unlock:
spin_unlock(&kvm->mmu_lock);
kvm_set_pfn_accessed(hfn);
kvm_release_pfn_clean(hfn);
@@ -677,7 +813,7 @@ void kvm_riscv_stage2_free_pgd(struct kvm *kvm)
spin_lock(&kvm->mmu_lock);
if (kvm->arch.pgd) {
- stage2_unmap_range(kvm, 0UL, stage2_gpa_size);
+ stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false);
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
kvm->arch.pgd_phys = 0;
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index 6cde69a82252..00a1a88008be 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -49,6 +49,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
+ case KVM_CAP_SYNC_MMU:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_ONE_REG:
case KVM_CAP_READONLY_MEM:
--
2.25.1