From: Keqian Zhu <zhukeqian1@huawei.com>
To: <linux-kernel@vger.kernel.org>,
	<linux-arm-kernel@lists.infradead.org>,
	<kvmarm@lists.cs.columbia.edu>, <kvm@vger.kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>,
	Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>,
	Will Deacon <will@kernel.org>,
	"Suzuki K Poulose" <suzuki.poulose@arm.com>,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Julien Thierry <julien.thierry.kdev@gmail.com>,
	Mark Brown <broonie@kernel.org>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	Andrew Morton <akpm@linux-foundation.org>,
	Alexios Zavras <alexios.zavras@intel.com>,
	<liangpeng10@huawei.com>, <zhengxiang9@huawei.com>,
	<wanghaibin.wang@huawei.com>, Keqian Zhu <zhukeqian1@huawei.com>
Subject: [PATCH 04/12] KVM: arm64: Support clear DBM bit for PTEs
Date: Tue, 16 Jun 2020 17:35:45 +0800
Message-ID: <20200616093553.27512-5-zhukeqian1@huawei.com>
In-Reply-To: <20200616093553.27512-1-zhukeqian1@huawei.com>

This adds support for clearing the DBM bit of stage2 PTEs, which is
needed to dynamically enable and disable hardware DBM.
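
For context, here is a minimal, hypothetical sketch of the per-PTE
helpers this patch relies on (kvm_s2pte_dbm() and kvm_clear_s2pte_dbm()
are introduced in patch 01 of this series; the definitions below are
illustrative and may differ from the real ones). On arm64, the DBM bit
is bit [51] of a stage2 descriptor (PTE_DBM):

	/* Illustrative sketch only, not the definitions from patch 01 */
	static inline bool kvm_s2pte_dbm(pte_t *ptep)
	{
		return !!(READ_ONCE(pte_val(*ptep)) & PTE_DBM);
	}

	static inline void kvm_clear_s2pte_dbm(pte_t *ptep)
	{
		pteval_t old_pteval, pteval;

		/*
		 * Clear DBM atomically: with DBM enabled, hardware may
		 * concurrently rewrite the descriptor to mark it dirty.
		 */
		pteval = READ_ONCE(pte_val(*ptep));
		do {
			old_pteval = pteval;
			pteval &= ~PTE_DBM;
			pteval = cmpxchg_relaxed(&pte_val(*ptep),
						 old_pteval, pteval);
		} while (pteval != old_pteval);
	}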

Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
---
 arch/arm64/include/asm/kvm_host.h |   2 +
 arch/arm64/kvm/mmu.c              | 151 ++++++++++++++++++++++++++++++
 2 files changed, 153 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c3e6fcc664b1..9ea2dcfd609c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -480,6 +480,8 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
+void kvm_mmu_clear_dbm(struct kvm *kvm, struct kvm_memory_slot *memslot);
+void kvm_mmu_clear_dbm_all(struct kvm *kvm);
 
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		int exception_index);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 27407153121b..f08b0fbca0a0 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2446,6 +2446,157 @@ int kvm_mmu_init(void)
 	return err;
 }
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+/**
+ * stage2_clear_dbm_ptes() - clear DBM bit from PMD range
+ * @pmd:	pointer to pmd entry
+ * @addr:	range start address
+ * @end:	range end address
+ */
+static void stage2_clear_dbm_ptes(pmd_t *pmd, phys_addr_t addr,
+				  phys_addr_t end)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		if (!pte_none(*pte) && kvm_s2pte_dbm(pte))
+			kvm_clear_s2pte_dbm(pte);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+/**
+ * stage2_clear_dbm_pmds() - clear DBM bit from PUD range
+ * @kvm:	The KVM pointer
+ * @pud:	pointer to pud entry
+ * @addr:	range start address
+ * @end:	range end address
+ */
+static void stage2_clear_dbm_pmds(struct kvm *kvm, pud_t *pud,
+				  phys_addr_t addr, phys_addr_t end)
+{
+	pmd_t *pmd;
+	phys_addr_t next;
+
+	pmd = stage2_pmd_offset(kvm, pud, addr);
+	do {
+		next = stage2_pmd_addr_end(kvm, addr, end);
+		if (!pmd_none(*pmd) && !pmd_thp_or_huge(*pmd))
+			stage2_clear_dbm_ptes(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+}
+
+/**
+ * stage2_clear_dbm_puds() - clear DBM bit from P4D range
+ * @kvm:	The KVM pointer
+ * @p4d:	pointer to p4d entry
+ * @addr:	range start address
+ * @end:	range end address
+ */
+static void stage2_clear_dbm_puds(struct kvm *kvm, p4d_t *p4d,
+				  phys_addr_t addr, phys_addr_t end)
+{
+	pud_t *pud;
+	phys_addr_t next;
+
+	pud = stage2_pud_offset(kvm, p4d, addr);
+	do {
+		next = stage2_pud_addr_end(kvm, addr, end);
+		if (!stage2_pud_none(kvm, *pud) && !stage2_pud_huge(kvm, *pud))
+			stage2_clear_dbm_pmds(kvm, pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+}
+
+/**
+ * stage2_clear_dbm_p4ds() - clear DBM bit from PGD range
+ * @kvm:	The KVM pointer
+ * @pgd:	pointer to pgd entry
+ * @addr:	range start address
+ * @end:	range end address
+ */
+static void stage2_clear_dbm_p4ds(struct kvm *kvm, pgd_t *pgd,
+				  phys_addr_t addr, phys_addr_t end)
+{
+	p4d_t *p4d;
+	phys_addr_t next;
+
+	p4d = stage2_p4d_offset(kvm, pgd, addr);
+	do {
+		next = stage2_p4d_addr_end(kvm, addr, end);
+		if (!stage2_p4d_none(kvm, *p4d))
+			stage2_clear_dbm_puds(kvm, p4d, addr, next);
+	} while (p4d++, addr = next, addr != end);
+}
+
+/**
+ * stage2_clear_dbm_range() - clear DBM bit from a stage2 address
+ * range
+ * @kvm:	The KVM pointer
+ * @addr:	Start address of range
+ * @end:	End address of range
+ */
+static void stage2_clear_dbm_range(struct kvm *kvm, phys_addr_t addr,
+				   phys_addr_t end)
+{
+	pgd_t *pgd;
+	phys_addr_t next;
+
+	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+	do {
+		cond_resched_lock(&kvm->mmu_lock);
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
+		next = stage2_pgd_addr_end(kvm, addr, end);
+		if (stage2_pgd_present(kvm, *pgd))
+			stage2_clear_dbm_p4ds(kvm, pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * kvm_mmu_clear_dbm() - clear DBM bit from stage2 PTEs of a memory slot
+ * @kvm:	The KVM pointer
+ * @memslot:	The memory slot whose DBM bits should be cleared
+ *
+ * After this function returns, the DBM bit of all page descriptors in
+ * the slot is cleared (block mappings are skipped by the walkers above).
+ *
+ * Acquires kvm->mmu_lock. Called with the kvm->slots_lock mutex held,
+ * which serializes operations on VM memory regions.
+ */
+void kvm_mmu_clear_dbm(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+	stage2_clear_dbm_range(kvm, start, end);
+	spin_unlock(&kvm->mmu_lock);
+	kvm_flush_remote_tlbs(kvm);
+}
+
+/**
+ * kvm_mmu_clear_dbm_all() - clear DBM bit from stage2 PTEs of the whole VM
+ * @kvm:	The KVM pointer
+ *
+ * Called with the kvm->slots_lock mutex held.
+ */
+void kvm_mmu_clear_dbm_all(struct kvm *kvm)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslots = slots->memslots;
+	struct kvm_memory_slot *memslot;
+	int slot;
+
+	if (unlikely(!slots->used_slots))
+		return;
+
+	for (slot = 0; slot < slots->used_slots; slot++) {
+		memslot = &memslots[slot];
+		kvm_mmu_clear_dbm(kvm, memslot);
+	}
+}
+#endif /* CONFIG_ARM64_HW_AFDBM */
+
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   const struct kvm_userspace_memory_region *mem,
 				   struct kvm_memory_slot *old,
-- 
2.19.1
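
For illustration, a minimal usage sketch of the new interface (this
call site is hypothetical; the series only adds a real caller in a
later patch, e.g. when hardware dirty logging is disabled for a VM):

	/* The caller must hold kvm->slots_lock, as documented above. */
	mutex_lock(&kvm->slots_lock);
	kvm_mmu_clear_dbm_all(kvm);
	mutex_unlock(&kvm->slots_lock);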


Thread overview: 48+ messages

2020-06-16  9:35 [PATCH 00/12] KVM: arm64: Support stage2 hardware DBM Keqian Zhu
2020-06-16  9:35 ` [PATCH 01/12] KVM: arm64: Add some basic functions to support hw DBM Keqian Zhu
2020-06-16  9:35 ` [PATCH 02/12] KVM: arm64: Modify stage2 young mechanism to support hw DBM Keqian Zhu
2020-06-16  9:35 ` [PATCH 03/12] KVM: arm64: Report hardware dirty status of stage2 PTE if coverred Keqian Zhu
2020-07-01 11:28   ` Steven Price
2020-07-02 11:28     ` zhukeqian
2020-06-16  9:35 ` [PATCH 04/12] KVM: arm64: Support clear DBM bit for PTEs Keqian Zhu [this message]
2020-06-16  9:35 ` [PATCH 05/12] KVM: arm64: Add KVM_CAP_ARM_HW_DIRTY_LOG capability Keqian Zhu
2020-06-16  9:35 ` [PATCH 06/12] KVM: arm64: Set DBM bit of PTEs during write protecting Keqian Zhu
2020-06-16  9:35 ` [PATCH 07/12] KVM: arm64: Scan PTEs to sync dirty log Keqian Zhu
2020-06-16  9:35 ` [PATCH 08/12] KVM: Omit dirty log sync in log clear if initially all set Keqian Zhu
2020-06-16  9:35 ` [PATCH 09/12] KVM: arm64: Steply write protect page table by mask bit Keqian Zhu
2020-06-16  9:35 ` [PATCH 10/12] KVM: arm64: Save stage2 PTE dirty status if it is coverred Keqian Zhu
2020-06-16  9:35 ` [PATCH 11/12] KVM: arm64: Support disable hw dirty log after enable Keqian Zhu
2020-06-16  9:35 ` [PATCH 12/12] KVM: arm64: Enable stage2 hardware DBM Keqian Zhu
2020-06-18  4:13 ` [PATCH 00/12] KVM: arm64: Support stage2 hardware DBM zhukeqian
