From: Sean Christopherson <sean.j.christopherson@intel.com>
To: Marc Zyngier <maz@kernel.org>, Paul Mackerras <paulus@ozlabs.org>,
	Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: James Morse <james.morse@arm.com>,
	Julien Thierry <julien.thierry.kdev@gmail.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	David Hildenbrand <david@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-mips@vger.kernel.org,
	kvm@vger.kernel.org, kvm-ppc@vger.kernel.org,
	linux-kernel@vger.kernel.org, Peter Feiner <pfeiner@google.com>,
	Peter Shier <pshier@google.com>,
	Junaid Shahid <junaids@google.com>,
	Ben Gardon <bgardon@google.com>,
	Christoffer Dall <christoffer.dall@arm.com>
Subject: [PATCH 18/21] KVM: arm64: Use common KVM implementation of MMU memory caches
Date: Fri,  5 Jun 2020 14:38:50 -0700
Message-ID: <20200605213853.14959-19-sean.j.christopherson@intel.com>
In-Reply-To: <20200605213853.14959-1-sean.j.christopherson@intel.com>

Move to the common MMU memory cache implementation now that the common
code and arm64's existing code are semantically compatible.

No functional change intended.

Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/arm64/include/asm/kvm_host.h  | 12 -------
 arch/arm64/include/asm/kvm_types.h |  2 ++
 arch/arm64/kvm/mmu.c               | 51 ++++++------------------------
 3 files changed, 12 insertions(+), 53 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2385dede96e0..d221b6b129fd 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -97,18 +97,6 @@ struct kvm_arch {
 	bool return_nisv_io_abort_to_user;
 };
 
-#define KVM_NR_MEM_OBJS     40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-	int nobjs;
-	gfp_t gfp_zero;
-	void *objects[KVM_NR_MEM_OBJS];
-};
-
 struct kvm_vcpu_fault_info {
 	u32 esr_el2;		/* Hyp Syndrom Register */
 	u64 far_el2;		/* Hyp Fault Address Register */
diff --git a/arch/arm64/include/asm/kvm_types.h b/arch/arm64/include/asm/kvm_types.h
index d0987007d581..9a126b9e2d7c 100644
--- a/arch/arm64/include/asm/kvm_types.h
+++ b/arch/arm64/include/asm/kvm_types.h
@@ -2,5 +2,7 @@
 #ifndef _ASM_ARM64_KVM_TYPES_H
 #define _ASM_ARM64_KVM_TYPES_H
 
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+
 #endif /* _ASM_ARM64_KVM_TYPES_H */
 
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 688213ef34f0..976405e2fbb2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -124,37 +124,6 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
 	put_page(virt_to_page(pudp));
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
-{
-	void *page;
-
-	if (cache->nobjs >= min)
-		return 0;
-	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT |
-					       cache->gfp_zero);
-		if (!page)
-			return -ENOMEM;
-		cache->objects[cache->nobjs++] = page;
-	}
-	return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-	while (mc->nobjs)
-		free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-	void *p;
-
-	BUG_ON(!mc || !mc->nobjs);
-	p = mc->objects[--mc->nobjs];
-	return p;
-}
-
 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
 	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
@@ -1024,7 +993,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	if (stage2_pgd_none(kvm, *pgd)) {
 		if (!cache)
 			return NULL;
-		pud = mmu_memory_cache_alloc(cache);
+		pud = kvm_mmu_memory_cache_alloc(cache);
 		stage2_pgd_populate(kvm, pgd, pud);
 		get_page(virt_to_page(pgd));
 	}
@@ -1045,7 +1014,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	if (stage2_pud_none(kvm, *pud)) {
 		if (!cache)
 			return NULL;
-		pmd = mmu_memory_cache_alloc(cache);
+		pmd = kvm_mmu_memory_cache_alloc(cache);
 		stage2_pud_populate(kvm, pud, pmd);
 		get_page(virt_to_page(pud));
 	}
@@ -1251,7 +1220,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	if (stage2_pud_none(kvm, *pud)) {
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
-		pmd = mmu_memory_cache_alloc(cache);
+		pmd = kvm_mmu_memory_cache_alloc(cache);
 		stage2_pud_populate(kvm, pud, pmd);
 		get_page(virt_to_page(pud));
 	}
@@ -1276,7 +1245,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	if (pmd_none(*pmd)) {
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
-		pte = mmu_memory_cache_alloc(cache);
+		pte = kvm_mmu_memory_cache_alloc(cache);
 		kvm_pmd_populate(pmd, pte);
 		get_page(virt_to_page(pmd));
 	}
@@ -1343,7 +1312,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	phys_addr_t addr, end;
 	int ret = 0;
 	unsigned long pfn;
-	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, };
+	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
 
 	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(pa);
@@ -1354,8 +1323,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
 
-		ret = mmu_topup_memory_cache(&cache,
-					     kvm_mmu_cache_min_pages(kvm));
+		ret = kvm_mmu_topup_memory_cache(&cache,
+						 kvm_mmu_cache_min_pages(kvm));
 		if (ret)
 			goto out;
 		spin_lock(&kvm->mmu_lock);
@@ -1369,7 +1338,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	}
 
 out:
-	mmu_free_memory_cache(&cache);
+	kvm_mmu_free_memory_cache(&cache);
 	return ret;
 }
 
@@ -1735,7 +1704,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	up_read(&current->mm->mmap_sem);
 
 	/* We need minimum second+third level pages */
-	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
+	ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
 	if (ret)
 		return ret;
 
@@ -2158,7 +2127,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
 phys_addr_t kvm_mmu_get_httbr(void)
-- 
2.26.0
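
The common implementation adopted here was introduced earlier in this
series (patches 13-15), which moved x86's cache struct and helpers into
generic KVM code. As an aid for reading the diff above, here is a
minimal sketch of the shared structure and helper signatures; it is an
approximation based on what this series moves into common code, not the
authoritative definitions (those land in include/linux/kvm_types.h and
virt/kvm/kvm_main.c):

  /*
   * Sketch of the common cache. The initializer in
   * kvm_phys_addr_ioremap() above ({ 0, __GFP_ZERO, NULL, }) maps to
   * the first three members; kmem_cache == NULL means objects are
   * whole pages from __get_free_page().
   */
  struct kvm_mmu_memory_cache {
          int nobjs;
          gfp_t gfp_zero;
          struct kmem_cache *kmem_cache;
          void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
  };

  /* Fill the cache with at least @min objects; may sleep. */
  int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);

  /* Release every object still held by the cache. */
  void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);

  /* Pop one object; callers are expected to have topped up first. */
  void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);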

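The calling convention visible in the converted code mirrors x86's:
top up the cache with sleepable allocations before taking mmu_lock,
pop objects while the lock is held (where allocation can neither fail
nor sleep), and drain the cache when the mapping work is done. A
simplified illustration of that lifecycle follows; example_map() is a
made-up name for illustration only, and the real arm64 consumers are
user_mem_abort() and kvm_phys_addr_ioremap() in the diff above:

  static int example_map(struct kvm *kvm,
                         struct kvm_mmu_memory_cache *cache)
  {
          void *table;
          int ret;

          /* May sleep, so must run before mmu_lock is acquired. */
          ret = kvm_mmu_topup_memory_cache(cache,
                                           kvm_mmu_cache_min_pages(kvm));
          if (ret)
                  return ret;

          spin_lock(&kvm->mmu_lock);
          /* Safe: the topup above guarantees enough preallocated objects. */
          table = kvm_mmu_memory_cache_alloc(cache);
          /* ... install @table into the stage-2 page tables ... */
          spin_unlock(&kvm->mmu_lock);

          return 0;
  }

On-stack caches, such as the local one in kvm_phys_addr_ioremap(), are
drained with kvm_mmu_free_memory_cache() once the mapping is complete;
the per-vCPU mmu_page_cache is drained the same way in
kvm_mmu_free_memory_caches() when the vCPU is torn down.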

Thread overview: 214+ messages

2020-06-05 21:38 [PATCH 00/21] KVM: Cleanup and unify kvm_mmu_memory_cache usage Sean Christopherson
2020-06-05 21:38 ` [PATCH 01/21] KVM: x86/mmu: Track the associated kmem_cache in the MMU caches Sean Christopherson
2020-06-09 21:07   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 02/21] KVM: x86/mmu: Consolidate "page" variant of memory cache helpers Sean Christopherson
2020-06-09 22:54   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 03/21] KVM: x86/mmu: Use consistent "mc" name for kvm_mmu_memory_cache locals Sean Christopherson
2020-06-10 22:03   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 04/21] KVM: x86/mmu: Remove superfluous gotos from mmu_topup_memory_caches() Sean Christopherson
2020-06-09 22:57   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 05/21] KVM: x86/mmu: Try to avoid crashing KVM if a MMU memory cache is empty Sean Christopherson
2020-06-10 22:12   ` Ben Gardon
2020-06-17  0:53     ` Sean Christopherson
2020-06-17 16:36       ` Ben Gardon
2020-06-05 21:38 ` [PATCH 06/21] KVM: x86/mmu: Move fast_page_fault() call above mmu_topup_memory_caches() Sean Christopherson
2020-06-09 23:03   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 07/21] KVM: x86/mmu: Topup memory caches after walking GVA->GPA Sean Christopherson
2020-06-10 22:34   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 08/21] KVM: x86/mmu: Clean up the gorilla math in mmu_topup_memory_caches() Sean Christopherson
2020-06-10 22:20   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 09/21] KVM: x86/mmu: Separate the memory caches for shadow pages and gfn arrays Sean Christopherson
2020-06-09 23:56   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 10/21] KVM: x86/mmu: Make __GFP_ZERO a property of the memory cache Sean Christopherson
2020-06-10 18:57   ` Ben Gardon
2020-06-22 19:40     ` Sean Christopherson
2020-06-05 21:38 ` [PATCH 11/21] KVM: x86/mmu: Zero allocate shadow pages (outside of mmu_lock) Sean Christopherson
2020-06-10 18:49   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 12/21] KVM: x86/mmu: Skip filling the gfn cache for guaranteed direct MMU topups Sean Christopherson
2020-06-10 18:52   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 13/21] KVM: x86/mmu: Prepend "kvm_" to memory cache helpers that will be global Sean Christopherson
2020-06-10 18:56   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 14/21] KVM: Move x86's version of struct kvm_mmu_memory_cache to common code Sean Christopherson
2020-06-10 19:01   ` Ben Gardon
2020-06-10 21:58     ` Ben Gardon
2020-06-22 16:57       ` Sean Christopherson
2020-06-11  7:42   ` Marc Zyngier
2020-06-05 21:38 ` [PATCH 15/21] KVM: Move x86's MMU memory cache helpers to common KVM code Sean Christopherson
2020-06-10 20:24   ` Ben Gardon
2020-06-05 21:38 ` [PATCH 16/21] KVM: arm64: Drop @max param from mmu_topup_memory_cache() Sean Christopherson
2020-06-10 22:00   ` Ben Gardon
2020-06-11 15:59     ` Sean Christopherson
2020-06-05 21:38 ` [PATCH 17/21] KVM: arm64: Use common code's approach for __GFP_ZERO with memory caches Sean Christopherson
2020-06-11  7:59   ` Marc Zyngier
2020-06-11 15:43     ` Sean Christopherson
2020-06-11 15:51       ` Marc Zyngier
2020-06-05 21:38 ` [PATCH 18/21] KVM: arm64: Use common KVM implementation of MMU memory caches Sean Christopherson [this message]
2020-06-11  8:01   ` Marc Zyngier
2020-06-11 15:46     ` Sean Christopherson
2020-06-05 21:38 ` [PATCH 19/21] KVM: MIPS: Drop @max param from mmu_topup_memory_cache() Sean Christopherson
2020-06-08  8:56   ` Huacai Chen
2020-06-05 21:38 ` [PATCH 20/21] KVM: MIPS: Account pages used for GPA page tables Sean Christopherson
2020-06-08  8:56   ` Huacai Chen
2020-06-05 21:38 ` [PATCH 21/21] KVM: MIPS: Use common KVM implementation of MMU memory caches Sean Christopherson
2020-06-08  8:57   ` Huacai Chen
2020-06-11  8:06 ` [PATCH 00/21] KVM: Cleanup and unify kvm_mmu_memory_cache usage Marc Zyngier
