From mboxrd@z Thu Jan 1 00:00:00 1970
From: Sean Christopherson
Subject: [PATCH v3 21/21] KVM: MIPS: Use common KVM implementation of MMU memory caches
Date: Thu, 2 Jul 2020 19:35:45 -0700
Message-ID: <20200703023545.8771-22-sean.j.christopherson@intel.com>
In-Reply-To: <20200703023545.8771-1-sean.j.christopherson@intel.com>
References: <20200703023545.8771-1-sean.j.christopherson@intel.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Sender: kvmarm-bounces@lists.cs.columbia.edu
Errors-To: kvmarm-bounces@lists.cs.columbia.edu
List-Id: linux-arch.vger.kernel.org
To: Marc Zyngier, Paolo Bonzini, Arnd Bergmann
Cc: linux-arch@vger.kernel.org, Junaid Shahid, Wanpeng Li, kvm@vger.kernel.org,
    Joerg Roedel, Peter Shier, linux-mips@vger.kernel.org,
    Sean Christopherson, linux-kernel@vger.kernel.org,
    linux-arm-kernel@lists.infradead.org, Ben Gardon, Vitaly Kuznetsov,
    Peter Feiner, kvmarm@lists.cs.columbia.edu, Jim Mattson

Move to the common MMU memory cache implementation now that the common
code and MIPS's existing code are semantically compatible.

No functional change intended.

Suggested-by: Christoffer Dall
Signed-off-by: Sean Christopherson
---
 arch/mips/include/asm/Kbuild      |  1 -
 arch/mips/include/asm/kvm_host.h  | 11 ---------
 arch/mips/include/asm/kvm_types.h |  7 ++++++
 arch/mips/kvm/mmu.c               | 40 ++++---------------------------
 4 files changed, 12 insertions(+), 47 deletions(-)
 create mode 100644 arch/mips/include/asm/kvm_types.h

diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 397e6d24d2ab..8643d313890e 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -5,7 +5,6 @@ generated-y += syscall_table_64_n32.h
 generated-y += syscall_table_64_n64.h
 generated-y += syscall_table_64_o32.h
 generic-y += export.h
-generic-y += kvm_types.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += parport.h
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 363e7a89d173..f49617175f60 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -335,17 +335,6 @@ struct kvm_mips_tlb {
         long tlb_lo[2];
 };
 
-#define KVM_NR_MEM_OBJS 4
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-        int nobjs;
-        void *objects[KVM_NR_MEM_OBJS];
-};
-
 #define KVM_MIPS_AUX_FPU        0x1
 #define KVM_MIPS_AUX_MSA        0x2
 
diff --git a/arch/mips/include/asm/kvm_types.h b/arch/mips/include/asm/kvm_types.h
new file mode 100644
index 000000000000..213754d9ef6b
--- /dev/null
+++ b/arch/mips/include/asm/kvm_types.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MIPS_KVM_TYPES_H
+#define _ASM_MIPS_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 4
+
+#endif /* _ASM_MIPS_KVM_TYPES_H */
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 9d3c8c025624..87fa8d8a1031 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -25,39 +25,9 @@
 #define KVM_MMU_CACHE_MIN_PAGES 2
 #endif
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
-{
-        void *page;
-
-        if (cache->nobjs >= min)
-                return 0;
-        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-                page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
-                if (!page)
-                        return -ENOMEM;
-                cache->objects[cache->nobjs++] = page;
-        }
-        return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-        while (mc->nobjs)
-                free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-        void *p;
-
-        BUG_ON(!mc || !mc->nobjs);
-        p = mc->objects[--mc->nobjs];
-        return p;
-}
-
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
 /**
@@ -151,7 +121,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
 
                 if (!cache)
                         return NULL;
-                new_pmd = mmu_memory_cache_alloc(cache);
+                new_pmd = kvm_mmu_memory_cache_alloc(cache);
                 pmd_init((unsigned long)new_pmd,
                          (unsigned long)invalid_pte_table);
                 pud_populate(NULL, pud, new_pmd);
@@ -162,7 +132,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
 
                 if (!cache)
                         return NULL;
-                new_pte = mmu_memory_cache_alloc(cache);
+                new_pte = kvm_mmu_memory_cache_alloc(cache);
                 clear_page(new_pte);
                 pmd_populate_kernel(NULL, pmd, new_pte);
         }
@@ -709,7 +679,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
                 goto out;
 
         /* We need a minimum of cached pages ready for page table creation */
-        err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
+        err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
         if (err)
                 goto out;
 
@@ -793,7 +763,7 @@ static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
         int ret;
 
         /* We need a minimum of cached pages ready for page table creation */
-        ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
+        ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
         if (ret)
                 return NULL;
-- 
2.26.0
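
Note for reviewers without the rest of the series at hand: kvm_mmu_topup_memory_cache(),
kvm_mmu_memory_cache_alloc() and kvm_mmu_free_memory_cache() are the common helpers added
earlier in this series (include/linux/kvm_types.h and virt/kvm/kvm_main.c). The sketch below
only illustrates their shape so the MIPS conversion can be read in isolation; the gfp_zero and
kmem_cache fields and the mmu_memory_cache_alloc_obj() helper are paraphrased from memory of
that series and may not match the actual code line for line.

/*
 * Illustrative sketch of the common MMU memory cache, not the authoritative
 * definitions.  Needs <linux/slab.h>, <linux/gfp.h> and the per-arch
 * KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE (4 on MIPS, per this patch).  Each vCPU
 * pre-fills its cache before taking mmu_lock so that page table allocations
 * in the fault path cannot fail.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        gfp_t gfp_zero;                 /* optional __GFP_ZERO for allocations */
        struct kmem_cache *kmem_cache;  /* NULL means allocate whole pages */
        void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
};

static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
                                               gfp_t gfp_flags)
{
        gfp_flags |= mc->gfp_zero;

        if (mc->kmem_cache)
                return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
        return (void *)__get_free_page(gfp_flags);
}

/* Fill the cache to at least @min objects; called with mmu_lock not held. */
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
        void *obj;

        if (mc->nobjs >= min)
                return 0;
        while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
                obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
                if (!obj)
                        return mc->nobjs >= min ? 0 : -ENOMEM;
                mc->objects[mc->nobjs++] = obj;
        }
        return 0;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs) {
                if (mc->kmem_cache)
                        kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
                else
                        free_page((unsigned long)mc->objects[--mc->nobjs]);
        }
}

/* Pop a pre-allocated object; the cache must have been topped up first. */
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

MIPS keeps its explicit clear_page()/pmd_init() calls and does not use a dedicated kmem_cache,
so it only relies on the plain page-backed behaviour, which is what the deleted
mmu_*_memory_cache() helpers already provided; the conversion is therefore a straight rename
plus the new KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE define, which is why no functional change is
expected.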