From mboxrd@z Thu Jan  1 00:00:00 1970
From: Sean Christopherson
Subject: [PATCH v3 13/21] KVM: x86/mmu: Prepend "kvm_" to memory cache helpers that will be global
Date: Thu, 2 Jul 2020 19:35:37 -0700
Message-ID: <20200703023545.8771-14-sean.j.christopherson@intel.com>
References: <20200703023545.8771-1-sean.j.christopherson@intel.com>
Mime-Version: 1.0
Content-Transfer-Encoding: 8bit
Return-path:
Received: from mga09.intel.com ([134.134.136.24]:3198 "EHLO mga09.intel.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1726352AbgGCCgO (ORCPT );
	Thu, 2 Jul 2020 22:36:14 -0400
In-Reply-To: <20200703023545.8771-1-sean.j.christopherson@intel.com>
Sender: linux-arch-owner@vger.kernel.org
List-ID:
To: Marc Zyngier, Paolo Bonzini, Arnd Bergmann
Cc: James Morse, Julien Thierry, Suzuki K Poulose, Sean Christopherson,
	Vitaly Kuznetsov, Wanpeng Li, Jim Mattson, Joerg Roedel,
	linux-arm-kernel@lists.infradead.org, kvmarm@lists.cs.columbia.edu,
	linux-mips@vger.kernel.org, kvm@vger.kernel.org,
	linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
	Ben Gardon, Peter Feiner, Peter Shier, Junaid Shahid,
	Christoffer Dall

Rename the memory helpers that will soon be moved to common code and be
made globally available via linux/kvm_host.h.  "mmu" alone is not a
sufficient namespace for globally available KVM symbols.

Opportunistically add "nr_" in mmu_memory_cache_free_objects() to make
it clear the function returns the number of free objects, as opposed to
freeing existing objects.

Suggested-by: Christoffer Dall
Reviewed-by: Ben Gardon
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/mmu/mmu.c | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 586d63de0e78..f4c8dae476bb 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1071,7 +1071,7 @@ static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 	return (void *)__get_free_page(gfp_flags);
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+static int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 {
 	void *obj;
 
@@ -1086,12 +1086,12 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 	return 0;
 }
 
-static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *mc)
+static int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
 {
 	return mc->nobjs;
 }
 
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+static void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 {
 	while (mc->nobjs) {
 		if (mc->kmem_cache)
@@ -1106,33 +1106,33 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 	int r;
 
 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
-				   1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
+	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
 	if (r)
 		return r;
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
-				   PT64_ROOT_MAX_LEVEL);
+	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
+				       PT64_ROOT_MAX_LEVEL);
 	if (r)
 		return r;
 	if (maybe_indirect) {
-		r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
-					   PT64_ROOT_MAX_LEVEL);
+		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+					       PT64_ROOT_MAX_LEVEL);
 		if (r)
 			return r;
 	}
-	return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
-				      PT64_ROOT_MAX_LEVEL);
+	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
+					  PT64_ROOT_MAX_LEVEL);
 }
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
-	mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
-	mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
-	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+static void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 {
 	void *p;
 
@@ -1146,7 +1146,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 
 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
 {
-	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
+	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
 }
 
 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
@@ -1417,7 +1417,7 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
 	struct kvm_mmu_memory_cache *mc;
 
 	mc = &vcpu->arch.mmu_pte_list_desc_cache;
-	return mmu_memory_cache_free_objects(mc);
+	return kvm_mmu_memory_cache_nr_free_objects(mc);
 }
 
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -2104,10 +2104,10 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
 {
 	struct kvm_mmu_page *sp;
 
-	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
+	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
 	if (!direct)
-		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
 	/*
-- 
2.26.0
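
For context, the helpers renamed here implement KVM's usual "top up
outside the lock, allocate under the lock" pattern.  The sketch below is
illustrative only and not part of the patch: the example_ function and
the locking shown are hypothetical, while the kvm_mmu_*() helper names,
PTE_PREFETCH_NUM, rmap_add() and the mmu_pte_list_desc_cache field all
appear in the hunks above.

/*
 * Hypothetical caller sketching how the renamed helpers fit together;
 * only the kvm_mmu_*() helpers and the cache field come from the patch.
 */
static int example_add_rmap(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_memory_cache *mc = &vcpu->arch.mmu_pte_list_desc_cache;
	int r;

	/* Fill the cache outside mmu_lock, where allocations may sleep. */
	r = kvm_mmu_topup_memory_cache(mc, PTE_PREFETCH_NUM);
	if (r)
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);

	/*
	 * Under the lock, objects come out of the pre-filled cache and
	 * allocation cannot fail.  Note that nr_free_objects() only
	 * reports the count -- it frees nothing, which is exactly the
	 * ambiguity the "nr_" rename resolves.
	 */
	if (kvm_mmu_memory_cache_nr_free_objects(mc))
		r = rmap_add(vcpu, spte, gfn);

	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}

At vCPU teardown, any objects still sitting in a cache are returned via
kvm_mmu_free_memory_cache(), as mmu_free_memory_caches() does in the
hunk above.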