From: Sean Christopherson
Subject: [PATCH v3 12/21] KVM: x86/mmu: Skip filling the gfn cache for guaranteed direct MMU topups
Date: Thu, 2 Jul 2020 19:35:36 -0700
Message-ID: <20200703023545.8771-13-sean.j.christopherson@intel.com>
In-Reply-To: <20200703023545.8771-1-sean.j.christopherson@intel.com>
References: <20200703023545.8771-1-sean.j.christopherson@intel.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: Marc Zyngier, Paolo Bonzini, Arnd Bergmann
Cc: linux-arch@vger.kernel.org, Junaid Shahid, Wanpeng Li,
	kvm@vger.kernel.org, Joerg Roedel, Peter Shier,
	linux-mips@vger.kernel.org, Sean Christopherson,
	linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	Ben Gardon, Vitaly Kuznetsov, Peter Feiner,
	kvmarm@lists.cs.columbia.edu, Jim Mattson

Don't bother filling the gfn array cache when the caller is a fully
direct MMU, i.e. won't need a gfn array for shadow pages.

Reviewed-by: Ben Gardon
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/mmu/mmu.c         | 18 ++++++++++--------
 arch/x86/kvm/mmu/paging_tmpl.h |  4 ++--
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 60b0d460bbf5..586d63de0e78 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1101,7 +1101,7 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 	}
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
 	int r;
 
@@ -1114,10 +1114,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 				   PT64_ROOT_MAX_LEVEL);
 	if (r)
 		return r;
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
-				   PT64_ROOT_MAX_LEVEL);
-	if (r)
-		return r;
+	if (maybe_indirect) {
+		r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+					   PT64_ROOT_MAX_LEVEL);
+		if (r)
+			return r;
+	}
 	return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
 				      PT64_ROOT_MAX_LEVEL);
 }
@@ -4107,7 +4109,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (fast_page_fault(vcpu, gpa, error_code))
 		return RET_PF_RETRY;
 
-	r = mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu, false);
 	if (r)
 		return r;
 
@@ -5142,7 +5144,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	r = mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
 	if (r)
 		goto out;
 	r = mmu_alloc_roots(vcpu);
@@ -5336,7 +5338,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	 * or not since pte prefetch is skiped if it does not have
 	 * enough objects in the cache.
 	 */
-	mmu_topup_memory_caches(vcpu);
+	mmu_topup_memory_caches(vcpu, true);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 451d7aa7d959..8d2159ae3bdf 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -815,7 +815,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 		return RET_PF_EMULATE;
 	}
 
-	r = mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu, true);
 	if (r)
 		return r;
 
@@ -902,7 +902,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 	 * No need to check return value here, rmap_can_add() can
 	 * help us to skip pte prefetch later.
 	 */
-	mmu_topup_memory_caches(vcpu);
+	mmu_topup_memory_caches(vcpu, true);
 
 	if (!VALID_PAGE(root_hpa)) {
 		WARN_ON(1);
--
2.26.0
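
For readers outside the KVM MMU code, the following standalone userspace
sketch (not part of the patch; all demo_* names and sizes are invented
for illustration) models the pattern the change relies on: a caller that
is known to use a fully direct MMU passes maybe_indirect=false, so the
gfn-array-equivalent cache is never topped up, while possibly-indirect
callers keep the old behavior and fill every cache.

/*
 * Standalone sketch (not kernel code): models how a "maybe_indirect"
 * flag can skip topping up a cache that only indirect (shadow) MMUs
 * consume. demo_cache, demo_topup_caches, etc. are invented names.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_CACHE_CAPACITY 8	/* stand-in for a PT64_ROOT_MAX_LEVEL-sized topup */

struct demo_cache {
	int nobjs;				/* objects currently preallocated */
	void *objects[DEMO_CACHE_CAPACITY];
};

/* Fill a cache up to 'min' objects; 0 on success, -1 on allocation failure. */
static int demo_cache_topup(struct demo_cache *cache, int min)
{
	while (cache->nobjs < min) {
		void *obj = malloc(64);

		if (!obj)
			return -1;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

struct demo_vcpu {
	struct demo_cache pte_list_desc_cache;
	struct demo_cache shadow_page_cache;
	struct demo_cache gfn_array_cache;	/* only needed by indirect (shadow) MMUs */
	struct demo_cache page_header_cache;
};

/* Mirrors the patch's flow: the gfn-array topup is gated on maybe_indirect. */
static int demo_topup_caches(struct demo_vcpu *vcpu, bool maybe_indirect)
{
	int r;

	r = demo_cache_topup(&vcpu->pte_list_desc_cache, DEMO_CACHE_CAPACITY);
	if (r)
		return r;
	r = demo_cache_topup(&vcpu->shadow_page_cache, DEMO_CACHE_CAPACITY);
	if (r)
		return r;
	if (maybe_indirect) {
		r = demo_cache_topup(&vcpu->gfn_array_cache, DEMO_CACHE_CAPACITY);
		if (r)
			return r;
	}
	return demo_cache_topup(&vcpu->page_header_cache, DEMO_CACHE_CAPACITY);
}

int main(void)
{
	struct demo_vcpu vcpu = {0};

	/* A fully direct caller (e.g. the TDP fault path) never needs the gfn array. */
	if (demo_topup_caches(&vcpu, false))
		return 1;
	printf("direct topup: gfn array objects = %d\n", vcpu.gfn_array_cache.nobjs);

	/* A possibly-indirect caller fills every cache, as before the patch. */
	if (demo_topup_caches(&vcpu, true))
		return 1;
	printf("indirect topup: gfn array objects = %d\n", vcpu.gfn_array_cache.nobjs);
	return 0;
}

Built with any C99 compiler, the first printf reports zero preallocated
gfn-array objects after the direct topup, and the second reports a full
cache once an indirect-capable caller asks for one, which is the whole
point of threading the maybe_indirect flag through the topup path.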