From: Sean Christopherson <sean.j.christopherson@intel.com>
To: Marc Zyngier <maz@kernel.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Arnd Bergmann <arnd@arndb.de>
Cc: James Morse <james.morse@arm.com>,
	Julien Thierry <julien.thierry.kdev@gmail.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-mips@vger.kernel.org,
	kvm@vger.kernel.org, linux-arch@vger.kernel.org,
	linux-kernel@vger.kernel.org, Ben Gardon <bgardon@google.com>,
	Peter Feiner <pfeiner@google.com>,
	Peter Shier <pshier@google.com>,
	Junaid Shahid <junaids@google.com>,
	Christoffer Dall <christoffer.dall@arm.com>
Subject: [PATCH v3 12/21] KVM: x86/mmu: Skip filling the gfn cache for guaranteed direct MMU topups
Date: Thu,  2 Jul 2020 19:35:36 -0700	[thread overview]
Message-ID: <20200703023545.8771-13-sean.j.christopherson@intel.com> (raw)
In-Reply-To: <20200703023545.8771-1-sean.j.christopherson@intel.com>

Don't bother filling the gfn array cache when the caller is a fully
direct MMU, i.e. one that will never need a gfn array for shadow pages.
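
For context, a minimal, self-contained sketch of why direct shadow pages
can do without a gfn array: the gfn behind a given SPTE is computable from
the page's base gfn and the SPTE index, so only indirect (shadow paging)
pages need per-SPTE gfn storage.  The "sketch_" names below are hypothetical
stand-ins, not the kernel's structures; the shape mirrors KVM's
kvm_mmu_page_get_gfn().

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sketch_gfn_t;

/* Simplified stand-ins for kvm_mmu_page/role; illustration only. */
struct sketch_role {
	bool direct;
	int level;	/* 1 == 4K leaf level */
};

struct sketch_mmu_page {
	struct sketch_role role;
	sketch_gfn_t gfn;	/* base gfn mapped by this shadow page */
	sketch_gfn_t *gfns;	/* per-SPTE gfns, only for indirect pages */
};

#define SKETCH_PT64_LEVEL_BITS 9	/* 512 entries per table level */

static sketch_gfn_t sketch_page_get_gfn(struct sketch_mmu_page *sp, int index)
{
	/* Indirect pages map guest-chosen gfns; they must be recorded. */
	if (!sp->role.direct)
		return sp->gfns[index];

	/* Direct pages: derive the gfn from the base gfn and SPTE index. */
	return sp->gfn + ((sketch_gfn_t)index <<
			  ((sp->role.level - 1) * SKETCH_PT64_LEVEL_BITS));
}

With that in mind, the TDP fault path below can pass false unconditionally,
while paths that may run with shadow paging keep passing true (or, in
kvm_mmu_load(), !direct_map).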

Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c         | 18 ++++++++++--------
 arch/x86/kvm/mmu/paging_tmpl.h |  4 ++--
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 60b0d460bbf5..586d63de0e78 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1101,7 +1101,7 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 	}
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
 	int r;
 
@@ -1114,10 +1114,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 				   PT64_ROOT_MAX_LEVEL);
 	if (r)
 		return r;
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
-				   PT64_ROOT_MAX_LEVEL);
-	if (r)
-		return r;
+	if (maybe_indirect) {
+		r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+					   PT64_ROOT_MAX_LEVEL);
+		if (r)
+			return r;
+	}
 	return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
 				      PT64_ROOT_MAX_LEVEL);
 }
@@ -4107,7 +4109,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (fast_page_fault(vcpu, gpa, error_code))
 		return RET_PF_RETRY;
 
-	r = mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu, false);
 	if (r)
 		return r;
 
@@ -5142,7 +5144,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	r = mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
 	if (r)
 		goto out;
 	r = mmu_alloc_roots(vcpu);
@@ -5336,7 +5338,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	 * or not since pte prefetch is skiped if it does not have
 	 * enough objects in the cache.
 	 */
-	mmu_topup_memory_caches(vcpu);
+	mmu_topup_memory_caches(vcpu, true);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 451d7aa7d959..8d2159ae3bdf 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -815,7 +815,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 		return RET_PF_EMULATE;
 	}
 
-	r = mmu_topup_memory_caches(vcpu);
+	r = mmu_topup_memory_caches(vcpu, true);
 	if (r)
 		return r;
 
@@ -902,7 +902,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 	 * No need to check return value here, rmap_can_add() can
 	 * help us to skip pte prefetch later.
 	 */
-	mmu_topup_memory_caches(vcpu);
+	mmu_topup_memory_caches(vcpu, true);
 
 	if (!VALID_PAGE(root_hpa)) {
 		WARN_ON(1);
-- 
2.26.0


Thread overview: 86+ messages

2020-07-03  2:35 [PATCH v3 00/21] KVM: Cleanup and unify kvm_mmu_memory_cache usage Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 01/21] KVM: x86/mmu: Track the associated kmem_cache in the MMU caches Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 02/21] KVM: x86/mmu: Consolidate "page" variant of memory cache helpers Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 03/21] KVM: x86/mmu: Use consistent "mc" name for kvm_mmu_memory_cache locals Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 04/21] KVM: x86/mmu: Remove superfluous gotos from mmu_topup_memory_caches() Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 05/21] KVM: x86/mmu: Try to avoid crashing KVM if a MMU memory cache is empty Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 06/21] KVM: x86/mmu: Move fast_page_fault() call above mmu_topup_memory_caches() Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 07/21] KVM: x86/mmu: Topup memory caches after walking GVA->GPA Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 08/21] KVM: x86/mmu: Clean up the gorilla math in mmu_topup_memory_caches() Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 09/21] KVM: x86/mmu: Separate the memory caches for shadow pages and gfn arrays Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 10/21] KVM: x86/mmu: Make __GFP_ZERO a property of the memory cache Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 11/21] KVM: x86/mmu: Zero allocate shadow pages (outside of mmu_lock) Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 12/21] KVM: x86/mmu: Skip filling the gfn cache for guaranteed direct MMU topups Sean Christopherson [this message]
2020-07-03  2:35 ` [PATCH v3 13/21] KVM: x86/mmu: Prepend "kvm_" to memory cache helpers that will be global Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 14/21] KVM: Move x86's version of struct kvm_mmu_memory_cache to common code Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 15/21] KVM: Move x86's MMU memory cache helpers to common KVM code Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 16/21] KVM: arm64: Drop @max param from mmu_topup_memory_cache() Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 17/21] KVM: arm64: Use common code's approach for __GFP_ZERO with memory caches Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 18/21] KVM: arm64: Use common KVM implementation of MMU memory caches Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 19/21] KVM: MIPS: Drop @max param from mmu_topup_memory_cache() Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 20/21] KVM: MIPS: Account pages used for GPA page tables Sean Christopherson
2020-07-03  2:35 ` [PATCH v3 21/21] KVM: MIPS: Use common KVM implementation of MMU memory caches Sean Christopherson
2020-07-09 12:14 ` [PATCH v3 00/21] KVM: Cleanup and unify kvm_mmu_memory_cache usage Paolo Bonzini
