From: Sean Christopherson <seanjc@google.com>
To: Marc Zyngier <maz@kernel.org>,
	Huacai Chen <chenhuacai@kernel.org>,
	 Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
	Paul Mackerras <paulus@ozlabs.org>,
	 Anup Patel <anup.patel@wdc.com>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	 Palmer Dabbelt <palmer@dabbelt.com>,
	Albert Ou <aou@eecs.berkeley.edu>,
	 Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>,
	kvm@vger.kernel.org, David Hildenbrand <david@redhat.com>,
	linux-kernel@vger.kernel.org, Atish Patra <atish.patra@wdc.com>,
	Ben Gardon <bgardon@google.com>,
	"Maciej S . Szmigiero" <maciej.szmigiero@oracle.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	linux-riscv@lists.infradead.org, Joerg Roedel <joro@8bytes.org>,
	kvmarm@lists.cs.columbia.edu, kvm-ppc@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	Jim Mattson <jmattson@google.com>,
	Cornelia Huck <cohuck@redhat.com>,
	linux-mips@vger.kernel.org, kvm-riscv@lists.infradead.org,
	Vitaly Kuznetsov <vkuznets@redhat.com>
Subject: [PATCH v5.5 21/30] KVM: Integrate gfn_to_memslot_approx() into search_memslots()
Date: Thu,  4 Nov 2021 00:25:22 +0000
Message-ID: <20211104002531.1176691-22-seanjc@google.com>
In-Reply-To: <20211104002531.1176691-1-seanjc@google.com>

From: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>

The s390 arch has gfn_to_memslot_approx(), which is almost identical to
search_memslots(); the only difference is that when the gfn falls in a hole,
one of the memslots bordering the hole is returned.

Add this lookup mode as an option to search_memslots() so that we don't
carry two almost identical functions for looking up a memslot by its gfn.
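
As a rough illustration of the consolidated semantics (a standalone
sketch, not the kernel code: the made-up "struct slot" and lookup()
stand in for struct kvm_memory_slot and search_memslots(), the
last_used_slot caching, try_get_memslot() and the index out-parameter
are omitted, and at least one slot is assumed to exist):

  #include <stdbool.h>
  #include <stddef.h>

  struct slot { unsigned long base_gfn, npages; };

  /* Slots are sorted by base_gfn from highest to lowest. */
  struct slot *lookup(struct slot *slots, int used, unsigned long gfn,
                      bool approx)
  {
          int start = 0, end = used, mid;

          while (start < end) {
                  mid = start + (end - start) / 2;
                  if (gfn >= slots[mid].base_gfn)
                          end = mid;      /* answer is at mid or a lower index */
                  else
                          start = mid + 1;
          }

          if (start >= used)              /* gfn is below the lowest slot */
                  return approx ? &slots[used - 1] : NULL;

          if (gfn >= slots[start].base_gfn &&
              gfn < slots[start].base_gfn + slots[start].npages)
                  return &slots[start];   /* gfn hits this slot */

          /* gfn fell in a hole: bordering slot with approx, miss otherwise. */
          return approx ? &slots[start] : NULL;
  }

With approx == false this is the existing search_memslots() behavior;
with approx == true it is the behavior s390 previously open-coded in
gfn_to_memslot_approx().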

Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
[sean: tweaked helper names to keep gfn_to_memslot_approx() in s390]
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/s390/kvm/kvm-s390.c | 45 +++++++---------------------------------
 include/linux/kvm_host.h | 35 ++++++++++++++++++++++++-------
 virt/kvm/kvm_main.c      |  2 +-
 3 files changed, 36 insertions(+), 46 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c4d0ed5f3400..4e032e176216 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1941,41 +1941,6 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 /* for consistency */
 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
 
-/*
- * Similar to gfn_to_memslot, but returns the index of a memslot also when the
- * address falls in a hole. In that case the index of one of the memslots
- * bordering the hole is returned.
- */
-static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
-{
-	int start = 0, end = slots->used_slots;
-	int slot = atomic_read(&slots->last_used_slot);
-	struct kvm_memory_slot *memslots = slots->memslots;
-
-	if (gfn >= memslots[slot].base_gfn &&
-	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
-		return slot;
-
-	while (start < end) {
-		slot = start + (end - start) / 2;
-
-		if (gfn >= memslots[slot].base_gfn)
-			end = slot;
-		else
-			start = slot + 1;
-	}
-
-	if (start >= slots->used_slots)
-		return slots->used_slots - 1;
-
-	if (gfn >= memslots[start].base_gfn &&
-	    gfn < memslots[start].base_gfn + memslots[start].npages) {
-		atomic_set(&slots->last_used_slot, start);
-	}
-
-	return start;
-}
-
 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
 			      u8 *res, unsigned long bufsize)
 {
@@ -1999,11 +1964,17 @@ static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
 	return 0;
 }
 
+static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
+						     gfn_t gfn)
+{
+	return ____gfn_to_memslot(slots, gfn, true);
+}
+
 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
 					      unsigned long cur_gfn)
 {
-	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
-	struct kvm_memory_slot *ms = slots->memslots + slotidx;
+	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
+	int slotidx = ms - slots->memslots;
 	unsigned long ofs = cur_gfn - ms->base_gfn;
 
 	if (ms->base_gfn + ms->npages <= cur_gfn) {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2ef946e94a73..9d46937a3a4e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1230,10 +1230,14 @@ try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn)
  * Returns a pointer to the memslot that contains gfn and records the index of
  * the slot in index. Otherwise returns NULL.
  *
+ * With "approx" set returns the memslot also when the address falls
+ * in a hole. In that case one of the memslots bordering the hole is
+ * returned.
+ *
  * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
  */
 static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index)
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index, bool approx)
 {
 	int start = 0, end = slots->used_slots;
 	struct kvm_memory_slot *memslots = slots->memslots;
@@ -1251,22 +1255,26 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index)
 			start = slot + 1;
 	}
 
+	if (approx && start >= slots->used_slots) {
+		*index = slots->used_slots - 1;
+		return &memslots[slots->used_slots - 1];
+	}
+
 	slot = try_get_memslot(slots, start, gfn);
 	if (slot) {
 		*index = start;
 		return slot;
 	}
+	if (approx) {
+		*index = start;
+		return &memslots[start];
+	}
 
 	return NULL;
 }
 
-/*
- * __gfn_to_memslot() and its descendants are here because it is called from
- * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot()
- * itself isn't here as an inline because that would bloat other code too much.
- */
 static inline struct kvm_memory_slot *
-__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
 {
 	struct kvm_memory_slot *slot;
 	int slot_index = atomic_read(&slots->last_used_slot);
@@ -1275,7 +1283,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 	if (slot)
 		return slot;
 
-	slot = search_memslots(slots, gfn, &slot_index);
+	slot = search_memslots(slots, gfn, &slot_index, approx);
 	if (slot) {
 		atomic_set(&slots->last_used_slot, slot_index);
 		return slot;
@@ -1284,6 +1292,17 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 	return NULL;
 }
 
+/*
+ * __gfn_to_memslot() and its descendants are here to allow arch code to inline
+ * the lookups in hot paths.  gfn_to_memslot() itself isn't here as an inline
+ * because that would bloat other code too much.
+ */
+static inline struct kvm_memory_slot *
+__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+{
+	return ____gfn_to_memslot(slots, gfn, false);
+}
+
 static inline unsigned long
 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bbaa01afac43..a2d51ce957e1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2126,7 +2126,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 	 * search_memslots() instead of __gfn_to_memslot() to avoid
 	 * thrashing the VM-wide last_used_index in kvm_memslots.
 	 */
-	slot = search_memslots(slots, gfn, &slot_index);
+	slot = search_memslots(slots, gfn, &slot_index, false);
 	if (slot) {
 		vcpu->last_used_slot = slot_index;
 		return slot;
-- 
2.33.1.1089.g2158813163f-goog
