From: Sean Christopherson <seanjc@google.com>
To: Marc Zyngier <maz@kernel.org>,
	Huacai Chen <chenhuacai@kernel.org>,
	 Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
	Paul Mackerras <paulus@ozlabs.org>,
	 Anup Patel <anup.patel@wdc.com>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	 Palmer Dabbelt <palmer@dabbelt.com>,
	Albert Ou <aou@eecs.berkeley.edu>,
	 Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>,
	kvm@vger.kernel.org, David Hildenbrand <david@redhat.com>,
	linux-kernel@vger.kernel.org, Atish Patra <atish.patra@wdc.com>,
	Ben Gardon <bgardon@google.com>,
	"Maciej S . Szmigiero" <maciej.szmigiero@oracle.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	linux-riscv@lists.infradead.org, Joerg Roedel <joro@8bytes.org>,
	kvmarm@lists.cs.columbia.edu, kvm-ppc@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	Jim Mattson <jmattson@google.com>,
	Cornelia Huck <cohuck@redhat.com>,
	linux-mips@vger.kernel.org, kvm-riscv@lists.infradead.org,
	Vitaly Kuznetsov <vkuznets@redhat.com>
Subject: [PATCH v5.5 23/30] KVM: Resolve memslot ID via a hash table instead of via a static array
Date: Thu,  4 Nov 2021 00:25:24 +0000	[thread overview]
Message-ID: <20211104002531.1176691-24-seanjc@google.com> (raw)
In-Reply-To: <20211104002531.1176691-1-seanjc@google.com>

From: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>

Mappings from a memslot ID to the corresponding memslot are currently
kept as indices in the static id_to_index array.  The size of this
array depends on the maximum allowed memslot count, regardless of the
number of memslots actually in use.

This has become especially problematic recently, when the memslot count
cap was removed: the maximum count is now a full 32k memslots, the
maximum allowed by the current KVM API.

Keeping these ID-to-memslot mappings in a hash table (instead of a
static array) avoids this problem.
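
For scale, a rough sizing sketch (illustrative arithmetic assuming a
64-bit host; not part of the patch itself):

	/* Old: one 2-byte index per possible slot, always allocated. */
	short id_to_index[KVM_MEM_SLOTS_NUM];	/* ~32k * 2 bytes ~= 64 KiB */

	/* New: a fixed 2^7 = 128 hlist_head buckets, 8 bytes each. */
	DECLARE_HASHTABLE(id_hash, 7);		/* 128 * 8 bytes = 1 KiB */

This cost is paid per struct kvm_memslots instance, of which each VM
has at least one per address space, so the savings multiply.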

Resolving a memslot ID to the actual memslot (instead of its index) will
also enable transitioning away from an array-based implementation of the
whole memslots structure in a later commit.
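
For readers unfamiliar with <linux/hashtable.h>, here is a minimal,
self-contained sketch of the add/lookup pattern the patch relies on
(the struct and function names below are made up for illustration; the
real code is in the diff):

	#include <linux/hashtable.h>

	struct item {
		struct hlist_node node;	/* links the item into a bucket */
		int id;			/* the hash key */
	};

	static DEFINE_HASHTABLE(table, 7);	/* 2^7 = 128 buckets */

	static void item_add(struct item *item)
	{
		/* Hash 'id' to pick a bucket and link 'node' into it. */
		hash_add(table, &item->node, item->id);
	}

	static struct item *item_lookup(int id)
	{
		struct item *item;

		/* Walk only the bucket that 'id' hashes to... */
		hash_for_each_possible(table, item, node, id) {
			if (item->id == id)	/* ...skipping collisions */
				return item;
		}
		return NULL;
	}

Removal is hash_del(&item->node), which is what the new
kvm_replace_memslot() helper uses when deleting or moving a slot.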

Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
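For reference, the behavior of the new kvm_replace_memslot() helper
depends on which of its arguments are non-NULL (this summary is derived
from the diff; the call shapes are illustrative):

	kvm_replace_memslot(slots, old,  NULL);	/* delete: unhash 'old' */
	kvm_replace_memslot(slots, NULL, new);	/* create: hash 'new' */
	kvm_replace_memslot(slots, old,  new);	/* move: unhash 'old', copy
						 * *old into *new, then hash
						 * 'new' */
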
 include/linux/kvm_host.h | 16 +++----
 virt/kvm/kvm_main.c      | 96 +++++++++++++++++++++++++++++++---------
 2 files changed, 84 insertions(+), 28 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d46937a3a4e..81003e3acd53 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -29,6 +29,7 @@
 #include <linux/refcount.h>
 #include <linux/nospec.h>
 #include <linux/notifier.h>
+#include <linux/hashtable.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -425,6 +426,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
 struct kvm_memory_slot {
+	struct hlist_node id_node;
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long *dirty_bitmap;
@@ -527,7 +529,7 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 struct kvm_memslots {
 	u64 generation;
 	/* The mapping table from slot id to the index in memslots[]. */
-	short id_to_index[KVM_MEM_SLOTS_NUM];
+	DECLARE_HASHTABLE(id_hash, 7);
 	atomic_t last_used_slot;
 	int used_slots;
 	struct kvm_memory_slot memslots[];
@@ -789,16 +791,14 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 static inline
 struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 {
-	int index = slots->id_to_index[id];
 	struct kvm_memory_slot *slot;
 
-	if (index < 0)
-		return NULL;
+	hash_for_each_possible(slots->id_hash, slot, id_node, id) {
+		if (slot->id == id)
+			return slot;
+	}
 
-	slot = &slots->memslots[index];
-
-	WARN_ON(slot->id != id);
-	return slot;
+	return NULL;
 }
 
 /*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d45d574a5a2d..13c497abaab8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -853,15 +853,13 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm)
 
 static struct kvm_memslots *kvm_alloc_memslots(void)
 {
-	int i;
 	struct kvm_memslots *slots;
 
 	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
 		return NULL;
 
-	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
-		slots->id_to_index[i] = -1;
+	hash_init(slots->id_hash);
 
 	return slots;
 }
@@ -1259,17 +1257,49 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
 	return 0;
 }
 
+static void kvm_replace_memslot(struct kvm_memslots *slots,
+				struct kvm_memory_slot *old,
+				struct kvm_memory_slot *new)
+{
+	/*
+	 * Remove the old memslot from the hash list; copying the node data
+	 * would corrupt the list.
+	 */
+	if (old) {
+		hash_del(&old->id_node);
+
+		if (!new)
+			return;
+	}
+
+	/* Copy the source *data*, not the pointer, to the destination. */
+	if (old)
+		*new = *old;
+
+	/* (Re)Add the new memslot. */
+	hash_add(slots->id_hash, &new->id_node, new->id);
+}
+
+static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src)
+{
+	struct kvm_memory_slot *mslots = slots->memslots;
+
+	kvm_replace_memslot(slots, &mslots[src], &mslots[dst]);
+}
+
 /*
  * Delete a memslot by decrementing the number of used slots and shifting all
  * other entries in the array forward one spot.
+ * @memslot is a detached dummy struct with just .id and .as_id filled.
  */
 static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 				      struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
+	if (WARN_ON(!oldslot))
 		return;
 
 	slots->used_slots--;
@@ -1277,12 +1307,17 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 	if (atomic_read(&slots->last_used_slot) >= slots->used_slots)
 		atomic_set(&slots->last_used_slot, 0);
 
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
-	}
+	/*
+	 * Remove the to-be-deleted memslot from the list _before_ shifting
+	 * the trailing memslots forward, as its data will be overwritten.
+	 * Defer the (somewhat pointless) copying of the memslot until after
+	 * the last slot has been shifted to avoid overwriting said last slot.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
+	for (i = oldslot - mslots; i < slots->used_slots; i++)
+		kvm_shift_memslot(slots, i, i + 1);
 	mslots[i] = *memslot;
-	slots->id_to_index[memslot->id] = -1;
 }
 
 /*
@@ -1300,30 +1335,39 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
  * itself is not preserved in the array, i.e. not swapped at this time, only
  * its new index into the array is tracked.  Returns the changed memslot's
  * current index into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the changed slot.
  */
 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
 					    struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots)
+	if (!oldslot || !slots->used_slots)
 		return -1;
 
+	/*
+	 * Delete the slot from the hash table before sorting the remaining
+	 * slots; the slot's data may be overwritten when copying slots as part
+	 * of the sorting process.  update_memslots() will unconditionally
+	 * rewrite the entire slot and re-add it to the hash table.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
 	/*
 	 * Move the target memslot backward in the array by shifting existing
 	 * memslots with a higher GFN (than the target memslot) towards the
 	 * front of the array.
 	 */
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
+	for (i = oldslot - mslots; i < slots->used_slots - 1; i++) {
 		if (memslot->base_gfn > mslots[i + 1].base_gfn)
 			break;
 
 		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
 
-		/* Shift the next memslot forward one and update its index. */
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i + 1);
 	}
 	return i;
 }
@@ -1334,6 +1378,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
  * is not preserved in the array, i.e. not swapped at this time, only its new
  * index into the array is tracked.  Returns the changed memslot's final index
  * into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the new or
+ * changed slot.
+ * Assumes that the memslot at @start index is not in @slots->id_hash.
  */
 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 					   struct kvm_memory_slot *memslot,
@@ -1348,9 +1396,7 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 
 		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
 
-		/* Shift the next memslot back one and update its index. */
-		mslots[i] = mslots[i - 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i - 1);
 	}
 	return i;
 }
@@ -1395,6 +1441,9 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
  * most likely to be referenced, sorting it to the front of the array was
  * advantageous.  The current binary search starts from the middle of the array
  * and uses an LRU pointer to improve performance for all memslots and GFNs.
+ *
+ * @memslot is a detached struct, not a part of the current or new memslot
+ * array.
  */
 static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *memslot,
@@ -1419,7 +1468,7 @@ static void update_memslots(struct kvm_memslots *slots,
 		 * its index accordingly.
 		 */
 		slots->memslots[i] = *memslot;
-		slots->id_to_index[memslot->id] = i;
+		kvm_replace_memslot(slots, NULL, &slots->memslots[i]);
 	}
 }
 
@@ -1512,6 +1561,7 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 {
 	struct kvm_memslots *slots;
 	size_t new_size;
+	struct kvm_memory_slot *memslot;
 
 	if (change == KVM_MR_CREATE)
 		new_size = kvm_memslots_size(old->used_slots + 1);
@@ -1519,8 +1569,14 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 		new_size = kvm_memslots_size(old->used_slots);
 
 	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
-	if (likely(slots))
-		memcpy(slots, old, kvm_memslots_size(old->used_slots));
+	if (unlikely(!slots))
+		return NULL;
+
+	memcpy(slots, old, kvm_memslots_size(old->used_slots));
+
+	hash_init(slots->id_hash);
+	kvm_for_each_memslot(memslot, slots)
+		hash_add(slots->id_hash, &memslot->id_node, memslot->id);
 
 	return slots;
 }
-- 
2.33.1.1089.g2158813163f-goog

WARNING: multiple messages have this Message-ID (diff)
From: Sean Christopherson <seanjc@google.com>
To: Marc Zyngier <maz@kernel.org>,
	Huacai Chen <chenhuacai@kernel.org>,
	Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
	Paul Mackerras <paulus@ozlabs.org>,
	Anup Patel <anup.patel@wdc.com>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	Albert Ou <aou@eecs.berkeley.edu>,
	Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: James Morse <james.morse@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Atish Patra <atish.patra@wdc.com>,
	David Hildenbrand <david@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	Sean Christopherson <seanjc@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-mips@vger.kernel.org,
	kvm@vger.kernel.org, kvm-ppc@vger.kernel.org,
	kvm-riscv@lists.infradead.org, linux-riscv@lists.infradead.org,
	linux-kernel@vger.kernel.org, Ben Gardon <bgardon@google.com>,
	"Maciej S . Szmigiero" <maciej.szmigiero@oracle.com>
Subject: [PATCH v5.5 23/30] KVM: Resolve memslot ID via a hash table instead of via a static array
Date: Thu,  4 Nov 2021 00:25:24 +0000	[thread overview]
Message-ID: <20211104002531.1176691-24-seanjc@google.com> (raw)
In-Reply-To: <20211104002531.1176691-1-seanjc@google.com>

From: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>

Memslot ID to the corresponding memslot mappings are currently kept as
indices in static id_to_index array.
The size of this array depends on the maximum allowed memslot count
(regardless of the number of memslots actually in use).

This has become especially problematic recently, when memslot count cap was
removed, so the maximum count is now full 32k memslots - the maximum
allowed by the current KVM API.

Keeping these IDs in a hash table (instead of an array) avoids this
problem.

Resolving a memslot ID to the actual memslot (instead of its index) will
also enable transitioning away from an array-based implementation of the
whole memslots structure in a later commit.

Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 include/linux/kvm_host.h | 16 +++----
 virt/kvm/kvm_main.c      | 96 +++++++++++++++++++++++++++++++---------
 2 files changed, 84 insertions(+), 28 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d46937a3a4e..81003e3acd53 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -29,6 +29,7 @@
 #include <linux/refcount.h>
 #include <linux/nospec.h>
 #include <linux/notifier.h>
+#include <linux/hashtable.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -425,6 +426,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
 struct kvm_memory_slot {
+	struct hlist_node id_node;
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long *dirty_bitmap;
@@ -527,7 +529,7 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 struct kvm_memslots {
 	u64 generation;
 	/* The mapping table from slot id to the index in memslots[]. */
-	short id_to_index[KVM_MEM_SLOTS_NUM];
+	DECLARE_HASHTABLE(id_hash, 7);
 	atomic_t last_used_slot;
 	int used_slots;
 	struct kvm_memory_slot memslots[];
@@ -789,16 +791,14 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 static inline
 struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 {
-	int index = slots->id_to_index[id];
 	struct kvm_memory_slot *slot;
 
-	if (index < 0)
-		return NULL;
+	hash_for_each_possible(slots->id_hash, slot, id_node, id) {
+		if (slot->id == id)
+			return slot;
+	}
 
-	slot = &slots->memslots[index];
-
-	WARN_ON(slot->id != id);
-	return slot;
+	return NULL;
 }
 
 /*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d45d574a5a2d..13c497abaab8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -853,15 +853,13 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm)
 
 static struct kvm_memslots *kvm_alloc_memslots(void)
 {
-	int i;
 	struct kvm_memslots *slots;
 
 	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
 		return NULL;
 
-	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
-		slots->id_to_index[i] = -1;
+	hash_init(slots->id_hash);
 
 	return slots;
 }
@@ -1259,17 +1257,49 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
 	return 0;
 }
 
+static void kvm_replace_memslot(struct kvm_memslots *slots,
+				struct kvm_memory_slot *old,
+				struct kvm_memory_slot *new)
+{
+	/*
+	 * Remove the old memslot from the hash list, copying the node data
+	 * would corrupt the list.
+	 */
+	if (old) {
+		hash_del(&old->id_node);
+
+		if (!new)
+			return;
+	}
+
+	/* Copy the source *data*, not the pointer, to the destination. */
+	if (old)
+		*new = *old;
+
+	/* (Re)Add the new memslot. */
+	hash_add(slots->id_hash, &new->id_node, new->id);
+}
+
+static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src)
+{
+	struct kvm_memory_slot *mslots = slots->memslots;
+
+	kvm_replace_memslot(slots, &mslots[src], &mslots[dst]);
+}
+
 /*
  * Delete a memslot by decrementing the number of used slots and shifting all
  * other entries in the array forward one spot.
+ * @memslot is a detached dummy struct with just .id and .as_id filled.
  */
 static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 				      struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
+	if (WARN_ON(!oldslot))
 		return;
 
 	slots->used_slots--;
@@ -1277,12 +1307,17 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 	if (atomic_read(&slots->last_used_slot) >= slots->used_slots)
 		atomic_set(&slots->last_used_slot, 0);
 
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
-	}
+	/*
+	 * Remove the to-be-deleted memslot from the list _before_ shifting
+	 * the trailing memslots forward, its data will be overwritten.
+	 * Defer the (somewhat pointless) copying of the memslot until after
+	 * the last slot has been shifted to avoid overwriting said last slot.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
+	for (i = oldslot - mslots; i < slots->used_slots; i++)
+		kvm_shift_memslot(slots, i, i + 1);
 	mslots[i] = *memslot;
-	slots->id_to_index[memslot->id] = -1;
 }
 
 /*
@@ -1300,30 +1335,39 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
  * itself is not preserved in the array, i.e. not swapped at this time, only
  * its new index into the array is tracked.  Returns the changed memslot's
  * current index into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the changed slot.
  */
 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
 					    struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots)
+	if (!oldslot || !slots->used_slots)
 		return -1;
 
+	/*
+	 * Delete the slot from the hash table before sorting the remaining
+	 * slots, the slot's data may be overwritten when copying slots as part
+	 * of the sorting proccess.  update_memslots() will unconditionally
+	 * rewrite the entire slot and re-add it to the hash table.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
 	/*
 	 * Move the target memslot backward in the array by shifting existing
 	 * memslots with a higher GFN (than the target memslot) towards the
 	 * front of the array.
 	 */
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
+	for (i = oldslot - mslots; i < slots->used_slots - 1; i++) {
 		if (memslot->base_gfn > mslots[i + 1].base_gfn)
 			break;
 
 		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
 
-		/* Shift the next memslot forward one and update its index. */
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i + 1);
 	}
 	return i;
 }
@@ -1334,6 +1378,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
  * is not preserved in the array, i.e. not swapped at this time, only its new
  * index into the array is tracked.  Returns the changed memslot's final index
  * into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the new or
+ * changed slot.
+ * Assumes that the memslot at @start index is not in @slots->id_hash.
  */
 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 					   struct kvm_memory_slot *memslot,
@@ -1348,9 +1396,7 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 
 		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
 
-		/* Shift the next memslot back one and update its index. */
-		mslots[i] = mslots[i - 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i - 1);
 	}
 	return i;
 }
@@ -1395,6 +1441,9 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
  * most likely to be referenced, sorting it to the front of the array was
  * advantageous.  The current binary search starts from the middle of the array
  * and uses an LRU pointer to improve performance for all memslots and GFNs.
+ *
+ * @memslot is a detached struct, not a part of the current or new memslot
+ * array.
  */
 static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *memslot,
@@ -1419,7 +1468,7 @@ static void update_memslots(struct kvm_memslots *slots,
 		 * its index accordingly.
 		 */
 		slots->memslots[i] = *memslot;
-		slots->id_to_index[memslot->id] = i;
+		kvm_replace_memslot(slots, NULL, &slots->memslots[i]);
 	}
 }
 
@@ -1512,6 +1561,7 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 {
 	struct kvm_memslots *slots;
 	size_t new_size;
+	struct kvm_memory_slot *memslot;
 
 	if (change == KVM_MR_CREATE)
 		new_size = kvm_memslots_size(old->used_slots + 1);
@@ -1519,8 +1569,14 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 		new_size = kvm_memslots_size(old->used_slots);
 
 	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
-	if (likely(slots))
-		memcpy(slots, old, kvm_memslots_size(old->used_slots));
+	if (unlikely(!slots))
+		return NULL;
+
+	memcpy(slots, old, kvm_memslots_size(old->used_slots));
+
+	hash_init(slots->id_hash);
+	kvm_for_each_memslot(memslot, slots)
+		hash_add(slots->id_hash, &memslot->id_node, memslot->id);
 
 	return slots;
 }
-- 
2.33.1.1089.g2158813163f-goog


WARNING: multiple messages have this Message-ID (diff)
From: Sean Christopherson <seanjc@google.com>
To: Marc Zyngier <maz@kernel.org>,
	Huacai Chen <chenhuacai@kernel.org>,
	 Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
	Paul Mackerras <paulus@ozlabs.org>,
	 Anup Patel <anup.patel@wdc.com>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	 Palmer Dabbelt <palmer@dabbelt.com>,
	Albert Ou <aou@eecs.berkeley.edu>,
	 Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: James Morse <james.morse@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	 Suzuki K Poulose <suzuki.poulose@arm.com>,
	Atish Patra <atish.patra@wdc.com>,
	 David Hildenbrand <david@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	 Claudio Imbrenda <imbrenda@linux.ibm.com>,
	Sean Christopherson <seanjc@google.com>,
	 Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	 Jim Mattson <jmattson@google.com>,
	Joerg Roedel <joro@8bytes.org>,
	 linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu,  linux-mips@vger.kernel.org,
	kvm@vger.kernel.org, kvm-ppc@vger.kernel.org,
	 kvm-riscv@lists.infradead.org, linux-riscv@lists.infradead.org,
	 linux-kernel@vger.kernel.org, Ben Gardon <bgardon@google.com>,
	 "Maciej S . Szmigiero" <maciej.szmigiero@oracle.com>
Subject: [PATCH v5.5 23/30] KVM: Resolve memslot ID via a hash table instead of via a static array
Date: Thu,  4 Nov 2021 00:25:24 +0000	[thread overview]
Message-ID: <20211104002531.1176691-24-seanjc@google.com> (raw)
In-Reply-To: <20211104002531.1176691-1-seanjc@google.com>

From: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>

Memslot ID to the corresponding memslot mappings are currently kept as
indices in static id_to_index array.
The size of this array depends on the maximum allowed memslot count
(regardless of the number of memslots actually in use).

This has become especially problematic recently, when memslot count cap was
removed, so the maximum count is now full 32k memslots - the maximum
allowed by the current KVM API.

Keeping these IDs in a hash table (instead of an array) avoids this
problem.

Resolving a memslot ID to the actual memslot (instead of its index) will
also enable transitioning away from an array-based implementation of the
whole memslots structure in a later commit.

Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 include/linux/kvm_host.h | 16 +++----
 virt/kvm/kvm_main.c      | 96 +++++++++++++++++++++++++++++++---------
 2 files changed, 84 insertions(+), 28 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d46937a3a4e..81003e3acd53 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -29,6 +29,7 @@
 #include <linux/refcount.h>
 #include <linux/nospec.h>
 #include <linux/notifier.h>
+#include <linux/hashtable.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -425,6 +426,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
 struct kvm_memory_slot {
+	struct hlist_node id_node;
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long *dirty_bitmap;
@@ -527,7 +529,7 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 struct kvm_memslots {
 	u64 generation;
 	/* The mapping table from slot id to the index in memslots[]. */
-	short id_to_index[KVM_MEM_SLOTS_NUM];
+	DECLARE_HASHTABLE(id_hash, 7);
 	atomic_t last_used_slot;
 	int used_slots;
 	struct kvm_memory_slot memslots[];
@@ -789,16 +791,14 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 static inline
 struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 {
-	int index = slots->id_to_index[id];
 	struct kvm_memory_slot *slot;
 
-	if (index < 0)
-		return NULL;
+	hash_for_each_possible(slots->id_hash, slot, id_node, id) {
+		if (slot->id == id)
+			return slot;
+	}
 
-	slot = &slots->memslots[index];
-
-	WARN_ON(slot->id != id);
-	return slot;
+	return NULL;
 }
 
 /*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d45d574a5a2d..13c497abaab8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -853,15 +853,13 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm)
 
 static struct kvm_memslots *kvm_alloc_memslots(void)
 {
-	int i;
 	struct kvm_memslots *slots;
 
 	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
 		return NULL;
 
-	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
-		slots->id_to_index[i] = -1;
+	hash_init(slots->id_hash);
 
 	return slots;
 }
@@ -1259,17 +1257,49 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
 	return 0;
 }
 
+static void kvm_replace_memslot(struct kvm_memslots *slots,
+				struct kvm_memory_slot *old,
+				struct kvm_memory_slot *new)
+{
+	/*
+	 * Remove the old memslot from the hash list, copying the node data
+	 * would corrupt the list.
+	 */
+	if (old) {
+		hash_del(&old->id_node);
+
+		if (!new)
+			return;
+	}
+
+	/* Copy the source *data*, not the pointer, to the destination. */
+	if (old)
+		*new = *old;
+
+	/* (Re)Add the new memslot. */
+	hash_add(slots->id_hash, &new->id_node, new->id);
+}
+
+static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src)
+{
+	struct kvm_memory_slot *mslots = slots->memslots;
+
+	kvm_replace_memslot(slots, &mslots[src], &mslots[dst]);
+}
+
 /*
  * Delete a memslot by decrementing the number of used slots and shifting all
  * other entries in the array forward one spot.
+ * @memslot is a detached dummy struct with just .id and .as_id filled.
  */
 static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 				      struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
+	if (WARN_ON(!oldslot))
 		return;
 
 	slots->used_slots--;
@@ -1277,12 +1307,17 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 	if (atomic_read(&slots->last_used_slot) >= slots->used_slots)
 		atomic_set(&slots->last_used_slot, 0);
 
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
-	}
+	/*
+	 * Remove the to-be-deleted memslot from the list _before_ shifting
+	 * the trailing memslots forward, its data will be overwritten.
+	 * Defer the (somewhat pointless) copying of the memslot until after
+	 * the last slot has been shifted to avoid overwriting said last slot.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
+	for (i = oldslot - mslots; i < slots->used_slots; i++)
+		kvm_shift_memslot(slots, i, i + 1);
 	mslots[i] = *memslot;
-	slots->id_to_index[memslot->id] = -1;
 }
 
 /*
@@ -1300,30 +1335,39 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
  * itself is not preserved in the array, i.e. not swapped at this time, only
  * its new index into the array is tracked.  Returns the changed memslot's
  * current index into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the changed slot.
  */
 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
 					    struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots)
+	if (!oldslot || !slots->used_slots)
 		return -1;
 
+	/*
+	 * Delete the slot from the hash table before sorting the remaining
+	 * slots, the slot's data may be overwritten when copying slots as part
+	 * of the sorting proccess.  update_memslots() will unconditionally
+	 * rewrite the entire slot and re-add it to the hash table.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
 	/*
 	 * Move the target memslot backward in the array by shifting existing
 	 * memslots with a higher GFN (than the target memslot) towards the
 	 * front of the array.
 	 */
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
+	for (i = oldslot - mslots; i < slots->used_slots - 1; i++) {
 		if (memslot->base_gfn > mslots[i + 1].base_gfn)
 			break;
 
 		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
 
-		/* Shift the next memslot forward one and update its index. */
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i + 1);
 	}
 	return i;
 }
@@ -1334,6 +1378,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
  * is not preserved in the array, i.e. not swapped at this time, only its new
  * index into the array is tracked.  Returns the changed memslot's final index
  * into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the new or
+ * changed slot.
+ * Assumes that the memslot at @start index is not in @slots->id_hash.
  */
 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 					   struct kvm_memory_slot *memslot,
@@ -1348,9 +1396,7 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 
 		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
 
-		/* Shift the next memslot back one and update its index. */
-		mslots[i] = mslots[i - 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i - 1);
 	}
 	return i;
 }
@@ -1395,6 +1441,9 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
  * most likely to be referenced, sorting it to the front of the array was
  * advantageous.  The current binary search starts from the middle of the array
  * and uses an LRU pointer to improve performance for all memslots and GFNs.
+ *
+ * @memslot is a detached struct, not a part of the current or new memslot
+ * array.
  */
 static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *memslot,
@@ -1419,7 +1468,7 @@ static void update_memslots(struct kvm_memslots *slots,
 		 * its index accordingly.
 		 */
 		slots->memslots[i] = *memslot;
-		slots->id_to_index[memslot->id] = i;
+		kvm_replace_memslot(slots, NULL, &slots->memslots[i]);
 	}
 }
 
@@ -1512,6 +1561,7 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 {
 	struct kvm_memslots *slots;
 	size_t new_size;
+	struct kvm_memory_slot *memslot;
 
 	if (change == KVM_MR_CREATE)
 		new_size = kvm_memslots_size(old->used_slots + 1);
@@ -1519,8 +1569,14 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 		new_size = kvm_memslots_size(old->used_slots);
 
 	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
-	if (likely(slots))
-		memcpy(slots, old, kvm_memslots_size(old->used_slots));
+	if (unlikely(!slots))
+		return NULL;
+
+	memcpy(slots, old, kvm_memslots_size(old->used_slots));
+
+	hash_init(slots->id_hash);
+	kvm_for_each_memslot(memslot, slots)
+		hash_add(slots->id_hash, &memslot->id_node, memslot->id);
 
 	return slots;
 }
-- 
2.33.1.1089.g2158813163f-goog


_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

WARNING: multiple messages have this Message-ID (diff)
From: Sean Christopherson <seanjc@google.com>
To: Marc Zyngier <maz@kernel.org>,
	Huacai Chen <chenhuacai@kernel.org>,
	 Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
	Paul Mackerras <paulus@ozlabs.org>,
	 Anup Patel <anup.patel@wdc.com>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	 Palmer Dabbelt <palmer@dabbelt.com>,
	Albert Ou <aou@eecs.berkeley.edu>,
	 Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: James Morse <james.morse@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	 Suzuki K Poulose <suzuki.poulose@arm.com>,
	Atish Patra <atish.patra@wdc.com>,
	 David Hildenbrand <david@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	 Claudio Imbrenda <imbrenda@linux.ibm.com>,
	Sean Christopherson <seanjc@google.com>,
	 Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	 Jim Mattson <jmattson@google.com>,
	Joerg Roedel <joro@8bytes.org>,
	 linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu,  linux-mips@vger.kernel.org,
	kvm@vger.kernel.org, kvm-ppc@vger.kernel.org,
	 kvm-riscv@lists.infradead.org, linux-riscv@lists.infradead.org,
	 linux-kernel@vger.kernel.org, Ben Gardon <bgardon@google.com>,
	 "Maciej S . Szmigiero" <maciej.szmigiero@oracle.com>
Subject: [PATCH v5.5 23/30] KVM: Resolve memslot ID via a hash table instead of via a static array
Date: Thu,  4 Nov 2021 00:25:24 +0000	[thread overview]
Message-ID: <20211104002531.1176691-24-seanjc@google.com> (raw)
In-Reply-To: <20211104002531.1176691-1-seanjc@google.com>

From: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>

Memslot ID to the corresponding memslot mappings are currently kept as
indices in static id_to_index array.
The size of this array depends on the maximum allowed memslot count
(regardless of the number of memslots actually in use).

This has become especially problematic recently, when memslot count cap was
removed, so the maximum count is now full 32k memslots - the maximum
allowed by the current KVM API.

Keeping these IDs in a hash table (instead of an array) avoids this
problem.

Resolving a memslot ID to the actual memslot (instead of its index) will
also enable transitioning away from an array-based implementation of the
whole memslots structure in a later commit.

Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 include/linux/kvm_host.h | 16 +++----
 virt/kvm/kvm_main.c      | 96 +++++++++++++++++++++++++++++++---------
 2 files changed, 84 insertions(+), 28 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d46937a3a4e..81003e3acd53 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -29,6 +29,7 @@
 #include <linux/refcount.h>
 #include <linux/nospec.h>
 #include <linux/notifier.h>
+#include <linux/hashtable.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -425,6 +426,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
 struct kvm_memory_slot {
+	struct hlist_node id_node;
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long *dirty_bitmap;
@@ -527,7 +529,7 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 struct kvm_memslots {
 	u64 generation;
 	/* The mapping table from slot id to the index in memslots[]. */
-	short id_to_index[KVM_MEM_SLOTS_NUM];
+	DECLARE_HASHTABLE(id_hash, 7);
 	atomic_t last_used_slot;
 	int used_slots;
 	struct kvm_memory_slot memslots[];
@@ -789,16 +791,14 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 static inline
 struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 {
-	int index = slots->id_to_index[id];
 	struct kvm_memory_slot *slot;
 
-	if (index < 0)
-		return NULL;
+	hash_for_each_possible(slots->id_hash, slot, id_node, id) {
+		if (slot->id == id)
+			return slot;
+	}
 
-	slot = &slots->memslots[index];
-
-	WARN_ON(slot->id != id);
-	return slot;
+	return NULL;
 }
 
 /*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d45d574a5a2d..13c497abaab8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -853,15 +853,13 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm)
 
 static struct kvm_memslots *kvm_alloc_memslots(void)
 {
-	int i;
 	struct kvm_memslots *slots;
 
 	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
 		return NULL;
 
-	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
-		slots->id_to_index[i] = -1;
+	hash_init(slots->id_hash);
 
 	return slots;
 }
@@ -1259,17 +1257,49 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
 	return 0;
 }
 
+static void kvm_replace_memslot(struct kvm_memslots *slots,
+				struct kvm_memory_slot *old,
+				struct kvm_memory_slot *new)
+{
+	/*
+	 * Remove the old memslot from the hash list, copying the node data
+	 * would corrupt the list.
+	 */
+	if (old) {
+		hash_del(&old->id_node);
+
+		if (!new)
+			return;
+	}
+
+	/* Copy the source *data*, not the pointer, to the destination. */
+	if (old)
+		*new = *old;
+
+	/* (Re)Add the new memslot. */
+	hash_add(slots->id_hash, &new->id_node, new->id);
+}
+
+static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src)
+{
+	struct kvm_memory_slot *mslots = slots->memslots;
+
+	kvm_replace_memslot(slots, &mslots[src], &mslots[dst]);
+}
+
 /*
  * Delete a memslot by decrementing the number of used slots and shifting all
  * other entries in the array forward one spot.
+ * @memslot is a detached dummy struct with just .id and .as_id filled.
  */
 static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 				      struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
+	if (WARN_ON(!oldslot))
 		return;
 
 	slots->used_slots--;
@@ -1277,12 +1307,17 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 	if (atomic_read(&slots->last_used_slot) >= slots->used_slots)
 		atomic_set(&slots->last_used_slot, 0);
 
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
-	}
+	/*
+	 * Remove the to-be-deleted memslot from the list _before_ shifting
+	 * the trailing memslots forward, its data will be overwritten.
+	 * Defer the (somewhat pointless) copying of the memslot until after
+	 * the last slot has been shifted to avoid overwriting said last slot.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
+	for (i = oldslot - mslots; i < slots->used_slots; i++)
+		kvm_shift_memslot(slots, i, i + 1);
 	mslots[i] = *memslot;
-	slots->id_to_index[memslot->id] = -1;
 }
 
 /*
@@ -1300,30 +1335,39 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
  * itself is not preserved in the array, i.e. not swapped at this time, only
  * its new index into the array is tracked.  Returns the changed memslot's
  * current index into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the changed slot.
  */
 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
 					    struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots)
+	if (!oldslot || !slots->used_slots)
 		return -1;
 
+	/*
+	 * Delete the slot from the hash table before sorting the remaining
+	 * slots, the slot's data may be overwritten when copying slots as part
+	 * of the sorting proccess.  update_memslots() will unconditionally
+	 * rewrite the entire slot and re-add it to the hash table.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
 	/*
 	 * Move the target memslot backward in the array by shifting existing
 	 * memslots with a higher GFN (than the target memslot) towards the
 	 * front of the array.
 	 */
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
+	for (i = oldslot - mslots; i < slots->used_slots - 1; i++) {
 		if (memslot->base_gfn > mslots[i + 1].base_gfn)
 			break;
 
 		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
 
-		/* Shift the next memslot forward one and update its index. */
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i + 1);
 	}
 	return i;
 }
@@ -1334,6 +1378,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
  * is not preserved in the array, i.e. not swapped at this time, only its new
  * index into the array is tracked.  Returns the changed memslot's final index
  * into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the new or
+ * changed slot.
+ * Assumes that the memslot at @start index is not in @slots->id_hash.
  */
 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 					   struct kvm_memory_slot *memslot,
@@ -1348,9 +1396,7 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 
 		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
 
-		/* Shift the next memslot back one and update its index. */
-		mslots[i] = mslots[i - 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i - 1);
 	}
 	return i;
 }
@@ -1395,6 +1441,9 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
  * most likely to be referenced, sorting it to the front of the array was
  * advantageous.  The current binary search starts from the middle of the array
  * and uses an LRU pointer to improve performance for all memslots and GFNs.
+ *
+ * @memslot is a detached struct, not a part of the current or new memslot
+ * array.
  */
 static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *memslot,
@@ -1419,7 +1468,7 @@ static void update_memslots(struct kvm_memslots *slots,
 		 * its index accordingly.
 		 */
 		slots->memslots[i] = *memslot;
-		slots->id_to_index[memslot->id] = i;
+		kvm_replace_memslot(slots, NULL, &slots->memslots[i]);
 	}
 }
 
@@ -1512,6 +1561,7 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 {
 	struct kvm_memslots *slots;
 	size_t new_size;
+	struct kvm_memory_slot *memslot;
 
 	if (change == KVM_MR_CREATE)
 		new_size = kvm_memslots_size(old->used_slots + 1);
@@ -1519,8 +1569,14 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 		new_size = kvm_memslots_size(old->used_slots);
 
 	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
-	if (likely(slots))
-		memcpy(slots, old, kvm_memslots_size(old->used_slots));
+	if (unlikely(!slots))
+		return NULL;
+
+	memcpy(slots, old, kvm_memslots_size(old->used_slots));
+
+	hash_init(slots->id_hash);
+	kvm_for_each_memslot(memslot, slots)
+		hash_add(slots->id_hash, &memslot->id_node, memslot->id);
 
 	return slots;
 }
-- 
2.33.1.1089.g2158813163f-goog


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

WARNING: multiple messages have this Message-ID (diff)
From: Sean Christopherson <seanjc@google.com>
To: Marc Zyngier <maz@kernel.org>,
	Huacai Chen <chenhuacai@kernel.org>,
	Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
	Paul Mackerras <paulus@ozlabs.org>,
	Anup Patel <anup.patel@wdc.com>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	Albert Ou <aou@eecs.berkeley.edu>,
	Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>,
	kvm@vger.kernel.org, David Hildenbrand <david@redhat.com>,
	linux-kernel@vger.kernel.org, Atish Patra <atish.patra@wdc.com>,
	Ben Gardon <bgardon@google.com>,
	"Maciej S . Szmigiero" <maciej.szmigiero@oracle.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	linux-riscv@lists.infradead.org, Joerg Roedel <joro@8bytes.org>,
	kvmarm@lists.cs.columbia.edu, kvm-ppc@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	Jim Mattson <jmattson@google.com>,
	Cornelia Huck <cohuck@redhat.com>,
	linux-mips@vger.kernel.org, kvm-riscv@lists.infradead.org,
	Vitaly Kuznetsov <vkuznets@redhat.com>
Subject: [PATCH v5.5 23/30] KVM: Resolve memslot ID via a hash table instead of via a static array
Date: Thu, 04 Nov 2021 00:25:24 +0000	[thread overview]
Message-ID: <20211104002531.1176691-24-seanjc@google.com> (raw)
In-Reply-To: <20211104002531.1176691-1-seanjc@google.com>

From: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>

Memslot ID to the corresponding memslot mappings are currently kept as
indices in static id_to_index array.
The size of this array depends on the maximum allowed memslot count
(regardless of the number of memslots actually in use).

This has become especially problematic recently, when memslot count cap was
removed, so the maximum count is now full 32k memslots - the maximum
allowed by the current KVM API.

Keeping these IDs in a hash table (instead of an array) avoids this
problem.

Resolving a memslot ID to the actual memslot (instead of its index) will
also enable transitioning away from an array-based implementation of the
whole memslots structure in a later commit.

Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 include/linux/kvm_host.h | 16 +++----
 virt/kvm/kvm_main.c      | 96 +++++++++++++++++++++++++++++++---------
 2 files changed, 84 insertions(+), 28 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d46937a3a4e..81003e3acd53 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -29,6 +29,7 @@
 #include <linux/refcount.h>
 #include <linux/nospec.h>
 #include <linux/notifier.h>
+#include <linux/hashtable.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -425,6 +426,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
 struct kvm_memory_slot {
+	struct hlist_node id_node;
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long *dirty_bitmap;
@@ -527,7 +529,7 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 struct kvm_memslots {
 	u64 generation;
 	/* The mapping table from slot id to the index in memslots[]. */
-	short id_to_index[KVM_MEM_SLOTS_NUM];
+	DECLARE_HASHTABLE(id_hash, 7);
 	atomic_t last_used_slot;
 	int used_slots;
 	struct kvm_memory_slot memslots[];
@@ -789,16 +791,14 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 static inline
 struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 {
-	int index = slots->id_to_index[id];
 	struct kvm_memory_slot *slot;
 
-	if (index < 0)
-		return NULL;
+	hash_for_each_possible(slots->id_hash, slot, id_node, id) {
+		if (slot->id = id)
+			return slot;
+	}
 
-	slot = &slots->memslots[index];
-
-	WARN_ON(slot->id != id);
-	return slot;
+	return NULL;
 }
 
 /*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d45d574a5a2d..13c497abaab8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -853,15 +853,13 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm)
 
 static struct kvm_memslots *kvm_alloc_memslots(void)
 {
-	int i;
 	struct kvm_memslots *slots;
 
 	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
 		return NULL;
 
-	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
-		slots->id_to_index[i] = -1;
+	hash_init(slots->id_hash);
 
 	return slots;
 }
@@ -1259,17 +1257,49 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
 	return 0;
 }
 
+static void kvm_replace_memslot(struct kvm_memslots *slots,
+				struct kvm_memory_slot *old,
+				struct kvm_memory_slot *new)
+{
+	/*
+	 * Remove the old memslot from the hash list, copying the node data
+	 * would corrupt the list.
+	 */
+	if (old) {
+		hash_del(&old->id_node);
+
+		if (!new)
+			return;
+	}
+
+	/* Copy the source *data*, not the pointer, to the destination. */
+	if (old)
+		*new = *old;
+
+	/* (Re)Add the new memslot. */
+	hash_add(slots->id_hash, &new->id_node, new->id);
+}
+
+static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src)
+{
+	struct kvm_memory_slot *mslots = slots->memslots;
+
+	kvm_replace_memslot(slots, &mslots[src], &mslots[dst]);
+}
+
 /*
  * Delete a memslot by decrementing the number of used slots and shifting all
  * other entries in the array forward one spot.
+ * @memslot is a detached dummy struct with just .id and .as_id filled.
  */
 static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 				      struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (WARN_ON(slots->id_to_index[memslot->id] = -1))
+	if (WARN_ON(!oldslot))
 		return;
 
 	slots->used_slots--;
@@ -1277,12 +1307,17 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots,
 	if (atomic_read(&slots->last_used_slot) >= slots->used_slots)
 		atomic_set(&slots->last_used_slot, 0);
 
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
-	}
+	/*
+	 * Remove the to-be-deleted memslot from the list _before_ shifting
+	 * the trailing memslots forward, its data will be overwritten.
+	 * Defer the (somewhat pointless) copying of the memslot until after
+	 * the last slot has been shifted to avoid overwriting said last slot.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
+	for (i = oldslot - mslots; i < slots->used_slots; i++)
+		kvm_shift_memslot(slots, i, i + 1);
 	mslots[i] = *memslot;
-	slots->id_to_index[memslot->id] = -1;
 }
 
 /*
@@ -1300,30 +1335,39 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
  * itself is not preserved in the array, i.e. not swapped at this time, only
  * its new index into the array is tracked.  Returns the changed memslot's
  * current index into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the changed slot.
  */
 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
 					    struct kvm_memory_slot *memslot)
 {
 	struct kvm_memory_slot *mslots = slots->memslots;
+	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
 	int i;
 
-	if (slots->id_to_index[memslot->id] = -1 || !slots->used_slots)
+	if (!oldslot || !slots->used_slots)
 		return -1;
 
+	/*
+	 * Delete the slot from the hash table before sorting the remaining
+	 * slots, the slot's data may be overwritten when copying slots as part
+	 * of the sorting proccess.  update_memslots() will unconditionally
+	 * rewrite the entire slot and re-add it to the hash table.
+	 */
+	kvm_replace_memslot(slots, oldslot, NULL);
+
 	/*
 	 * Move the target memslot backward in the array by shifting existing
 	 * memslots with a higher GFN (than the target memslot) towards the
 	 * front of the array.
 	 */
-	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
+	for (i = oldslot - mslots; i < slots->used_slots - 1; i++) {
 		if (memslot->base_gfn > mslots[i + 1].base_gfn)
 			break;
 
 		WARN_ON_ONCE(memslot->base_gfn = mslots[i + 1].base_gfn);
 
-		/* Shift the next memslot forward one and update its index. */
-		mslots[i] = mslots[i + 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i + 1);
 	}
 	return i;
 }
@@ -1334,6 +1378,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
  * is not preserved in the array, i.e. not swapped at this time, only its new
  * index into the array is tracked.  Returns the changed memslot's final index
  * into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the new or
+ * changed slot.
+ * Assumes that the memslot at @start index is not in @slots->id_hash.
  */
 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 					   struct kvm_memory_slot *memslot,
@@ -1348,9 +1396,7 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
 
 		WARN_ON_ONCE(memslot->base_gfn = mslots[i - 1].base_gfn);
 
-		/* Shift the next memslot back one and update its index. */
-		mslots[i] = mslots[i - 1];
-		slots->id_to_index[mslots[i].id] = i;
+		kvm_shift_memslot(slots, i, i - 1);
 	}
 	return i;
 }
@@ -1395,6 +1441,9 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
  * most likely to be referenced, sorting it to the front of the array was
  * advantageous.  The current binary search starts from the middle of the array
  * and uses an LRU pointer to improve performance for all memslots and GFNs.
+ *
+ * @memslot is a detached struct, not a part of the current or new memslot
+ * array.
  */
 static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *memslot,
@@ -1419,7 +1468,7 @@ static void update_memslots(struct kvm_memslots *slots,
 		 * its index accordingly.
 		 */
 		slots->memslots[i] = *memslot;
-		slots->id_to_index[memslot->id] = i;
+		kvm_replace_memslot(slots, NULL, &slots->memslots[i]);
 	}
 }
 
@@ -1512,6 +1561,7 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 {
 	struct kvm_memslots *slots;
 	size_t new_size;
+	struct kvm_memory_slot *memslot;
 
 	if (change = KVM_MR_CREATE)
 		new_size = kvm_memslots_size(old->used_slots + 1);
@@ -1519,8 +1569,14 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 		new_size = kvm_memslots_size(old->used_slots);
 
 	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
-	if (likely(slots))
-		memcpy(slots, old, kvm_memslots_size(old->used_slots));
+	if (unlikely(!slots))
+		return NULL;
+
+	memcpy(slots, old, kvm_memslots_size(old->used_slots));
+
+	hash_init(slots->id_hash);
+	kvm_for_each_memslot(memslot, slots)
+		hash_add(slots->id_hash, &memslot->id_node, memslot->id);
 
 	return slots;
 }
-- 
2.33.1.1089.g2158813163f-goog
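
For reference, the lookup side of the new table -- the code that replaces
the old id_to_index[] dereference -- is not visible in the hunks above; it
amounts to walking a single hash bucket, roughly as in the sketch below
(simplified for illustration, not a verbatim excerpt from the patch):

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	struct kvm_memory_slot *slot;

	/* Walk only the bucket that 'id' hashes into. */
	hash_for_each_possible(slots->id_hash, slot, id_node, id) {
		if (slot->id == id)
			return slot;
	}

	/* No slot with a matching ID is currently installed. */
	return NULL;
}

With DECLARE_HASHTABLE(id_hash, 7) the table has 2^7 = 128 buckets, so
lookups stay effectively O(1) for typical slot counts, while the fixed
KVM_MEM_SLOTS_NUM-sized index array no longer has to be embedded in (and
copied with) every kvm_memslots instance.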

Thread overview: 327+ messages
2021-11-04  0:25 [PATCH v5.5 00/30] KVM: Scalable memslots implementation Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 01/30] KVM: Ensure local memslot copies operate on up-to-date arch-specific data Sean Christopherson
2021-11-04 21:27   ` Ben Gardon
2021-11-04 22:41     ` Sean Christopherson
2021-11-09  0:37   ` Maciej S. Szmigiero
2021-11-09  1:17     ` Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 02/30] KVM: Disallow user memslot with size that exceeds "unsigned long" Sean Christopherson
2021-11-09  0:38   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 03/30] KVM: Require total number of memslot pages to fit in an unsigned long Sean Christopherson
2021-11-09  0:38   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 04/30] KVM: Open code kvm_delete_memslot() into its only caller Sean Christopherson
2021-11-09  0:38   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 05/30] KVM: Resync only arch fields when slots_arch_lock gets reacquired Sean Christopherson
2021-11-09  0:38   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 06/30] KVM: Use "new" memslot's address space ID instead of dedicated param Sean Christopherson
2021-11-09  0:39   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 07/30] KVM: Let/force architectures to deal with arch specific memslot data Sean Christopherson
2021-11-09  0:39   ` Maciej S. Szmigiero
2021-11-09  1:13     ` Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 08/30] KVM: arm64: Use "new" memslot instead of userspace memory region Sean Christopherson
2021-11-09  6:36   ` Reiji Watanabe
2021-11-04  0:25 ` [PATCH v5.5 09/30] KVM: MIPS: Drop pr_debug from memslot commit to avoid using "mem" Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 10/30] KVM: PPC: Avoid referencing userspace memory region in memslot updates Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 11/30] KVM: s390: Use "new" memslot instead of userspace memory region Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 12/30] KVM: x86: " Sean Christopherson
2021-11-09  0:40   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 13/30] KVM: RISC-V: " Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 14/30] KVM: Stop passing kvm_userspace_memory_region to arch memslot hooks Sean Christopherson
2021-11-09  0:40   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 15/30] KVM: Use prepare/commit hooks to handle generic memslot metadata updates Sean Christopherson
2021-11-09  0:40   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 16/30] KVM: x86: Don't assume old/new memslots are non-NULL at memslot commit Sean Christopherson
2021-11-09  0:40   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 17/30] KVM: s390: Skip gfn/size sanity checks on memslot DELETE or FLAGS_ONLY Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 18/30] KVM: Don't make a full copy of the old memslot in __kvm_set_memory_region() Sean Christopherson
2021-11-09  0:41   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 19/30] KVM: x86: Don't call kvm_mmu_change_mmu_pages() if the count hasn't changed Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 20/30] KVM: x86: Use nr_memslot_pages to avoid traversing the memslots array Sean Christopherson
2021-11-09  0:41   ` Maciej S. Szmigiero
2021-11-09  1:34     ` Sean Christopherson
2021-11-09 16:29       ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 21/30] KVM: Integrate gfn_to_memslot_approx() into search_memslots() Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 22/30] KVM: Move WARN on invalid memslot index to update_memslots() Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 23/30] KVM: Resolve memslot ID via a hash table instead of via a static array Sean Christopherson [this message]
2021-11-11 23:51   ` Maciej S. Szmigiero
2021-11-12  1:03     ` Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 24/30] KVM: Use interval tree to do fast hva lookup in memslots Sean Christopherson
2021-11-11 23:52   ` Maciej S. Szmigiero
2021-11-12  1:05     ` Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 25/30] KVM: s390: Introduce kvm_s390_get_gfn_end() Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 26/30] KVM: Keep memslots in tree-based structures instead of array-based ones Sean Christopherson
2021-11-11 23:52   ` Maciej S. Szmigiero
2021-11-12  0:51     ` Sean Christopherson
2021-11-13 15:22       ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 27/30] KVM: Optimize gfn lookup in kvm_zap_gfn_range() Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 28/30] KVM: Optimize overlapping memslots check Sean Christopherson
2021-11-04  0:25 ` [PATCH v5.5 29/30] KVM: Wait 'til the bitter end to initialize the "new" memslot Sean Christopherson
2021-11-11 23:52   ` Maciej S. Szmigiero
2021-11-04  0:25 ` [PATCH v5.5 30/30] KVM: Dynamically allocate "new" memslots from the get-go Sean Christopherson
2021-11-11 23:53   ` Maciej S. Szmigiero
2021-11-12  1:32     ` Sean Christopherson
2021-11-09  0:43 ` [PATCH v5.5 00/30] KVM: Scalable memslots implementation Maciej S. Szmigiero
2021-11-09  1:21   ` Sean Christopherson
2021-11-11 23:53     ` Maciej S. Szmigiero
2021-11-23 14:42       ` Maciej S. Szmigiero
2021-11-26 12:33         ` Paolo Bonzini
