From: Sean Christopherson <seanjc@google.com>
To: "Maciej S. Szmigiero" <mail@maciej.szmigiero.name>
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>,
	Igor Mammedov <imammedo@redhat.com>,
	Marc Zyngier <maz@kernel.org>, James Morse <james.morse@arm.com>,
	Julien Thierry <julien.thierry.kdev@gmail.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Huacai Chen <chenhuacai@kernel.org>,
	Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
	Paul Mackerras <paulus@ozlabs.org>,
	Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>,
	David Hildenbrand <david@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	Joerg Roedel <joro@8bytes.org>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v5 08/13] KVM: Resolve memslot ID via a hash table instead of via a static array
Date: Wed, 20 Oct 2021 00:43:21 +0000
Message-ID: <YW9mKTRBEABjGPp7@google.com>
In-Reply-To: <555f58fdaec120aa7a6f6fbad06cca796a8c9168.1632171479.git.maciej.szmigiero@oracle.com>

On Mon, Sep 20, 2021, Maciej S. Szmigiero wrote:
> ---
>  include/linux/kvm_host.h | 16 +++++------
>  virt/kvm/kvm_main.c      | 61 +++++++++++++++++++++++++++++++---------
>  2 files changed, 55 insertions(+), 22 deletions(-)
> 
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 8fd9644f40b2..d2acc00a6472 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -29,6 +29,7 @@
>  #include <linux/refcount.h>
>  #include <linux/nospec.h>
>  #include <linux/notifier.h>
> +#include <linux/hashtable.h>
>  #include <asm/signal.h>
>  
>  #include <linux/kvm.h>
> @@ -426,6 +427,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
>  #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
>  
>  struct kvm_memory_slot {
> +	struct hlist_node id_node;
>  	gfn_t base_gfn;
>  	unsigned long npages;
>  	unsigned long *dirty_bitmap;
> @@ -528,7 +530,7 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
>  struct kvm_memslots {
>  	u64 generation;
>  	/* The mapping table from slot id to the index in memslots[]. */
> -	short id_to_index[KVM_MEM_SLOTS_NUM];
> +	DECLARE_HASHTABLE(id_hash, 7);

Can you add a comment explaining the rationale for size "7"?  Not necessarily the
justification for choosing "7" specifically, more so the tradeoffs between
performance, memory, etc., so that all your work/investigation isn't lost and
doesn't have to be repeated if someone wants to tweak this in the future.
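
Purely as an illustrative sketch of the kind of comment I have in mind (the
actual numbers and tradeoffs should come from your measurements):

	/*
	 * 7 bits of hash gives 128 buckets, i.e. one hlist_head (a single
	 * pointer) per bucket embedded in every kvm_memslots instance.
	 * Fewer bits would save memory at the cost of more collisions, and
	 * thus longer walks in id_to_memslot(); more bits do the opposite.
	 */
	DECLARE_HASHTABLE(id_hash, 7);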

>  	atomic_t last_used_slot;
>  	int used_slots;
>  	struct kvm_memory_slot memslots[];
> @@ -795,16 +797,14 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
>  static inline
>  struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
>  {
> -	int index = slots->id_to_index[id];
>  	struct kvm_memory_slot *slot;
>  
> -	if (index < 0)
> -		return NULL;
> -
> -	slot = &slots->memslots[index];
> +	hash_for_each_possible(slots->id_hash, slot, id_node, id) {
> +		if (slot->id == id)
> +			return slot;

Hmm, related to the hash, it might be worth adding a stat here to count collisions.
Might be more pain than it's worth though since we don't have @kvm.
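
E.g., if @kvm were reachable from here, something along these lines (the
stat itself is hypothetical, it doesn't exist today):

	hash_for_each_possible(slots->id_hash, slot, id_node, id) {
		if (slot->id == id)
			return slot;

		/* hypothetical: each non-matching entry walked is a collision */
		++kvm->stat.generic.id_hash_collisions;
	}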

> +	}
>  
> -	WARN_ON(slot->id != id);
> -	return slot;
> +	return NULL;
>  }
>  
>  /*
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 48d182840060..50597608d085 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -827,15 +827,13 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm)
>  
>  static struct kvm_memslots *kvm_alloc_memslots(void)
>  {
> -	int i;
>  	struct kvm_memslots *slots;
>  
>  	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
>  	if (!slots)
>  		return NULL;
>  
> -	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
> -		slots->id_to_index[i] = -1;
> +	hash_init(slots->id_hash);
>  
>  	return slots;
>  }
> @@ -1236,14 +1234,16 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
>  /*
>   * Delete a memslot by decrementing the number of used slots and shifting all
>   * other entries in the array forward one spot.
> + * @memslot is a detached dummy struct with just .id and .as_id filled.
>   */
>  static inline void kvm_memslot_delete(struct kvm_memslots *slots,
>  				      struct kvm_memory_slot *memslot)
>  {
>  	struct kvm_memory_slot *mslots = slots->memslots;
> +	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
>  	int i;
>  
> -	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
> +	if (WARN_ON(!oldslot))
>  		return;
>  
>  	slots->used_slots--;
> @@ -1251,12 +1251,13 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots,
>  	if (atomic_read(&slots->last_used_slot) >= slots->used_slots)
>  		atomic_set(&slots->last_used_slot, 0);
>  
> -	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
> +	for (i = oldslot - mslots; i < slots->used_slots; i++) {
> +		hash_del(&mslots[i].id_node);
>  		mslots[i] = mslots[i + 1];
> -		slots->id_to_index[mslots[i].id] = i;
> +		hash_add(slots->id_hash, &mslots[i].id_node, mslots[i].id);
>  	}
> +	hash_del(&mslots[i].id_node);
>  	mslots[i] = *memslot;
> -	slots->id_to_index[memslot->id] = -1;
>  }
>  
>  /*
> @@ -1274,30 +1275,46 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
>   * itself is not preserved in the array, i.e. not swapped at this time, only
>   * its new index into the array is tracked.  Returns the changed memslot's
>   * current index into the memslots array.
> + * The memslot at the returned index will not be in @slots->id_hash by then.
> + * @memslot is a detached struct with desired final data of the changed slot.
>   */
>  static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
>  					    struct kvm_memory_slot *memslot)
>  {
>  	struct kvm_memory_slot *mslots = slots->memslots;
> +	struct kvm_memory_slot *mmemslot = id_to_memslot(slots, memslot->id);

My comment from v3 about the danger of "mmemslot" still stands.  FWIW, I dislike
"mslots" as well, but that predates me, and all of this will go away in the end :-)

On Wed, May 19, 2021 at 3:31 PM Sean Christopherson <seanjc@google.com> wrote:
> On Sun, May 16, 2021, Maciej S. Szmigiero wrote:
> >       struct kvm_memory_slot *mslots = slots->memslots;
> > +     struct kvm_memory_slot *dmemslot = id_to_memslot(slots, memslot->id);
>
> I vote to call these local vars "old", or something along those lines.  dmemslot
> isn't too bad, but mmemslot in the helpers below is far too similar to memslot,
> and using the wrong one will cause nasty explosions.
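
I.e., applied to the hunk above, something like:

	struct kvm_memory_slot *mslots = slots->memslots;
	struct kvm_memory_slot *old = id_to_memslot(slots, memslot->id);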


>  	int i;
>  
> -	if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots)
> +	if (!mmemslot || !slots->used_slots)
>  		return -1;
>  
> +	/*
> +	 * The loop below will (possibly) overwrite the target memslot with
> +	 * data of the next memslot, or a similar loop in
> +	 * kvm_memslot_move_forward() will overwrite it with data of the
> +	 * previous memslot.
> +	 * Then update_memslots() will unconditionally overwrite and re-add
> +	 * it to the hash table.
> +	 * That's why the memslot has to be first removed from the hash table
> +	 * here.
> +	 */

Is this reword accurate?

	/*
	 * Delete the slot from the hash table before sorting the remaining
	 * slots; the slot's data may be overwritten when copying slots as part
	 * of the sorting process.  update_memslots() will unconditionally
	 * rewrite the entire slot and re-add it to the hash table.
	 */

> +	hash_del(&mmemslot->id_node);
> +
>  	/*
>  	 * Move the target memslot backward in the array by shifting existing
>  	 * memslots with a higher GFN (than the target memslot) towards the
>  	 * front of the array.
>  	 */
> -	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
> +	for (i = mmemslot - mslots; i < slots->used_slots - 1; i++) {
>  		if (memslot->base_gfn > mslots[i + 1].base_gfn)
>  			break;
>  
>  		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
>  
>  		/* Shift the next memslot forward one and update its index. */
> +		hash_del(&mslots[i + 1].id_node);
>  		mslots[i] = mslots[i + 1];
> -		slots->id_to_index[mslots[i].id] = i;
> +		hash_add(slots->id_hash, &mslots[i].id_node, mslots[i].id);
>  	}
>  	return i;
>  }
> @@ -1308,6 +1325,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
>   * is not preserved in the array, i.e. not swapped at this time, only its new
>   * index into the array is tracked.  Returns the changed memslot's final index
>   * into the memslots array.
> + * The memslot at the returned index will not be in @slots->id_hash by then.
> + * @memslot is a detached struct with desired final data of the new or
> + * changed slot.
> + * Assumes that the memslot at @start index is not in @slots->id_hash.
>   */
>  static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
>  					   struct kvm_memory_slot *memslot,
> @@ -1323,8 +1344,9 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
>  		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
>  
>  		/* Shift the next memslot back one and update its index. */
> +		hash_del(&mslots[i - 1].id_node);
>  		mslots[i] = mslots[i - 1];
> -		slots->id_to_index[mslots[i].id] = i;
> +		hash_add(slots->id_hash, &mslots[i].id_node, mslots[i].id);
>  	}
>  	return i;
>  }
> @@ -1369,6 +1391,9 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
>   * most likely to be referenced, sorting it to the front of the array was
>   * advantageous.  The current binary search starts from the middle of the array
>   * and uses an LRU pointer to improve performance for all memslots and GFNs.
> + *
> + * @memslot is a detached struct, not a part of the current or new memslot
> + * array.
>   */
>  static void update_memslots(struct kvm_memslots *slots,
>  			    struct kvm_memory_slot *memslot,
> @@ -1393,7 +1418,8 @@ static void update_memslots(struct kvm_memslots *slots,
>  		 * its index accordingly.
>  		 */
>  		slots->memslots[i] = *memslot;
> -		slots->id_to_index[memslot->id] = i;
> +		hash_add(slots->id_hash, &slots->memslots[i].id_node,
> +			 memslot->id);

Let this poke out past 80 chars, i.e. drop the newline.
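
I.e.:

		hash_add(slots->id_hash, &slots->memslots[i].id_node, memslot->id);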

>  	}
>  }
>  
> @@ -1501,6 +1527,7 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
>  {
>  	struct kvm_memslots *slots;
>  	size_t new_size;
> +	struct kvm_memory_slot *memslot;
>  
>  	if (change == KVM_MR_CREATE)
>  		new_size = kvm_memslots_size(old->used_slots + 1);
> @@ -1508,8 +1535,14 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
>  		new_size = kvm_memslots_size(old->used_slots);
>  
>  	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
> -	if (likely(slots))
> -		kvm_copy_memslots(slots, old);
> +	if (unlikely(!slots))
> +		return NULL;
> +
> +	kvm_copy_memslots(slots, old);
> +
> +	hash_init(slots->id_hash);
> +	kvm_for_each_memslot(memslot, slots)
> +		hash_add(slots->id_hash, &memslot->id_node, memslot->id);
>  
>  	return slots;
>  }
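
For anyone skimming: the <linux/hashtable.h> pattern the whole patch leans on
is just the standard quartet below (minimal sketch, not tied to any one hunk):

	DECLARE_HASHTABLE(id_hash, 7);			/* 2^7 = 128 buckets */

	hash_init(id_hash);				/* empty every bucket */
	hash_add(id_hash, &slot->id_node, slot->id);	/* hash the key, link the node */
	hash_del(&slot->id_node);			/* unlink from its bucket */

	/* walk only the bucket that slot ID 'id' hashes to */
	hash_for_each_possible(id_hash, slot, id_node, id)
		if (slot->id == id)
			return slot;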

