* [PATCH] KVM: MMU: make PTE_PREFETCH_NUM tunable
@ 2021-10-12  9:14 Sergey Senozhatsky
  2021-10-12 16:50 ` David Matlack
  0 siblings, 1 reply; 3+ messages in thread
From: Sergey Senozhatsky @ 2021-10-12  9:14 UTC (permalink / raw)
  To: Paolo Bonzini, Sean Christopherson, Vitaly Kuznetsov, Wanpeng Li,
	Jim Mattson, Joerg Roedel
  Cc: Suleiman Souhlal, kvm, linux-kernel, Sergey Senozhatsky

Turn PTE_PREFETCH_NUM into a module parameter, so that it
can be tuned per-VM.

- /sys/module/kvm/parameters/pte_prefetch_num 8

             VM-EXIT    Samples  Samples%     Time%    Min Time    Max Time         Avg time

       EPT_VIOLATION     760998    54.85%     7.23%      0.92us  31765.89us      7.78us ( +-   1.46% )
           MSR_WRITE     170599    12.30%     0.53%      0.60us   3334.13us      2.52us ( +-   0.86% )
  EXTERNAL_INTERRUPT     159510    11.50%     1.65%      0.49us  43705.81us      8.45us ( +-   7.54% )
[..]

Total Samples:1387305, Total events handled time:81900258.99us.

- /sys/module/kvm/parameters/pte_prefetch_num 16

             VM-EXIT    Samples  Samples%     Time%    Min Time    Max Time         Avg time

       EPT_VIOLATION     658064    52.58%     7.04%      0.91us  17022.84us      8.34us ( +-   1.52% )
           MSR_WRITE     163776    13.09%     0.54%      0.56us   5192.10us      2.57us ( +-   1.25% )
  EXTERNAL_INTERRUPT     144588    11.55%     1.62%      0.48us  97410.16us      8.75us ( +-  11.44% )
[..]

Total Samples:1251546, Total events handled time:77956187.56us.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 arch/x86/kvm/mmu/mmu.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 24a9f4c3f5e7..0ab4490674ec 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -115,6 +115,8 @@ module_param(dbg, bool, 0644);
 #endif
 
 #define PTE_PREFETCH_NUM		8
+static uint __read_mostly pte_prefetch_num = PTE_PREFETCH_NUM;
+module_param(pte_prefetch_num, uint, 0644);
 
 #define PT32_LEVEL_BITS 10
 
@@ -732,7 +734,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 
 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
-				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
+				       1 + PT64_ROOT_MAX_LEVEL + pte_prefetch_num);
 	if (r)
 		return r;
 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
@@ -2753,20 +2755,29 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp,
 				    u64 *start, u64 *end)
 {
-	struct page *pages[PTE_PREFETCH_NUM];
+	struct page **pages;
 	struct kvm_memory_slot *slot;
 	unsigned int access = sp->role.access;
 	int i, ret;
 	gfn_t gfn;
 
+	pages = kmalloc_array(pte_prefetch_num, sizeof(struct page *),
+			      GFP_KERNEL);
+	if (!pages)
+		return -1;
+
 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
-	if (!slot)
-		return -1;
+	if (!slot) {
+		ret = -1;
+		goto out;
+	}
 
 	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
-	if (ret <= 0)
-		return -1;
+	if (ret <= 0) {
+		ret = -1;
+		goto out;
+	}
 
 	for (i = 0; i < ret; i++, gfn++, start++) {
 		mmu_set_spte(vcpu, slot, start, access, gfn,
@@ -2774,7 +2785,9 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 		put_page(pages[i]);
 	}
 
-	return 0;
+out:
+	kfree(pages);
+	return ret;
 }
 
 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
@@ -2785,10 +2798,10 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
 
 	WARN_ON(!sp->role.direct);
 
-	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
+	i = (sptep - sp->spt) & ~(pte_prefetch_num - 1);
 	spte = sp->spt + i;
 
-	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
+	for (i = 0; i < pte_prefetch_num; i++, spte++) {
 		if (is_shadow_present_pte(*spte) || spte == sptep) {
 			if (!start)
 				continue;
-- 
2.33.0.882.g93a45727a2-goog



* Re: [PATCH] KVM: MMU: make PTE_PREFETCH_NUM tunable
  2021-10-12  9:14 [PATCH] KVM: MMU: make PTE_PREFETCH_NUM tunable Sergey Senozhatsky
@ 2021-10-12 16:50 ` David Matlack
  2021-10-13  5:52   ` Sergey Senozhatsky
  0 siblings, 1 reply; 3+ messages in thread
From: David Matlack @ 2021-10-12 16:50 UTC (permalink / raw)
  To: Sergey Senozhatsky
  Cc: Paolo Bonzini, Sean Christopherson, Vitaly Kuznetsov, Wanpeng Li,
	Jim Mattson, Joerg Roedel, Suleiman Souhlal, kvm list, LKML

On Tue, Oct 12, 2021 at 2:16 AM Sergey Senozhatsky
<senozhatsky@chromium.org> wrote:
>
> Turn PTE_PREFETCH_NUM into a module parameter, so that it
> can be tuned per-VM.

Module parameters do not allow tuning per VM; they affect every VM on
the machine.

If you want per-VM tuning you could introduce a VM ioctl.
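
Something like this, very roughly (an untested sketch; KVM_CAP_PTE_PREFETCH_NUM
and kvm->arch.pte_prefetch_num are made-up names, not existing KVM API, and
the 512 upper bound is arbitrary, one shadow page worth of SPTEs):

	/* arch/x86/kvm/x86.c, in kvm_vm_ioctl_enable_cap() */
	case KVM_CAP_PTE_PREFETCH_NUM:
		r = -EINVAL;
		/* Reject 0, non-powers-of-2 and absurdly large values. */
		if (!cap->args[0] || !is_power_of_2(cap->args[0]) ||
		    cap->args[0] > 512)
			break;
		kvm->arch.pte_prefetch_num = cap->args[0];
		r = 0;
		break;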

>
> - /sys/module/kvm/parameters/pte_prefetch_num 8
>
>              VM-EXIT    Samples  Samples%     Time%    Min Time    Max Time         Avg time
>
>        EPT_VIOLATION     760998    54.85%     7.23%      0.92us  31765.89us      7.78us ( +-   1.46% )
>            MSR_WRITE     170599    12.30%     0.53%      0.60us   3334.13us      2.52us ( +-   0.86% )
>   EXTERNAL_INTERRUPT     159510    11.50%     1.65%      0.49us  43705.81us      8.45us ( +-   7.54% )
> [..]
>
> Total Samples:1387305, Total events handled time:81900258.99us.
>
> - /sys/module/kvm/parameters/pte_prefetch_num 16
>
>              VM-EXIT    Samples  Samples%     Time%    Min Time    Max Time         Avg time
>
>        EPT_VIOLATION     658064    52.58%     7.04%      0.91us  17022.84us      8.34us ( +-   1.52% )
>            MSR_WRITE     163776    13.09%     0.54%      0.56us   5192.10us      2.57us ( +-   1.25% )
>   EXTERNAL_INTERRUPT     144588    11.55%     1.62%      0.48us  97410.16us      8.75us ( +-  11.44% )
> [..]
>
> Total Samples:1251546, Total events handled time:77956187.56us.
>
> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> ---
>  arch/x86/kvm/mmu/mmu.c | 31 ++++++++++++++++++++++---------

Please also update the shadow paging prefetching code in
arch/x86/kvm/mmu/paging_tmpl.h, unless there is a good reason to
diverge.

>  1 file changed, 22 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 24a9f4c3f5e7..0ab4490674ec 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -115,6 +115,8 @@ module_param(dbg, bool, 0644);
>  #endif
>
>  #define PTE_PREFETCH_NUM               8
> +static uint __read_mostly pte_prefetch_num = PTE_PREFETCH_NUM;
> +module_param(pte_prefetch_num, uint, 0644);
>
>  #define PT32_LEVEL_BITS 10
>
> @@ -732,7 +734,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
>
>         /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
>         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
> -                                      1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
> +                                      1 + PT64_ROOT_MAX_LEVEL + pte_prefetch_num);

There is a sampling problem. What happens if the user changes
pte_prefetch_num while a fault is being handled?
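E.g. the cache can be topped up with fewer entries than the prefetch path
later consumes within the same fault. One way out (sketch only) is to
snapshot the parameter once per fault and use the snapshot everywhere:

	unsigned int prefetch = READ_ONCE(pte_prefetch_num);

	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				       1 + PT64_ROOT_MAX_LEVEL + prefetch);
	/* ... then plumb 'prefetch' down to the prefetch path itself. */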

>         if (r)
>                 return r;
>         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
> @@ -2753,20 +2755,29 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
>                                     struct kvm_mmu_page *sp,
>                                     u64 *start, u64 *end)
>  {
> -       struct page *pages[PTE_PREFETCH_NUM];
> +       struct page **pages;
>         struct kvm_memory_slot *slot;
>         unsigned int access = sp->role.access;
>         int i, ret;
>         gfn_t gfn;
>
> +       pages = kmalloc_array(pte_prefetch_num, sizeof(struct page *),
> +                             GFP_KERNEL);

This code runs with the MMU lock held. From
https://www.kernel.org/doc/html/latest/core-api/memory-allocation.html:

    Note, that using GFP_KERNEL implies __GFP_RECLAIM, which means
    that direct reclaim may be triggered under memory pressure; the calling
    context must be allowed to sleep.

In general we avoid doing any dynamic memory allocation while the MMU
lock is held. That's why the memory caches exist. You can avoid
allocating under a lock by allocating the prefetch array when the vCPU
is first initialized. This would also solve the module parameter
sampling problem because you can read it once and store it in struct
kvm_vcpu.
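
A sketch of that (untested; prefetch_pages and pte_prefetch_num are
hypothetical struct kvm_vcpu_arch members):

	/* In kvm_arch_vcpu_create(): read the parameter once, allocate once. */
	vcpu->arch.pte_prefetch_num = READ_ONCE(pte_prefetch_num);
	vcpu->arch.prefetch_pages =
		kmalloc_array(vcpu->arch.pte_prefetch_num,
			      sizeof(*vcpu->arch.prefetch_pages),
			      GFP_KERNEL_ACCOUNT);
	if (!vcpu->arch.prefetch_pages)
		return -ENOMEM;

	/* In kvm_arch_vcpu_destroy(): */
	kfree(vcpu->arch.prefetch_pages);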

> +       if (!pages)
> +               return -1;
> +
>         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
>         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
> -       if (!slot)
> -               return -1;
> +       if (!slot) {
> +               ret = -1;
> +               goto out;
> +       }
>
>         ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
> -       if (ret <= 0)
> -               return -1;
> +       if (ret <= 0) {
> +               ret = -1;
> +               goto out;
> +       }
>
>         for (i = 0; i < ret; i++, gfn++, start++) {
>                 mmu_set_spte(vcpu, slot, start, access, gfn,
> @@ -2774,7 +2785,9 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
>                 put_page(pages[i]);
>         }
>
> -       return 0;
> +out:
> +       kfree(pages);
> +       return ret;
>  }
>
>  static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
> @@ -2785,10 +2798,10 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
>
>         WARN_ON(!sp->role.direct);
>
> -       i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
> +       i = (sptep - sp->spt) & ~(pte_prefetch_num - 1);

This code assumes pte_prefetch_num is a power of 2, which is now no
longer guaranteed to be true.
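
If the knob stays a module parameter, the power-of-2 invariant could be
enforced at write time instead (an untested sketch using module_param_cb):

#include <linux/log2.h>

static int pte_prefetch_num_set(const char *val,
				const struct kernel_param *kp)
{
	unsigned int n;
	int r;

	r = kstrtouint(val, 0, &n);
	if (r)
		return r;

	/* Keep the "& ~(n - 1)" masking in __direct_pte_prefetch() valid. */
	if (!is_power_of_2(n) || n > 512)
		return -EINVAL;

	return param_set_uint(val, kp);
}

static const struct kernel_param_ops pte_prefetch_num_ops = {
	.set = pte_prefetch_num_set,
	.get = param_get_uint,
};

module_param_cb(pte_prefetch_num, &pte_prefetch_num_ops,
		&pte_prefetch_num, 0644);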

>         spte = sp->spt + i;
>
> -       for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
> +       for (i = 0; i < pte_prefetch_num; i++, spte++) {
>                 if (is_shadow_present_pte(*spte) || spte == sptep) {
>                         if (!start)
>                                 continue;
> --
> 2.33.0.882.g93a45727a2-goog
>


* Re: [PATCH] KVM: MMU: make PTE_PREFETCH_NUM tunable
  2021-10-12 16:50 ` David Matlack
@ 2021-10-13  5:52   ` Sergey Senozhatsky
  0 siblings, 0 replies; 3+ messages in thread
From: Sergey Senozhatsky @ 2021-10-13  5:52 UTC (permalink / raw)
  To: David Matlack
  Cc: Sergey Senozhatsky, Paolo Bonzini, Sean Christopherson,
	Vitaly Kuznetsov, Wanpeng Li, Jim Mattson, Joerg Roedel,
	Suleiman Souhlal, kvm list, LKML

On (21/10/12 09:50), David Matlack wrote:
> On Tue, Oct 12, 2021 at 2:16 AM Sergey Senozhatsky
> <senozhatsky@chromium.org> wrote:
> >
> > Turn PTE_PREFETCH_NUM into a module parameter, so that it
> > can be tuned per-VM.
> 
> Module parameters do not allow tuning per VM; they affect every VM on
> the machine.
> 
> If you want per-VM tuning you could introduce a VM ioctl.

ACK.

> > ---
> >  arch/x86/kvm/mmu/mmu.c | 31 ++++++++++++++++++++++---------
> 
> Please also update the shadow paging prefetching code in
> arch/x86/kvm/mmu/paging_tmpl.h, unless there is a good reason to
> diverge.

ACK.

> > @@ -732,7 +734,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
> >
> >         /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
> >         r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
> > -                                      1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
> > +                                      1 + PT64_ROOT_MAX_LEVEL + pte_prefetch_num);
> 
> There is a sampling problem. What happens if the user changes
> pte_prefetch_num while a fault is being handled?

Good catch.

> > @@ -2753,20 +2755,29 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
> >                                     struct kvm_mmu_page *sp,
> >                                     u64 *start, u64 *end)
> >  {
> > -       struct page *pages[PTE_PREFETCH_NUM];
> > +       struct page **pages;
> >         struct kvm_memory_slot *slot;
> >         unsigned int access = sp->role.access;
> >         int i, ret;
> >         gfn_t gfn;
> >
> > +       pages = kmalloc_array(pte_prefetch_num, sizeof(struct page *),
> > +                             GFP_KERNEL);
> 
> This code runs with the MMU lock held.
[..]
> In general we avoid doing any dynamic memory allocation while the MMU
> lock is held. That's why the memory caches exist. You can avoid
> allocating under a lock by allocating the prefetch array when the vCPU
> is first initialized. This would also solve the module parameter
> sampling problem because you can read it once and store it in struct
> kvm_vcpu.

I'll do per-vCPU pre-allocation, thanks. GFP_KERNEL is less of a problem
when we hold kvm->mmu_lock for read, but more so when we hold it for write.

> >  static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
> > @@ -2785,10 +2798,10 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
> >
> >         WARN_ON(!sp->role.direct);
> >
> > -       i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
> > +       i = (sptep - sp->spt) & ~(pte_prefetch_num - 1);
> 
> This code assumes pte_prefetch_num is a power of 2, which is now no
> longer guaranteed to be true.

It does. I can check that it's a power of 2 in the ioctl handler.

