* [PATCH] KVM: x86/mmu: improve robustness of some functions
@ 2021-01-22 11:18 Stephen Zhang
  2021-01-25  9:54 ` Vitaly Kuznetsov
  0 siblings, 1 reply; 4+ messages in thread
From: Stephen Zhang @ 2021-01-22 11:18 UTC (permalink / raw)
  To: pbonzini, seanjc, vkuznets, wanpengli, jmattson, joro, tglx,
	mingo, bp, x86, hpa
  Cc: kvm, linux-kernel, Stephen Zhang

The rmap_printk() calls hardcode the name of the enclosing function
in the format string, so the message silently goes stale when the
function is renamed. In fact, such stale names already exist:
spte_write_protect() and spte_clear_dirty() still print
"rmap_write_protect" and "rmap_clear_dirty". Use __func__ so the
messages always match the function that emits them.

Signed-off-by: Stephen Zhang <stephenzhangzsd@gmail.com>
---
 arch/x86/kvm/mmu/mmu.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6d16481..09462c3d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -844,17 +844,17 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 	int i, count = 0;
 
 	if (!rmap_head->val) {
-		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
+		rmap_printk("%s: %p %llx 0->1\n", __func__, spte, *spte);
 		rmap_head->val = (unsigned long)spte;
 	} else if (!(rmap_head->val & 1)) {
-		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
+		rmap_printk("%s: %p %llx 1->many\n", __func__, spte, *spte);
 		desc = mmu_alloc_pte_list_desc(vcpu);
 		desc->sptes[0] = (u64 *)rmap_head->val;
 		desc->sptes[1] = spte;
 		rmap_head->val = (unsigned long)desc | 1;
 		++count;
 	} else {
-		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
+		rmap_printk("%s: %p %llx many->many\n", __func__, spte, *spte);
 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 		while (desc->sptes[PTE_LIST_EXT-1]) {
 			count += PTE_LIST_EXT;
@@ -1115,7 +1115,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
 	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
 		return false;
 
-	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+	rmap_printk("%s: spte %p %llx\n", __func__, sptep, *sptep);
 
 	if (pt_protect)
 		spte &= ~SPTE_MMU_WRITEABLE;
@@ -1142,7 +1142,7 @@ static bool spte_clear_dirty(u64 *sptep)
 {
 	u64 spte = *sptep;
 
-	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
+	rmap_printk("%s: spte %p %llx\n", __func__, sptep, *sptep);
 
 	MMU_WARN_ON(!spte_ad_enabled(spte));
 	spte &= ~shadow_dirty_mask;
@@ -1184,7 +1184,7 @@ static bool spte_set_dirty(u64 *sptep)
 {
 	u64 spte = *sptep;
 
-	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
+	rmap_printk("%s: spte %p %llx\n", __func__, sptep, *sptep);
 
 	/*
 	 * Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
@@ -1363,8 +1363,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 restart:
 	for_each_rmap_spte(rmap_head, &iter, sptep) {
-		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
-			    sptep, *sptep, gfn, level);
+		rmap_printk("%s: spte %p %llx gfn %llx (%d)\n",
+			    __func__, sptep, *sptep, gfn, level);
 
 		need_flush = 1;
 
-- 
1.8.3.1



* Re: [PATCH] KVM: x86/mmu: improve robustness of some functions
  2021-01-22 11:18 [PATCH] KVM: x86/mmu: improve robustness of some functions Stephen Zhang
@ 2021-01-25  9:54 ` Vitaly Kuznetsov
  2021-01-25  9:58   ` Paolo Bonzini
  0 siblings, 1 reply; 4+ messages in thread
From: Vitaly Kuznetsov @ 2021-01-25  9:54 UTC (permalink / raw)
  To: Stephen Zhang
  Cc: kvm, linux-kernel, Stephen Zhang, pbonzini, seanjc, wanpengli,
	jmattson, joro, tglx, mingo, bp, x86, hpa

Stephen Zhang <stephenzhangzsd@gmail.com> writes:

> The rmap_printk() calls hardcode the name of the enclosing function
> in the format string, so the message silently goes stale when the
> function is renamed. In fact, such stale names already exist:
> spte_write_protect() and spte_clear_dirty() still print
> "rmap_write_protect" and "rmap_clear_dirty". Use __func__ so the
> messages always match the function that emits them.
>

What if we do something like (completely untested):

diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index bfc6389edc28..5ec15e4160b1 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -12,7 +12,7 @@
 extern bool dbg;
 
 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
-#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
+#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
 #define MMU_WARN_ON(x) WARN_ON(x)
 #else
 #define pgprintk(x...) do { } while (0)

and eliminate the need to pass '__func__,' explicitly? We can probably
do the same to pgprintk().
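
With that macro in place, each call site would shrink to something
like this (illustrative, reusing one of the sites this patch touches):

	rmap_printk("spte %p %llx\n", sptep, *sptep);

which inside spte_write_protect() would print "spte_write_protect:
spte ...". The GNU '## args' extension drops the trailing comma when
no arguments follow the format string, so zero-argument calls still
compile.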

> Signed-off-by: Stephen Zhang <stephenzhangzsd@gmail.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 6d16481..09462c3d 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -844,17 +844,17 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
>  	int i, count = 0;
>  
>  	if (!rmap_head->val) {
> -		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
> +		rmap_printk("%s: %p %llx 0->1\n", __func__, spte, *spte);
>  		rmap_head->val = (unsigned long)spte;
>  	} else if (!(rmap_head->val & 1)) {
> -		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
> +		rmap_printk("%s: %p %llx 1->many\n", __func__, spte, *spte);
>  		desc = mmu_alloc_pte_list_desc(vcpu);
>  		desc->sptes[0] = (u64 *)rmap_head->val;
>  		desc->sptes[1] = spte;
>  		rmap_head->val = (unsigned long)desc | 1;
>  		++count;
>  	} else {
> -		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
> +		rmap_printk("%s: %p %llx many->many\n", __func__, spte, *spte);
>  		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
>  		while (desc->sptes[PTE_LIST_EXT-1]) {
>  			count += PTE_LIST_EXT;
> @@ -1115,7 +1115,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
>  	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
>  		return false;
>  
> -	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
> +	rmap_printk("%s: spte %p %llx\n", __func__, sptep, *sptep);
>  
>  	if (pt_protect)
>  		spte &= ~SPTE_MMU_WRITEABLE;
> @@ -1142,7 +1142,7 @@ static bool spte_clear_dirty(u64 *sptep)
>  {
>  	u64 spte = *sptep;
>  
> -	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
> +	rmap_printk("%s: spte %p %llx\n", __func__, sptep, *sptep);
>  
>  	MMU_WARN_ON(!spte_ad_enabled(spte));
>  	spte &= ~shadow_dirty_mask;
> @@ -1184,7 +1184,7 @@ static bool spte_set_dirty(u64 *sptep)
>  {
>  	u64 spte = *sptep;
>  
> -	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
> +	rmap_printk("%s: spte %p %llx\n", __func__, sptep, *sptep);
>  
>  	/*
>  	 * Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
> @@ -1363,8 +1363,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
>  
>  restart:
>  	for_each_rmap_spte(rmap_head, &iter, sptep) {
> -		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
> -			    sptep, *sptep, gfn, level);
> +		rmap_printk("%s: spte %p %llx gfn %llx (%d)\n",
> +			    __func__, sptep, *sptep, gfn, level);
>  
>  		need_flush = 1;

-- 
Vitaly



* Re: [PATCH] KVM: x86/mmu: improve robustness of some functions
  2021-01-25  9:54 ` Vitaly Kuznetsov
@ 2021-01-25  9:58   ` Paolo Bonzini
  2021-01-25 16:54     ` Sean Christopherson
  0 siblings, 1 reply; 4+ messages in thread
From: Paolo Bonzini @ 2021-01-25  9:58 UTC (permalink / raw)
  To: Vitaly Kuznetsov, Stephen Zhang
  Cc: kvm, linux-kernel, seanjc, wanpengli, jmattson, joro, tglx,
	mingo, bp, x86, hpa

On 25/01/21 10:54, Vitaly Kuznetsov wrote:
> 
> What if we do something like (completely untested):
> 
> diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
> index bfc6389edc28..5ec15e4160b1 100644
> --- a/arch/x86/kvm/mmu/mmu_internal.h
> +++ b/arch/x86/kvm/mmu/mmu_internal.h
> @@ -12,7 +12,7 @@
>  extern bool dbg;
>  
>  #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
> -#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
> +#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
>  #define MMU_WARN_ON(x) WARN_ON(x)
>  #else
>  #define pgprintk(x...) do { } while (0)
> 
> and eliminate the need to pass '__func__,' explicitly? We can probably
> do the same to pgprintk().

Nice indeed.  Though I wonder if anybody has ever used these.  For those 
that I actually needed in the past I created tracepoints instead.

Paolo



* Re: [PATCH] KVM: x86/mmu: improve robustness of some functions
  2021-01-25  9:58   ` Paolo Bonzini
@ 2021-01-25 16:54     ` Sean Christopherson
  0 siblings, 0 replies; 4+ messages in thread
From: Sean Christopherson @ 2021-01-25 16:54 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Vitaly Kuznetsov, Stephen Zhang, kvm, linux-kernel, wanpengli,
	jmattson, joro, tglx, mingo, bp, x86, hpa

On Mon, Jan 25, 2021, Paolo Bonzini wrote:
> On 25/01/21 10:54, Vitaly Kuznetsov wrote:
> > 
> > What if we do something like (completely untested):
> > 
> > diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
> > index bfc6389edc28..5ec15e4160b1 100644
> > --- a/arch/x86/kvm/mmu/mmu_internal.h
> > +++ b/arch/x86/kvm/mmu/mmu_internal.h
> > @@ -12,7 +12,7 @@
> >  extern bool dbg;
> >  #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
> > -#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
> > +#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
> >  #define MMU_WARN_ON(x) WARN_ON(x)
> >  #else
> >  #define pgprintk(x...) do { } while (0)
> > 
> > and eliminate the need to pass '__func__,' explicitly? We can probably
> > do the same to pgprintk().
> 
> Nice indeed.  Though I wonder if anybody has ever used these.

I've used the ones in pte_list_add() and __pte_list_remove().  I had to add more
info to track down the bug I introduced, but their initial existence was helpful.

That being said, I definitely did not build with MMU_DEBUG defined; I simply
changed a handful of rmap_printks to pr_warn.  Blindly enabling MMU_DEBUG
activates far too much output to be useful.  That may not have been the case
when the core parts of the MMU were under heavy development, but it does feel
like the time has come to excise the bulk of the pgprintk and rmap_printk hooks.
Ditto for mmu_audit.c.

> For those that I actually needed in the past I created tracepoints instead.

Ya.  There are times where I prefer using the kernel log over tracepoints, but
it's easy enough to copy-paste the tracepoint into a pr_* when desired.
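
As a hypothetical sketch (the tracepoint name and fields are
invented), that copy-paste amounts to swapping

	trace_kvm_rmap_update(sptep, *sptep);  /* hypothetical tracepoint */

for

	pr_warn("rmap_update: spte %p %llx\n", sptep, *sptep);

with the format lifted from the tracepoint's TP_printk() definition.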

I'd be ok with converting a few select rmap_printks to tracepoints, but I vote
to completely remove the bulk of the existing code.  Tracepoints always make me
a bit wary; it's easy to forget/overlook that the inputs to the tracepoint are
still generated even if the tracepoint itself is disabled.  E.g. being too
liberal with tracepoints could theoretically degrade performance.
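
A sketch of the trap (helper and tracepoint names invented): only the
trace_*() call itself sits behind the static branch, so anything
computed beforehand to feed it is paid for unconditionally:

	/* hypothetical helper: runs even while the tracepoint is disabled */
	u64 summary = summarize_rmap(rmap_head);
	/* hypothetical tracepoint: skipped via static key when disabled */
	trace_kvm_rmap_summary(sptep, summary);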

If we do yank them, I think it makes sense to get rid of mmu_audit.c in the same
commit.  In theory, doing it all in one commit would make it easier for someone
to restore the hooks if they need them to debug something in the future.

