* [PATCH 1/2] KVM: stats: Add counters for VMX all/L2/nested exit reasons
@ 2021-08-27 17:41 Jing Zhang
  2021-08-27 17:41 ` [PATCH 2/2] KVM: stats: Add counters for SVM " Jing Zhang
From: Jing Zhang @ 2021-08-27 17:41 UTC (permalink / raw)
  To: KVM, Paolo Bonzini, David Matlack, Peter Shier, Oliver Upton,
	Sean Christopherson, Peter Feiner, Jim Mattson
  Cc: Jing Zhang

These stats will be used to monitor nested virtualization usage in
VMs. Most importantly, VMXON exits are evidence that the guest has
enabled VMX, and VMLAUNCH/VMRESUME exits are evidence that the guest
has run an L2.

Original-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Jing Zhang <jingzhangos@google.com>
---
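For context (not part of this patch): a rough userspace sketch of how the
new per-exit-reason histograms could be read through the vCPU binary stats
file descriptor (KVM_GET_STATS_FD). The helper name below is made up for
illustration and error handling is omitted; the header/descriptor layout is
the existing binary stats format from include/uapi/linux/kvm.h.

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	#include <linux/kvm.h>
	#include <asm/vmx.h>	/* EXIT_REASON_VMON, EXIT_REASON_VMLAUNCH, ... */

	/*
	 * Return the bucket value for @exit_reason in the "vmx_all_exits"
	 * histogram, given a fd from ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL).
	 */
	static uint64_t read_vmx_exit_count(int stats_fd, uint32_t exit_reason)
	{
		struct kvm_stats_header hdr;
		struct kvm_stats_desc *desc;
		uint64_t count = 0;
		size_t desc_sz;
		uint32_t i;

		pread(stats_fd, &hdr, sizeof(hdr), 0);
		desc_sz = sizeof(*desc) + hdr.name_size;
		desc = malloc(desc_sz);

		for (i = 0; i < hdr.num_desc; i++) {
			pread(stats_fd, desc, desc_sz,
			      hdr.desc_offset + (size_t)i * desc_sz);
			if (strcmp(desc->name, "vmx_all_exits"))
				continue;
			/* desc->size is the number of u64 buckets (EXIT_REASON_NUM) */
			if (exit_reason < desc->size)
				pread(stats_fd, &count, sizeof(count),
				      hdr.data_offset + desc->offset +
				      exit_reason * sizeof(uint64_t));
			break;
		}
		free(desc);
		return count;
	}

E.g. a per-vCPU read_vmx_exit_count(stats_fd, EXIT_REASON_VMON) > 0 would
indicate that vCPU has executed VMXON at least once.
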
 arch/x86/include/asm/kvm_host.h | 4 ++++
 arch/x86/include/uapi/asm/vmx.h | 1 +
 arch/x86/kvm/vmx/vmx.c          | 7 ++++++-
 arch/x86/kvm/x86.c              | 5 ++++-
 include/linux/kvm_host.h        | 4 ++++
 5 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09b256db394a..dd2380c9ea96 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
 #include <asm/kvm_page_track.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/hyperv-tlfs.h>
+#include <asm/vmx.h>
 
 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
 
@@ -1257,6 +1258,9 @@ struct kvm_vcpu_stat {
 	u64 directed_yield_attempted;
 	u64 directed_yield_successful;
 	u64 guest_mode;
+	u64 vmx_all_exits[EXIT_REASON_NUM];
+	u64 vmx_l2_exits[EXIT_REASON_NUM];
+	u64 vmx_nested_exits[EXIT_REASON_NUM];
 };
 
 struct x86_instruction_info;
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 946d761adbd3..563b3901ef7a 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -91,6 +91,7 @@
 #define EXIT_REASON_UMWAIT              67
 #define EXIT_REASON_TPAUSE              68
 #define EXIT_REASON_BUS_LOCK            74
+#define EXIT_REASON_NUM                 (EXIT_REASON_BUS_LOCK + 1)
 
 #define VMX_EXIT_REASONS \
 	{ EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index fada1055f325..5cd9f02a458a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5890,6 +5890,8 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	u32 vectoring_info = vmx->idt_vectoring_info;
 	u16 exit_handler_index;
 
+	++vcpu->stat.vmx_all_exits[exit_reason.basic];
+
 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
 	 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -5915,6 +5917,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 		return handle_invalid_guest_state(vcpu);
 
 	if (is_guest_mode(vcpu)) {
+		++vcpu->stat.vmx_l2_exits[exit_reason.basic];
 		/*
 		 * PML is never enabled when running L2, bail immediately if a
 		 * PML full exit occurs as something is horribly wrong.
@@ -5935,8 +5938,10 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 		 */
 		nested_mark_vmcs12_pages_dirty(vcpu);
 
-		if (nested_vmx_reflect_vmexit(vcpu))
+		if (nested_vmx_reflect_vmexit(vcpu)) {
+			++vcpu->stat.vmx_nested_exits[exit_reason.basic];
 			return 1;
+		}
 	}
 
 	if (exit_reason.failed_vmentry) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 86539c1686fa..245ad1d147dd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -277,7 +277,10 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	STATS_DESC_COUNTER(VCPU, nested_run),
 	STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
 	STATS_DESC_COUNTER(VCPU, directed_yield_successful),
-	STATS_DESC_ICOUNTER(VCPU, guest_mode)
+	STATS_DESC_ICOUNTER(VCPU, guest_mode),
+	STATS_DESC_LINHIST_COUNTER(VCPU, vmx_all_exits, EXIT_REASON_NUM, 1),
+	STATS_DESC_LINHIST_COUNTER(VCPU, vmx_l2_exits, EXIT_REASON_NUM, 1),
+	STATS_DESC_LINHIST_COUNTER(VCPU, vmx_nested_exits, EXIT_REASON_NUM, 1),
 };
 
 const struct kvm_stats_header kvm_vcpu_stats_header = {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e4d712e9f760..47e986450b06 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1429,6 +1429,10 @@ struct _kvm_stats_desc {
 #define STATS_DESC_PCOUNTER(SCOPE, name)				       \
 	STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE,		       \
 		KVM_STATS_BASE_POW10, 0)
+/* Linear histogram for counter */
+#define STATS_DESC_LINHIST_COUNTER(SCOPE, name, sz, bsz)		       \
+	STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_NONE,	       \
+		KVM_STATS_BASE_POW10, 0, sz, bsz)
 
 /* Cumulative time in nanosecond */
 #define STATS_DESC_TIME_NSEC(SCOPE, name)				       \

base-commit: 680c7e3be6a3d502248771fe42c911f99d7e006c
-- 
2.33.0.259.gc128427fd7-goog



* [PATCH 2/2] KVM: stats: Add counters for SVM exit reasons
  2021-08-27 17:41 [PATCH 1/2] KVM: stats: Add counters for VMX all/L2/nested exit reasons Jing Zhang
@ 2021-08-27 17:41 ` Jing Zhang
  2021-08-27 20:12   ` Sean Christopherson
From: Jing Zhang @ 2021-08-27 17:41 UTC (permalink / raw)
  To: KVM, Paolo Bonzini, David Matlack, Peter Shier, Oliver Upton,
	Sean Christopherson, Peter Feiner, Jim Mattson
  Cc: Jing Zhang

Three exit code ranges are tracked, named low, high and vmgexit,
starting at 0x0, 0x400 and 0x80000000 respectively.

Original-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Jing Zhang <jingzhangos@google.com>
---
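For illustration (not part of this patch), the bucketing added to
handle_exit() below maps raw exit codes to histogram indices like so (the
example codes are existing SVM_EXIT_*/SVM_VMGEXIT_* values):

	SVM_EXIT_READ_CR0     (0x000)      -> svm_exits_low[0]
	SVM_EXIT_NPF          (0x400)      -> svm_exits_high[0]  (0x400 - SVM_EXIT_HIGH_START)
	SVM_VMGEXIT_MMIO_READ (0x80000001) -> svm_vmgexits[1]    (0x80000001 - SVM_VMGEXIT_START)
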
 arch/x86/include/asm/kvm_host.h |  7 +++++++
 arch/x86/include/uapi/asm/svm.h |  7 +++++++
 arch/x86/kvm/svm/svm.c          | 21 +++++++++++++++++++++
 arch/x86/kvm/x86.c              |  9 +++++++++
 4 files changed, 44 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dd2380c9ea96..6e3c11a29afe 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -35,6 +35,7 @@
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/vmx.h>
+#include <asm/svm.h>
 
 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
 
@@ -1261,6 +1262,12 @@ struct kvm_vcpu_stat {
 	u64 vmx_all_exits[EXIT_REASON_NUM];
 	u64 vmx_l2_exits[EXIT_REASON_NUM];
 	u64 vmx_nested_exits[EXIT_REASON_NUM];
+	u64 svm_exits_low[SVM_EXIT_LOW_END - SVM_EXIT_LOW_START];
+	u64 svm_exits_high[SVM_EXIT_HIGH_END - SVM_EXIT_HIGH_START];
+	u64 svm_vmgexits[SVM_VMGEXIT_END - SVM_VMGEXIT_START];
+	u64 svm_vmgexit_unsupported_event;
+	u64 svm_exit_sw;
+	u64 svm_exit_err;
 };
 
 struct x86_instruction_info;
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index efa969325ede..153ebc4ac70e 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -2,6 +2,7 @@
 #ifndef _UAPI__SVM_H
 #define _UAPI__SVM_H
 
+#define SVM_EXIT_LOW_START     0x000
 #define SVM_EXIT_READ_CR0      0x000
 #define SVM_EXIT_READ_CR2      0x002
 #define SVM_EXIT_READ_CR3      0x003
@@ -95,17 +96,23 @@
 #define SVM_EXIT_CR14_WRITE_TRAP		0x09e
 #define SVM_EXIT_CR15_WRITE_TRAP		0x09f
 #define SVM_EXIT_INVPCID       0x0a2
+#define SVM_EXIT_LOW_END			0xa3
+
+#define SVM_EXIT_HIGH_START			0x400
 #define SVM_EXIT_NPF           0x400
 #define SVM_EXIT_AVIC_INCOMPLETE_IPI		0x401
 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS	0x402
 #define SVM_EXIT_VMGEXIT       0x403
+#define SVM_EXIT_HIGH_END			0x404
 
 /* SEV-ES software-defined VMGEXIT events */
+#define SVM_VMGEXIT_START			0x80000000
 #define SVM_VMGEXIT_MMIO_READ			0x80000001
 #define SVM_VMGEXIT_MMIO_WRITE			0x80000002
 #define SVM_VMGEXIT_NMI_COMPLETE		0x80000003
 #define SVM_VMGEXIT_AP_HLT_LOOP			0x80000004
 #define SVM_VMGEXIT_AP_JUMP_TABLE		0x80000005
+#define SVM_VMGEXIT_END				0x80000006
 #define SVM_VMGEXIT_SET_AP_JUMP_TABLE		0
 #define SVM_VMGEXIT_GET_AP_JUMP_TABLE		1
 #define SVM_VMGEXIT_UNSUPPORTED_EVENT		0x8000ffff
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1a70e11f0487..e04bd201dd53 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3291,6 +3291,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
+	size_t index;
 
 	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
 
@@ -3337,6 +3338,26 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	if (exit_fastpath != EXIT_FASTPATH_NONE)
 		return 1;
 
+	if (exit_code >= SVM_EXIT_LOW_START &&
+			exit_code < SVM_EXIT_LOW_END) {
+		index = exit_code - SVM_EXIT_LOW_START;
+		++vcpu->stat.svm_exits_low[index];
+	} else if (exit_code >= SVM_EXIT_HIGH_START &&
+			exit_code < SVM_EXIT_HIGH_END) {
+		index = exit_code - SVM_EXIT_HIGH_START;
+		++vcpu->stat.svm_exits_high[index];
+	} else if (exit_code >= SVM_VMGEXIT_START &&
+			exit_code < SVM_VMGEXIT_END) {
+		index = exit_code - SVM_VMGEXIT_START;
+		++vcpu->stat.svm_vmgexits[index];
+	} else if (exit_code == SVM_VMGEXIT_UNSUPPORTED_EVENT) {
+		++vcpu->stat.svm_vmgexit_unsupported_event;
+	} else if (exit_code == SVM_EXIT_SW) {
+		++vcpu->stat.svm_exit_sw;
+	} else if (exit_code == SVM_EXIT_ERR) {
+		++vcpu->stat.svm_exit_err;
+	}
+
 	return svm_invoke_exit_handler(vcpu, exit_code);
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 245ad1d147dd..df75e4b2711e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -281,6 +281,15 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	STATS_DESC_LINHIST_COUNTER(VCPU, vmx_all_exits, EXIT_REASON_NUM, 1),
 	STATS_DESC_LINHIST_COUNTER(VCPU, vmx_l2_exits, EXIT_REASON_NUM, 1),
 	STATS_DESC_LINHIST_COUNTER(VCPU, vmx_nested_exits, EXIT_REASON_NUM, 1),
+	STATS_DESC_LINHIST_COUNTER(VCPU, svm_exits_low,
+			SVM_EXIT_LOW_END - SVM_EXIT_LOW_START, 1),
+	STATS_DESC_LINHIST_COUNTER(VCPU, svm_exits_high,
+			SVM_EXIT_HIGH_END - SVM_EXIT_HIGH_START, 1),
+	STATS_DESC_LINHIST_COUNTER(VCPU, svm_vmgexits,
+			SVM_VMGEXIT_END - SVM_VMGEXIT_START, 1),
+	STATS_DESC_COUNTER(VCPU, svm_vmgexit_unsupported_event),
+	STATS_DESC_COUNTER(VCPU, svm_exit_sw),
+	STATS_DESC_COUNTER(VCPU, svm_exit_err),
 };
 
 const struct kvm_stats_header kvm_vcpu_stats_header = {
-- 
2.33.0.259.gc128427fd7-goog



* Re: [PATCH 2/2] KVM: stats: Add counters for SVM exit reasons
  2021-08-27 17:41 ` [PATCH 2/2] KVM: stats: Add counters for SVM " Jing Zhang
@ 2021-08-27 20:12   ` Sean Christopherson
  2021-08-31 17:27     ` Jing Zhang
From: Sean Christopherson @ 2021-08-27 20:12 UTC (permalink / raw)
  To: Jing Zhang
  Cc: KVM, Paolo Bonzini, David Matlack, Peter Shier, Oliver Upton,
	Peter Feiner, Jim Mattson

On Fri, Aug 27, 2021, Jing Zhang wrote:
> Three exit code ranges are tracked, named low, high and vmgexit,
> starting at 0x0, 0x400 and 0x80000000 respectively.
> 
> Original-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Jing Zhang <jingzhangos@google.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  7 +++++++
>  arch/x86/include/uapi/asm/svm.h |  7 +++++++
>  arch/x86/kvm/svm/svm.c          | 21 +++++++++++++++++++++
>  arch/x86/kvm/x86.c              |  9 +++++++++
>  4 files changed, 44 insertions(+)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index dd2380c9ea96..6e3c11a29afe 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -35,6 +35,7 @@
>  #include <asm/kvm_vcpu_regs.h>
>  #include <asm/hyperv-tlfs.h>
>  #include <asm/vmx.h>
> +#include <asm/svm.h>
>  
>  #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
>  
> @@ -1261,6 +1262,12 @@ struct kvm_vcpu_stat {
>  	u64 vmx_all_exits[EXIT_REASON_NUM];
>  	u64 vmx_l2_exits[EXIT_REASON_NUM];
>  	u64 vmx_nested_exits[EXIT_REASON_NUM];
> +	u64 svm_exits_low[SVM_EXIT_LOW_END - SVM_EXIT_LOW_START];
> +	u64 svm_exits_high[SVM_EXIT_HIGH_END - SVM_EXIT_HIGH_START];
> +	u64 svm_vmgexits[SVM_VMGEXIT_END - SVM_VMGEXIT_START];

This is, for lack of a better word, a very lazy approach.  With a bit more (ok,
probably a lot more) effort and abstraction, we can have parity between VMX and
SVM, and eliminate a bunch of dead weight.  Having rough parity would likely be
quite helpful for the end user, e.g. reduces/eliminates vendor specific userspace
code.

E.g. this more or less doubles the memory footprint due to tracking VMX and SVM
separately, SVM has finer granularity than VMX (often too fine), VMX tracks nested
exits but SVM does not, etc...

If we use KVM-defined exit reasons, then we can omit the exits that should never
happen (SVM has quite a few), and consolidate the ones no one should ever care
about, e.g. DR0..DR4 and DR6 can be collapsed (if I'm remembering my DR aliases
correctly).  And on the VMX side we can provide better granularity than the raw
exit reason, e.g. VMX's bundling of NMIs and exceptions is downright gross.

	#define KVM_GUEST_EXIT_READ_CR0
	#define KVM_GUEST_EXIT_READ_CR3
	#define KVM_GUEST_EXIT_READ_CR4
	#define KVM_GUEST_EXIT_READ_CR8
	#define KVM_GUEST_EXIT_DR_READ
	#define KVM_GUEST_EXIT_DR_WRITE
	#define KVM_GUEST_EXIT_DB_EXCEPTION
	#define KVM_GUEST_EXIT_BP_EXCEPTION
	#define KVM_GUEST_EXIT_UD_EXCEPTION

	...
	#define KVM_NR_GUEST_EXIT_REASONS

	u64 l1_exits[KVM_NR_GUEST_EXIT_REASONS];
	u64 l2_exits[KVM_NR_GUEST_EXIT_REASONS];
	u64 nested_exits[KVM_NR_GUEST_EXIT_REASONS];


The downside is that it will require KVM-defined exit reasons and will have a
slightly higher maintenance cost because of it, but I think overall it would be
a net positive.  Paolo can probably even concoct some clever arithmetic to avoid
conditionals when massaging SVM exit reasons.  VMX will require a bit more
manual effort, especially to play nice with nested, but in the end I think we'll
be happier.
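
A minimal sketch of what the table-driven SVM side could look like, purely
to show the shape (all KVM_GUEST_EXIT_* names here are hypothetical, in the
spirit of the list above; none are existing KVM symbols):

	/*
	 * Hypothetical: collapse the dense low exit-code range with a lookup
	 * table and handle the small high range arithmetically, assuming the
	 * KVM-defined reasons for NPF..VMGEXIT are contiguous.
	 */
	static const u16 svm_to_kvm_exit[SVM_EXIT_LOW_END] = {
		[SVM_EXIT_READ_CR0]                         = KVM_GUEST_EXIT_READ_CR0,
		[SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7]   = KVM_GUEST_EXIT_DR_READ,
		[SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7] = KVM_GUEST_EXIT_DR_WRITE,
		[SVM_EXIT_EXCP_BASE + DB_VECTOR]            = KVM_GUEST_EXIT_DB_EXCEPTION,
		[SVM_EXIT_EXCP_BASE + BP_VECTOR]            = KVM_GUEST_EXIT_BP_EXCEPTION,
		/* ... */
	};

	static int svm_exit_to_kvm_reason(u32 exit_code)
	{
		if (exit_code < SVM_EXIT_LOW_END)
			return svm_to_kvm_exit[exit_code];
		if (exit_code >= SVM_EXIT_HIGH_START &&
		    exit_code < SVM_EXIT_HIGH_END)
			return KVM_GUEST_EXIT_NPF + (exit_code - SVM_EXIT_HIGH_START);
		return KVM_GUEST_EXIT_OTHER;
	}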

> +	u64 svm_vmgexit_unsupported_event;
> +	u64 svm_exit_sw;
> +	u64 svm_exit_err;
>  };


* Re: [PATCH 2/2] KVM: stats: Add counters for SVM exit reasons
  2021-08-27 20:12   ` Sean Christopherson
@ 2021-08-31 17:27     ` Jing Zhang
From: Jing Zhang @ 2021-08-31 17:27 UTC (permalink / raw)
  To: Sean Christopherson
  Cc: KVM, Paolo Bonzini, David Matlack, Peter Shier, Oliver Upton,
	Peter Feiner, Jim Mattson

Hi Sean,

On Fri, Aug 27, 2021 at 1:12 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Fri, Aug 27, 2021, Jing Zhang wrote:
> > Three exit code ranges are tracked, named low, high and vmgexit,
> > starting at 0x0, 0x400 and 0x80000000 respectively.
> >
> > Original-by: Jim Mattson <jmattson@google.com>
> > Signed-off-by: Jing Zhang <jingzhangos@google.com>
> > ---
> >  arch/x86/include/asm/kvm_host.h |  7 +++++++
> >  arch/x86/include/uapi/asm/svm.h |  7 +++++++
> >  arch/x86/kvm/svm/svm.c          | 21 +++++++++++++++++++++
> >  arch/x86/kvm/x86.c              |  9 +++++++++
> >  4 files changed, 44 insertions(+)
> >
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index dd2380c9ea96..6e3c11a29afe 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -35,6 +35,7 @@
> >  #include <asm/kvm_vcpu_regs.h>
> >  #include <asm/hyperv-tlfs.h>
> >  #include <asm/vmx.h>
> > +#include <asm/svm.h>
> >
> >  #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
> >
> > @@ -1261,6 +1262,12 @@ struct kvm_vcpu_stat {
> >       u64 vmx_all_exits[EXIT_REASON_NUM];
> >       u64 vmx_l2_exits[EXIT_REASON_NUM];
> >       u64 vmx_nested_exits[EXIT_REASON_NUM];
> > +     u64 svm_exits_low[SVM_EXIT_LOW_END - SVM_EXIT_LOW_START];
> > +     u64 svm_exits_high[SVM_EXIT_HIGH_END - SVM_EXIT_HIGH_START];
> > +     u64 svm_vmgexits[SVM_VMGEXIT_END - SVM_VMGEXIT_START];
>
> This is, for lack of a better word, a very lazy approach.  With a bit more (ok,
> probably a lot more) effort and abstraction, we can have parity between VMX and
> SVM, and eliminate a bunch of dead weight.  Having rough parity would likely be
> quite helpful for the end user, e.g. reduces/eliminates vendor specific userspace
> code.
>
> E.g. this more or less doubles the memory footprint due to tracking VMX and SVM
> separately, SVM has finer granularity than VMX (often too fine),  VMX tracks nested
> exits but SVM does not, etc...
>
> If we use KVM-defined exit reasons, then we can omit the exits that should never
> happen (SVM has quite a few), and consolidate the ones no one should ever care
> about, e.g. DR0..DR4 and DR6 can be collapsed (if I'm remembering my DR aliases
> correctly).  And on the VMX side we can provide better granularity than the raw
> exit reason, e.g. VMX's bundling of NMIs and exceptions is downright gross.
>
>         #define KVM_GUEST_EXIT_READ_CR0
>         #define KVM_GUEST_EXIT_READ_CR3
>         #define KVM_GUEST_EXIT_READ_CR4
>         #define KVM_GUEST_EXIT_READ_CR8
>         #define KVM_GUEST_EXIT_DR_READ
>         #define KVM_GUEST_EXIT_DR_WRITE
>         #define KVM_GUEST_EXIT_DB_EXCEPTION
>         #define KVM_GUEST_EXIT_BP_EXCEPTION
>         #define KVM_GUEST_EXIT_UD_EXCEPTION
>
>         ...
>         #define KVM_NR_GUEST_EXIT_REASONS
>
>         u64 l1_exits[KVM_NR_GUEST_EXIT_REASONS];
>         u64 l2_exits[KVM_NR_GUEST_EXIT_REASONS];
>         u64 nested_exits[KVM_NR_GUEST_EXIT_REASONS];
>
>
> The downside is that it will require KVM-defined exit reasons and will have a
> slightly higher maintenance cost because of it, but I think overall it would be
> a net positive.  Paolo can probably even concoct some clever arithmetic to avoid
> conditionals when massaging SVM exit reasons.  VMX will require a bit more
> manual effort, especially to play nice with nested, but in the end I think we'll
> be happier.
>
Thanks for the suggestions. Will try to generalize the exit reasons
and see how it goes.

Jing
> > +     u64 svm_vmgexit_unsupported_event;
> > +     u64 svm_exit_sw;
> > +     u64 svm_exit_err;
> >  };
