All of lore.kernel.org
 help / color / mirror / Atom feed
From: Mark Langsdorf <mark.langsdorf@amd.com>
To: Joerg Roedel <joerg.roedel@amd.com>,
	peterz@infradead.org, Ingo Molnar <mingo@elte.hu>
Cc: avi@redhat.com, kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH][KVM][retry 4] Add support for Pause Filtering to AMD SVM
Date: Wed, 20 May 2009 17:25:17 -0500	[thread overview]
Message-ID: <200905201725.18046.mark.langsdorf@amd.com> (raw)
In-Reply-To: <200905191356.37071.mark.langsdorf@amd.com>

This feature creates a new field in the VMCB called Pause
Filter Count.  If Pause Filter Count is greater than 0 and
intercepting PAUSEs is enabled, the processor will increment
an internal counter when a PAUSE instruction occurs instead
of intercepting.  When the internal counter reaches the
Pause Filter Count value, a PAUSE intercept will occur.

This feature can be used to detect contended spinlocks,
especially when the lock holding VCPU is not scheduled.
Rescheduling another VCPU prevents the VCPU seeking the
lock from wasting its quantum by spinning idly.  Perform
the reschedule by increasing the credited time on
the VCPU.

Experimental results show that most spinlocks are held
for less than 1000 PAUSE cycles or more than a few
thousand.  Default the Pause Filter Counter to 3000 to
detect the contended spinlocks.

Processor support for this feature is indicated by a CPUID
bit.

On a 24 core system running 4 guests each with 16 VCPUs,
this patch improved overall performance of each guest's
32 job kernbench by approximately 1%.  Further performance
improvement may be possible with a more sophisticated
yield algorithm.

-Mark Langsdorf
Operating System Research Center
AMD

Signed-off-by: Mark Langsdorf <mark.langsdorf@amd.com>
---
 arch/x86/include/asm/svm.h |    3 ++-
 arch/x86/kvm/svm.c         |   13 +++++++++++++
 include/linux/sched.h      |    7 +++++++
 kernel/sched.c             |   18 ++++++++++++++++++
 4 files changed, 40 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 85574b7..1fecb7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u16 intercept_dr_write;
 	u32 intercept_exceptions;
 	u64 intercept;
-	u8 reserved_1[44];
+	u8 reserved_1[42];
+	u16 pause_filter_count;
 	u64 iopm_base_pa;
 	u64 msrpm_base_pa;
 	u64 tsc_offset;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ef43a18..dad6c4b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_NPT  (1 << 0)
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
@@ -575,6 +576,11 @@ static void init_vmcb(struct vcpu_svm *svm)
 
 	svm->nested_vmcb = 0;
 	svm->vcpu.arch.hflags = HF_GIF_MASK;
+
+	if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+		control->pause_filter_count = 3000;
+		control->intercept |= (1ULL << INTERCEPT_PAUSE);
+	}
 }
 
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -2087,6 +2093,12 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
 	return 1;
 }
 
+static int pause_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	sched_delay_yield(1000000);
+	return 1;
+}
+
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 				      struct kvm_run *kvm_run) = {
 	[SVM_EXIT_READ_CR0]           		= emulate_on_interception,
@@ -2123,6 +2135,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_CPUID]			= cpuid_interception,
 	[SVM_EXIT_IRET]                         = iret_interception,
 	[SVM_EXIT_INVD]                         = emulate_on_interception,
+	[SVM_EXIT_PAUSE]			= pause_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
 	[SVM_EXIT_INVLPG]			= invlpg_interception,
 	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4c38bc..9cde585 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2283,6 +2283,9 @@ static inline unsigned int task_cpu(const struct task_struct *p)
 	return task_thread_info(p)->cpu;
 }
 
+extern void sched_delay_yield(unsigned long ns);
+
+
 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
 
 #else
@@ -2292,6 +2295,10 @@ static inline unsigned int task_cpu(const struct task_struct *p)
 	return 0;
 }
 
+void sched_delay_yield(struct task_struct *p, unsigned int delay)
+{
+}
+
 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index b902e58..3aed2f6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1947,6 +1947,24 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	return delta < (s64)sysctl_sched_migration_cost;
 }
 
+/*
+ * Interface for yielding a thread by delaying it for a known
+ * interval.  Use at your own risk and not with real-time.
+ *
+ * Like yield, except for SCHED_OTHER/BATCH, where it will
+ * give us @ns time for the 'good' cause.
+ */
+void sched_delay_yield(unsigned long ns)
+{
+	struct task_struct *curr = current;
+	if (curr->sched_class == &fair_sched_class) {
+		struct sched_entity *se = &curr->se;
+		__update_curr(cfs_rq_of(se), se, ns);
+		schedule();
+	} else
+		yield();
+}
+EXPORT_SYMBOL_GPL(sched_delay_yield);
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
-- 
1.6.0.2



WARNING: multiple messages have this Message-ID (diff)
From: Mark Langsdorf <mark.langsdorf@amd.com>
To: Joerg Roedel <joerg.roedel@amd.com>,
	peterz@infradead.org, Ingo Molnar <mingo@elte.hu>
Cc: avi@redhat.com, kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH][retry 4] Add support for Pause Filtering to AMD SVM
Date: Wed, 20 May 2009 17:25:17 -0500	[thread overview]
Message-ID: <200905201725.18046.mark.langsdorf@amd.com> (raw)
In-Reply-To: <200905191356.37071.mark.langsdorf@amd.com>

This feature creates a new field in the VMCB called Pause
Filter Count.  If Pause Filter Count is greater than 0 and
intercepting PAUSEs is enabled, the processor will increment
an internal counter when a PAUSE instruction occurs instead
of intercepting.  When the internal counter reaches the
Pause Filter Count value, a PAUSE intercept will occur.

This feature can be used to detect contended spinlocks,
especially when the lock holding VCPU is not scheduled.
Rescheduling another VCPU prevents the VCPU seeking the
lock from wasting its quantum by spinning idly.  Perform
the reschedule by increasing the credited time on
the VCPU.

Experimental results show that most spinlocks are held
for less than 1000 PAUSE cycles or more than a few
thousand.  Default the Pause Filter Counter to 3000 to
detect the contended spinlocks.

Processor support for this feature is indicated by a CPUID
bit.

On a 24 core system running 4 guests each with 16 VCPUs,
this patch improved overall performance of each guest's
32 job kernbench by approximately 1%.  Further performance
improvement may be possible with a more sophisticated
yield algorithm.

-Mark Langsdorf
Operating System Research Center
AMD

Signed-off-by: Mark Langsdorf <mark.langsdorf@amd.com>
---
 arch/x86/include/asm/svm.h |    3 ++-
 arch/x86/kvm/svm.c         |   13 +++++++++++++
 include/linux/sched.h      |    7 +++++++
 kernel/sched.c             |   18 ++++++++++++++++++
 4 files changed, 40 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 85574b7..1fecb7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u16 intercept_dr_write;
 	u32 intercept_exceptions;
 	u64 intercept;
-	u8 reserved_1[44];
+	u8 reserved_1[42];
+	u16 pause_filter_count;
 	u64 iopm_base_pa;
 	u64 msrpm_base_pa;
 	u64 tsc_offset;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ef43a18..dad6c4b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_NPT  (1 << 0)
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
@@ -575,6 +576,11 @@ static void init_vmcb(struct vcpu_svm *svm)
 
 	svm->nested_vmcb = 0;
 	svm->vcpu.arch.hflags = HF_GIF_MASK;
+
+	if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+		control->pause_filter_count = 3000;
+		control->intercept |= (1ULL << INTERCEPT_PAUSE);
+	}
 }
 
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -2087,6 +2093,12 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
 	return 1;
 }
 
+static int pause_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	sched_delay_yield(1000000);
+	return 1;
+}
+
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 				      struct kvm_run *kvm_run) = {
 	[SVM_EXIT_READ_CR0]           		= emulate_on_interception,
@@ -2123,6 +2135,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_CPUID]			= cpuid_interception,
 	[SVM_EXIT_IRET]                         = iret_interception,
 	[SVM_EXIT_INVD]                         = emulate_on_interception,
+	[SVM_EXIT_PAUSE]			= pause_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
 	[SVM_EXIT_INVLPG]			= invlpg_interception,
 	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4c38bc..9cde585 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2283,6 +2283,9 @@ static inline unsigned int task_cpu(const struct task_struct *p)
 	return task_thread_info(p)->cpu;
 }
 
+extern void sched_delay_yield(unsigned long ns);
+
+
 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
 
 #else
@@ -2292,6 +2295,10 @@ static inline unsigned int task_cpu(const struct task_struct *p)
 	return 0;
 }
 
+void sched_delay_yield(struct task_struct *p, unsigned int delay)
+{
+}
+
 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index b902e58..3aed2f6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1947,6 +1947,24 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	return delta < (s64)sysctl_sched_migration_cost;
 }
 
+/*
+ * Interface for yielding a thread by delaying it for a known
+ * interval.  Use at your own risk and not with real-time.
+ *
+ * Like yield, except for SCHED_OTHER/BATCH, where it will
+ * give us @ns time for the 'good' cause.
+ */
+void sched_delay_yield(unsigned long ns)
+{
+	struct task_struct *curr = current;
+	if (curr->sched_class == &fair_sched_class) {
+		struct sched_entity *se = &curr->se;
+		__update_curr(cfs_rq_of(se), se, ns);
+		schedule();
+	} else
+		yield();
+}
+EXPORT_SYMBOL_GPL(sched_delay_yield);
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
-- 
1.6.0.2



  parent reply	other threads:[~2009-05-20 22:20 UTC|newest]

Thread overview: 79+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-05-05 14:09 [PATCH][KVM] Add support for Pause Filtering to AMD SVM Mark Langsdorf
2009-05-05 16:05 ` Bert Wesarg
2009-05-07 13:55 ` Joerg Roedel
2009-05-07 15:00   ` [PATCH][KVM][retry 1] " Mark Langsdorf
2009-05-07 15:00     ` [PATCH][retry " Mark Langsdorf
2009-05-07 15:31     ` [PATCH][KVM][retry " Avi Kivity
2009-05-11 14:15       ` Ingo Molnar
2009-05-11 14:15         ` [PATCH][retry " Ingo Molnar
2009-05-11 14:24         ` [PATCH][KVM][retry " Avi Kivity
2009-05-11 14:24           ` [PATCH][retry " Avi Kivity
2009-05-11 14:33           ` [PATCH][KVM][retry " Ingo Molnar
2009-05-11 14:33             ` [PATCH][retry " Ingo Molnar
2009-05-11 14:51             ` [PATCH][KVM][retry " Avi Kivity
2009-05-11 14:51               ` [PATCH][retry " Avi Kivity
2009-05-11 14:59               ` [PATCH][KVM][retry " Ingo Molnar
2009-05-11 14:59                 ` [PATCH][retry " Ingo Molnar
2009-05-11 15:12                 ` [PATCH][KVM][retry " Avi Kivity
2009-05-11 15:12                   ` [PATCH][retry " Avi Kivity
2009-05-11 15:18                   ` [PATCH][KVM][retry " Ingo Molnar
2009-05-11 15:18                     ` [PATCH][retry " Ingo Molnar
2009-05-11 15:28                     ` [PATCH][KVM][retry " Avi Kivity
2009-05-11 15:28                       ` [PATCH][retry " Avi Kivity
2009-05-11 15:36                       ` [PATCH][KVM][retry " Langsdorf, Mark
2009-05-11 15:36                         ` [PATCH][retry " Langsdorf, Mark
2009-05-11 15:40                         ` [PATCH][KVM][retry " Avi Kivity
2009-05-11 15:58                 ` [PATCH][KVM][retry 1] Add support for Pause Filtering to AMDSVM Langsdorf, Mark
2009-05-11 15:58                   ` [PATCH][retry " Langsdorf, Mark
2009-05-11 15:01               ` [PATCH][KVM][retry 1] Add support for Pause Filtering to AMD SVM Peter Zijlstra
2009-05-11 15:01                 ` [PATCH][retry " Peter Zijlstra
2009-05-11 15:06                 ` [PATCH][KVM][retry " Avi Kivity
2009-05-11 15:06                   ` [PATCH][retry " Avi Kivity
2009-05-11 14:42           ` [PATCH][KVM][retry " Peter Zijlstra
2009-05-11 15:05             ` Avi Kivity
2009-05-11 15:05               ` [PATCH][retry " Avi Kivity
2009-05-08 17:03     ` [PATCH][KVM][retry 2] " Mark Langsdorf
2009-05-08 17:03       ` [PATCH][retry " Mark Langsdorf
2009-05-08 18:44       ` [PATCH][KVM][retry " Avi Kivity
2009-05-08 18:44         ` [PATCH][retry " Avi Kivity
2009-05-08 18:47         ` [PATCH][KVM][retry " Langsdorf, Mark
2009-05-08 18:47           ` [PATCH][retry " Langsdorf, Mark
2009-05-19 18:56       ` [PATCH][KVM][retry 3] " Mark Langsdorf
2009-05-19 18:56         ` [PATCH][retry " Mark Langsdorf
2009-05-20  7:40         ` [PATCH][KVM][retry " Ingo Molnar
2009-05-20  7:59         ` Peter Zijlstra
2009-05-20  7:59           ` [PATCH][retry " Peter Zijlstra
2009-05-20  8:38           ` [PATCH][KVM][retry " Avi Kivity
2009-05-20  8:38             ` [PATCH][retry " Avi Kivity
2009-05-20  8:42             ` [PATCH][KVM][retry " Peter Zijlstra
2009-05-20  8:42               ` [PATCH][retry " Peter Zijlstra
2009-05-20  8:49               ` [PATCH][KVM][retry " Avi Kivity
2009-05-20  8:49                 ` [PATCH][retry " Avi Kivity
2009-05-20  8:54                 ` [PATCH][KVM][retry " Peter Zijlstra
2009-05-20  8:54                   ` [PATCH][retry " Peter Zijlstra
2009-05-20  9:04                   ` [PATCH][KVM][retry " Avi Kivity
2009-05-20  9:04                     ` [PATCH][retry " Avi Kivity
2009-05-20  9:10                     ` [PATCH][KVM][retry " Peter Zijlstra
2009-05-20  9:10                       ` [PATCH][retry " Peter Zijlstra
2009-05-20  9:17                       ` [PATCH][KVM][retry " Avi Kivity
2009-05-20  9:17                         ` [PATCH][retry " Avi Kivity
2009-05-20 13:52                       ` [PATCH][KVM][retry " Langsdorf, Mark
2009-05-20 13:52                         ` [PATCH][retry " Langsdorf, Mark
2009-05-20 12:00         ` [PATCH][KVM][retry " Avi Kivity
2009-05-20 12:00           ` [PATCH][retry " Avi Kivity
2009-05-20 22:25         ` Mark Langsdorf [this message]
2009-05-20 22:25           ` [PATCH][retry 4] " Mark Langsdorf
2009-05-21  8:47           ` [PATCH][KVM][retry " Avi Kivity
2009-05-21  8:47             ` [PATCH][retry " Avi Kivity
2009-07-08  5:19           ` [PATCH][KVM][retry " Sheng Yang
2009-07-08  5:19             ` [PATCH][retry " Sheng Yang
2009-07-08 14:59             ` [PATCH][KVM][retry " Langsdorf, Mark
2009-07-08 14:59               ` [PATCH][retry " Langsdorf, Mark
2009-07-09  1:50               ` [PATCH][KVM][retry " Sheng Yang
2009-07-09  1:50                 ` [PATCH][retry " Sheng Yang
2009-07-22 22:40                 ` [PATCH][KVM][retry " Langsdorf, Mark
2009-07-22 22:40                   ` [PATCH][retry " Langsdorf, Mark
2009-08-05  9:08                   ` [PATCH][KVM][retry " Zhai, Edwin
2009-08-05  9:08                     ` [PATCH][retry " Zhai, Edwin
2009-05-11 14:38 ` [PATCH][KVM] " Peter Zijlstra
2009-05-11 14:51   ` Ingo Molnar

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=200905201725.18046.mark.langsdorf@amd.com \
    --to=mark.langsdorf@amd.com \
    --cc=avi@redhat.com \
    --cc=joerg.roedel@amd.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@elte.hu \
    --cc=peterz@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.