From: Peter Zijlstra <peterz@infradead.org>
To: Waiman Long <longman@redhat.com>
Cc: "Jeremy Fitzhardinge" <jeremy@goop.org>,
	"Chris Wright" <chrisw@sous-sol.org>,
	"Alok Kataria" <akataria@vmware.com>,
	"Rusty Russell" <rusty@rustcorp.com.au>,
	"Ingo Molnar" <mingo@redhat.com>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"H. Peter Anvin" <hpa@zytor.com>,
	linux-arch@vger.kernel.org, x86@kernel.org,
	linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org,
	xen-devel@lists.xenproject.org, kvm@vger.kernel.org,
	"Pan Xinhui" <xinhui.pan@linux.vnet.ibm.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Boris Ostrovsky" <boris.ostrovsky@oracle.com>,
	"Juergen Gross" <jgross@suse.com>,
	andrew.cooper3@citrix.com
Subject: Re: [PATCH v4 2/2] x86/kvm: Provide optimized version of vcpu_is_preempted() for x86-64
Date: Thu, 16 Feb 2017 17:48:15 +0100
Message-ID: <20170216164815.GD6515@twins.programming.kicks-ass.net>
In-Reply-To: <1487194670-6319-3-git-send-email-longman@redhat.com>

On Wed, Feb 15, 2017 at 04:37:50PM -0500, Waiman Long wrote:
> +/*
> + * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
> + * restoring to/from the stack. It is assumed that the preempted value
> + * is at an offset of 16 from the beginning of the kvm_steal_time structure
> + * which is verified by the BUILD_BUG_ON() macro below.
> + */
> +#define PREEMPTED_OFFSET	16

As per Andrew's suggestion, the 'right' way is something like the patch below.

---
 asm-offsets_64.c |   11 +++++++++++
 kvm.c            |   14 ++++----------
 2 files changed, 15 insertions(+), 10 deletions(-)

--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -13,6 +13,10 @@ static char syscalls_ia32[] = {
 #include <asm/syscalls_32.h>
 };
 
+#ifdef CONFIG_KVM_GUEST
+#include <asm/kvm_para.h>
+#endif
+
 int main(void)
 {
 #ifdef CONFIG_PARAVIRT
@@ -22,6 +26,13 @@ int main(void)
 	BLANK();
 #endif
 
+#ifdef CONFIG_KVM_GUEST
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+	OFFSET(KVM_STEAL_TIME_preempted, kvm_steal_time, preempted);
+	BLANK();
+#endif
+#endif
+
 #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
 	ENTRY(bx);
 	ENTRY(cx);
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -600,22 +600,21 @@ PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_
 
 #else
 
+#include <asm/asm-offsets.h>
+
 extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
 
 /*
  * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
- * restoring to/from the stack. It is assumed that the preempted value
- * is at an offset of 16 from the beginning of the kvm_steal_time structure
- * which is verified by the BUILD_BUG_ON() macro below.
+ * restoring to/from the stack.
  */
-#define PREEMPTED_OFFSET	16
 asm(
 ".pushsection .text;"
 ".global __raw_callee_save___kvm_vcpu_is_preempted;"
 ".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
 "__raw_callee_save___kvm_vcpu_is_preempted:"
 "movq	__per_cpu_offset(,%rdi,8), %rax;"
-"cmpb	$0, " __stringify(PREEMPTED_OFFSET) "+steal_time(%rax);"
+"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
 "setne	%al;"
 "ret;"
 ".popsection");
@@ -627,11 +626,6 @@ asm(
  */
 void __init kvm_spinlock_init(void)
 {
-#ifdef CONFIG_X86_64
-	BUILD_BUG_ON((offsetof(struct kvm_steal_time, preempted)
-		!= PREEMPTED_OFFSET) || (sizeof(steal_time.preempted) != 1));
-#endif
-
 	if (!kvm_para_available())
 		return;
 	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */

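For completeness, since this relies on the asm-offsets machinery:
asm-offsets_64.c is only ever compiled to assembly, never linked.
OFFSET() emits a magic ".ascii" marker into that assembly, and Kbuild's
sed rule scrapes the markers into #define lines in the generated
include/generated/asm-offsets.h. A trimmed sketch of the macros from
include/linux/kbuild.h:

	/* embed "->sym value comment" as a marker in the .s output */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	/* emit an empty marker, purely to space out the generated header */
	#define BLANK() asm volatile("\n.ascii \"->\"" : : )

	/* sym becomes a #define carrying offsetof(struct str, mem) */
	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))

The generated header then ends up with something like:

	#define KVM_STEAL_TIME_preempted 16 /* offsetof(struct kvm_steal_time, preempted) */

so the __stringify() in the asm template always pastes in the offset as
computed from the structure itself, and the BUILD_BUG_ON() becomes
redundant: the constant can no longer go stale when the layout changes.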
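
As for the asm body itself, it is just the hand-scheduled form of the
plain C version kept in the other branch (see the
PV_CALLEE_SAVE_REGS_THUNK context above); roughly, as a sketch rather
than the exact source:

	__visible bool __kvm_vcpu_is_preempted(long cpu)
	{
		/* movq __per_cpu_offset(,%rdi,8), %rax */
		struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

		/* cmpb $0, KVM_STEAL_TIME_preempted+steal_time(%rax); setne %al */
		return !!src->preempted;
	}

Writing it by hand avoids the PV_CALLEE_SAVE_REGS_THUNK() wrapper,
which (per the comment in the patch) would otherwise save and restore
eight 64-bit registers around a function body of three instructions on
the vcpu_is_preempted() fast path.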

Thread overview:

2017-02-15 21:37 [PATCH v4 0/2] x86/kvm: Reduce vcpu_is_preempted() overhead Waiman Long
2017-02-15 21:37 ` [PATCH v4 1/2] x86/paravirt: Change vcp_is_preempted() arg type to long Waiman Long
2017-02-16 16:09   ` Peter Zijlstra
2017-02-16 21:02     ` Waiman Long
2017-02-17  9:42       ` Peter Zijlstra
2017-02-15 21:37 ` [PATCH v4 2/2] x86/kvm: Provide optimized version of vcpu_is_preempted() for x86-64 Waiman Long
2017-02-16 16:48   ` Peter Zijlstra [this message]
2017-02-16 21:00     ` Waiman Long
