From: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
To: linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	virtualization@lists.linux-foundation.org,
	linux-s390@vger.kernel.org,
	xen-devel-request@lists.xenproject.org, kvm@vger.kernel.org,
	xen-devel@lists.xenproject.org, x86@kernel.org
Cc: benh@kernel.crashing.org, paulus@samba.org, mpe@ellerman.id.au,
	mingo@redhat.com, peterz@infradead.org,
	paulmck@linux.vnet.ibm.com, will.deacon@arm.com,
	kernellwp@gmail.com, jgross@suse.com, pbonzini@redhat.com,
	bsingharora@gmail.com, boqun.feng@gmail.com,
	borntraeger@de.ibm.com, rkrcmar@redhat.com,
	David.Laight@ACULAB.COM, dave@stgolabs.net,
	konrad.wilk@oracle.com,
	Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Subject: [PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
Date: Wed,  2 Nov 2016 05:08:33 -0400
Message-ID: <1478077718-37424-7-git-send-email-xinhui.pan@linux.vnet.ibm.com>
In-Reply-To: <1478077718-37424-1-git-send-email-xinhui.pan@linux.vnet.ibm.com>

This fixes some lock holder preemption issues. Several lock
implementations spin in a loop before acquiring the lock itself. The
kernel now has the interface bool vcpu_is_preempted(int cpu): it takes
a cpu number and returns true if the vCPU running on that cpu has been
preempted. The kernel can then break out of such spin loops based on
the return value of vcpu_is_preempted().
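
As a minimal sketch (not part of this patch), this is the kind of
spin-wait loop that patches 02 and 03 in this series teach to use the
new interface; the shape follows mutex_spin_on_owner(), but treat the
details as illustrative:

	while (lock->owner == owner) {
		/*
		 * Stop spinning once the lock holder's vCPU has been
		 * preempted: it cannot release the lock until the
		 * hypervisor schedules it again.
		 */
		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
			break;
		cpu_relax();
	}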

Since the kernel already uses this interface, let's support it on x86
as well.

To cover both the native kernel and kvm/xen guests, add
vcpu_is_preempted to struct pv_lock_ops.

KVM and Xen can then provide their own implementations of
vcpu_is_preempted, as sketched below.
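
For reference, a rough sketch of how the KVM guest side can override
the native version; this follows patches 08 and 09 of this series,
assuming a preempted flag the host publishes in the steal-time area,
but the exact names should be treated as illustrative:

	/* Guest-side check of a flag the host sets when it preempts the vCPU. */
	static bool kvm_vcpu_is_preempted(int cpu)
	{
		struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

		return !!src->preempted;
	}

	/* During pv spinlock setup in the guest: */
	pv_lock_ops.vcpu_is_preempted = kvm_vcpu_is_preempted;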

Signed-off-by: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/paravirt_types.h | 2 ++
 arch/x86/include/asm/spinlock.h       | 8 ++++++++
 arch/x86/kernel/paravirt-spinlocks.c  | 6 ++++++
 3 files changed, 16 insertions(+)

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0f400c0..38c3bb7 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -310,6 +310,8 @@ struct pv_lock_ops {
 
 	void (*wait)(u8 *ptr, u8 val);
 	void (*kick)(int cpu);
+
+	bool (*vcpu_is_preempted)(int cpu);
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 921bea7..0526f59 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,6 +26,14 @@
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+	return pv_lock_ops.vcpu_is_preempted(cpu);
+}
+#endif
+
 #include <asm/qspinlock.h>
 
 /*
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 2c55a00..2f204dd 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -21,12 +21,18 @@ bool pv_is_native_spin_unlock(void)
 		__raw_callee_save___native_queued_spin_unlock;
 }
 
+static bool native_vcpu_is_preempted(int cpu)
+{
+	return false;
+}
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
 	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
 	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
 	.wait = paravirt_nop,
 	.kick = paravirt_nop,
+	.vcpu_is_preempted = native_vcpu_is_preempted,
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
-- 
2.4.11

Thread overview: 74+ messages

2016-11-02  9:08 [PATCH v7 00/11] implement vcpu preempted check Pan Xinhui
2016-11-02  9:08 ` [PATCH v7 01/11] kernel/sched: introduce vcpu preempted check interface Pan Xinhui
2016-11-22 12:31   ` [tip:locking/core] sched/core: Introduce the vcpu_is_preempted(cpu) interface tip-bot for Pan Xinhui
2016-11-02  9:08 ` [PATCH v7 02/11] locking/osq: Drop the overload of osq_lock() Pan Xinhui
2016-11-22 12:36   ` [tip:locking/core] locking/osq: Break out of spin-wait busy waiting loop for a preempted vCPU in osq_lock() tip-bot for Pan Xinhui
2016-11-02  9:08 ` [PATCH v7 03/11] kernel/locking: Drop the overload of {mutex,rwsem}_spin_on_owner Pan Xinhui
2016-11-22 12:36   ` [tip:locking/core] locking/mutex: Break out of expensive busy-loop on {mutex,rwsem}_spin_on_owner() when owner vCPU is preempted tip-bot for Pan Xinhui
2016-11-02  9:08 ` [PATCH v7 04/11] powerpc/spinlock: support vcpu preempted check Pan Xinhui
2016-11-22 12:32   ` [tip:locking/core] locking/core, powerpc: Implement vcpu_is_preempted(cpu) tip-bot for Pan Xinhui
2016-11-02  9:08 ` [PATCH v7 05/11] s390/spinlock: Provide vcpu_is_preempted Pan Xinhui
2016-11-22 12:32   ` [tip:locking/core] locking/spinlocks, s390: Implement vcpu_is_preempted(cpu) tip-bot for Christian Borntraeger
2016-11-02  9:08 ` [PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check Pan Xinhui [this message]
2016-11-15 15:47   ` Peter Zijlstra
2016-11-16  4:19     ` Pan Xinhui
2016-11-16 10:23       ` Peter Zijlstra
2016-11-16 11:29         ` Christian Borntraeger
2016-11-16 11:43           ` Peter Zijlstra
2016-11-17  5:16         ` Pan Xinhui
2016-11-22 12:33   ` [tip:locking/core] locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests tip-bot for Pan Xinhui
2016-11-02  9:08 ` [PATCH v7 07/11] KVM: Introduce kvm_write_guest_offset_cached Pan Xinhui
2016-11-22 12:33   ` [tip:locking/core] kvm: Introduce kvm_write_guest_offset_cached() tip-bot for Pan Xinhui
2016-11-02  9:08 ` [PATCH v7 08/11] x86, kvm/x86.c: support vcpu preempted check Pan Xinhui
2016-11-22 12:34   ` [tip:locking/core] x86/kvm: Support the vCPU preemption check tip-bot for Pan Xinhui
2016-12-19 11:42   ` [PATCH v7 08/11] x86, kvm/x86.c: support vcpu preempted check Andrea Arcangeli
2016-12-19 13:56     ` Pan Xinhui
2016-12-19 14:39       ` Paolo Bonzini
2016-11-02  9:08 ` [PATCH v7 09/11] x86, kernel/kvm.c: support vcpu preempted check Pan Xinhui
2016-11-22 12:34   ` [tip:locking/core] x86/kvm: Support the vCPU preemption check tip-bot for Pan Xinhui
2016-11-02  9:08 ` [PATCH v7 10/11] x86, xen: support vcpu preempted check Pan Xinhui
2016-11-22 12:35   ` [tip:locking/core] x86/xen: Support the vCPU preemption check tip-bot for Juergen Gross
2016-11-02  9:08 ` [PATCH v7 11/11] Documentation: virtual: kvm: Support vcpu preempted check Pan Xinhui
2016-11-22 12:35   ` [tip:locking/core] Documentation/virtual/kvm: Support the vCPU preemption check tip-bot for Pan Xinhui
