* [PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
From: Waiman Long @ 2017-02-08 18:00 UTC
  To: Jeremy Fitzhardinge, Chris Wright, Alok Kataria, Rusty Russell,
	Peter Zijlstra, Ingo Molnar, Thomas Gleixner, H. Peter Anvin
  Cc: linux-arch, Juergen Gross, kvm, Radim Krčmář,
	Pan Xinhui, x86, linux-kernel, virtualization, Waiman Long,
	Paolo Bonzini, xen-devel, Boris Ostrovsky

It was found that, when running a fio sequential write test with an
XFS ramdisk on a 2-socket x86-64 system, the %CPU times as reported
by perf were as follows:

 71.27%  0.28%  fio  [k] down_write
 70.99%  0.01%  fio  [k] call_rwsem_down_write_failed
 69.43%  1.18%  fio  [k] rwsem_down_write_failed
 65.51% 54.57%  fio  [k] osq_lock
  9.72%  7.99%  fio  [k] __raw_callee_save___kvm_vcpu_is_preempted
  4.16%  4.16%  fio  [k] __kvm_vcpu_is_preempted
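
For reference, the __raw_callee_save___kvm_vcpu_is_preempted entry
above is the assembly thunk emitted by PV_CALLEE_SAVE_REGS_THUNK().
A rough sketch of what such a thunk does (illustrative only, not the
literal macro expansion; frame handling omitted and the register
list abbreviated):

	/*
	 * Sketch of a callee-save thunk: save and restore every
	 * caller-clobbered register (except the return register %rax)
	 * around the call to the real C function, so that the call
	 * site itself sees almost no clobbers.  Kernel context
	 * assumed; __kvm_vcpu_is_preempted is the real C function.
	 */
	asm(".pushsection .text;"
	    ".globl __raw_callee_save___kvm_vcpu_is_preempted;"
	    "__raw_callee_save___kvm_vcpu_is_preempted:"
	    "push %rdi; push %rsi; push %rdx; push %rcx;"
	    "push %r8;  push %r9;  push %r10; push %r11;"
	    "call __kvm_vcpu_is_preempted;"
	    "pop %r11;  pop %r10;  pop %r9;  pop %r8;"
	    "pop %rcx;  pop %rdx;  pop %rsi; pop %rdi;"
	    "ret;"
	    ".popsection");

All of that push/pop work is executed on every call, which is what
the extra ~8% of %CPU time in the profile above is paying for.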

Making vcpu_is_preempted() a callee-save function therefore has a
pretty high cost associated with it. As vcpu_is_preempted() is only
called within the spinlock, mutex and rwsem slowpaths, there isn't
much to gain by making it callee-save, so it is changed to a normal
function call instead.
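
For context, these slowpaths poll vcpu_is_preempted() while
optimistically spinning on the lock owner. A minimal sketch of that
pattern (simplified from the kernel's owner-spinning code; the
helper name is made up for illustration):

	#include <linux/sched.h>	/* task_struct, task_cpu() */

	/*
	 * Keep spinning only while the lock owner is actively running
	 * on a CPU whose vCPU has not been preempted by the
	 * hypervisor; spinning on a descheduled owner just burns
	 * host CPU time.
	 */
	static inline bool owner_worth_spinning(struct task_struct *owner)
	{
		return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	}

Since these are already slow, contended paths, saving a few register
spills at each call site (the whole point of the callee-save
convention) buys little, while the thunk overhead is paid on every
poll.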

With this patch applied, the aggregate bandwidth of the fio sequential
write test increased slightly from 2563.3MB/s to 2588.1MB/s (about 1%).

Signed-off-by: Waiman Long <longman@redhat.com>
---
 arch/x86/include/asm/paravirt.h       | 2 +-
 arch/x86/include/asm/paravirt_types.h | 2 +-
 arch/x86/kernel/kvm.c                 | 7 ++-----
 arch/x86/kernel/paravirt-spinlocks.c  | 6 ++----
 arch/x86/xen/spinlock.c               | 4 +---
 5 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 864f57b..2515885 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -676,7 +676,7 @@ static __always_inline void pv_kick(int cpu)
 
 static __always_inline bool pv_vcpu_is_preempted(int cpu)
 {
-	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+	return PVOP_CALL1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
 }
 
 #endif /* SMP && PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index bb2de45..88dc852 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -309,7 +309,7 @@ struct pv_lock_ops {
 	void (*wait)(u8 *ptr, u8 val);
 	void (*kick)(int cpu);
 
-	struct paravirt_callee_save vcpu_is_preempted;
+	bool (*vcpu_is_preempted)(int cpu);
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 099fcba..eb3753d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -595,7 +595,6 @@ __visible bool __kvm_vcpu_is_preempted(int cpu)
 
 	return !!src->preempted;
 }
-PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
 
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
@@ -614,10 +613,8 @@ void __init kvm_spinlock_init(void)
 	pv_lock_ops.wait = kvm_wait;
 	pv_lock_ops.kick = kvm_kick_cpu;
 
-	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
-		pv_lock_ops.vcpu_is_preempted =
-			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
-	}
+	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
+		pv_lock_ops.vcpu_is_preempted = __kvm_vcpu_is_preempted;
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 6259327..da050bc 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -24,12 +24,10 @@ __visible bool __native_vcpu_is_preempted(int cpu)
 {
 	return false;
 }
-PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
 
 bool pv_is_native_vcpu_is_preempted(void)
 {
-	return pv_lock_ops.vcpu_is_preempted.func ==
-		__raw_callee_save___native_vcpu_is_preempted;
+	return pv_lock_ops.vcpu_is_preempted == __native_vcpu_is_preempted;
 }
 
 struct pv_lock_ops pv_lock_ops = {
@@ -38,7 +36,7 @@ struct pv_lock_ops pv_lock_ops = {
 	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
 	.wait = paravirt_nop,
 	.kick = paravirt_nop,
-	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+	.vcpu_is_preempted = __native_vcpu_is_preempted,
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 25a7c43..c85bb8f 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -114,8 +114,6 @@ void xen_uninit_lock_cpu(int cpu)
 	per_cpu(irq_name, cpu) = NULL;
 }
 
-PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
-
 /*
  * Our init of PV spinlocks is split in two init functions due to us
  * using paravirt patching and jump labels patching and having to do
@@ -138,7 +136,7 @@ void __init xen_init_spinlocks(void)
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 	pv_lock_ops.wait = xen_qlock_wait;
 	pv_lock_ops.kick = xen_qlock_kick;
-	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
+	pv_lock_ops.vcpu_is_preempted = xen_vcpu_stolen;
 }
 
 static __init int xen_parse_nopvspin(char *arg)
-- 
1.8.3.1


* Re: [PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
From: Waiman Long @ 2017-02-08 20:17 UTC
  To: Peter Zijlstra
  Cc: Jeremy Fitzhardinge, Chris Wright, Alok Kataria, Rusty Russell,
	Ingo Molnar, Thomas Gleixner, H. Peter Anvin, linux-arch, x86,
	linux-kernel, virtualization, xen-devel, kvm, Pan Xinhui,
	Paolo Bonzini, Radim Krčmář,
	Boris Ostrovsky, Juergen Gross

On 02/08/2017 02:05 PM, Peter Zijlstra wrote:
> On Wed, Feb 08, 2017 at 01:00:24PM -0500, Waiman Long wrote:
>> It was found that, when running a fio sequential write test with an
>> XFS ramdisk on a 2-socket x86-64 system, the %CPU times as reported
>> by perf were as follows:
>>
>>  71.27%  0.28%  fio  [k] down_write
>>  70.99%  0.01%  fio  [k] call_rwsem_down_write_failed
>>  69.43%  1.18%  fio  [k] rwsem_down_write_failed
>>  65.51% 54.57%  fio  [k] osq_lock
>>   9.72%  7.99%  fio  [k] __raw_callee_save___kvm_vcpu_is_preempted
>>   4.16%  4.16%  fio  [k] __kvm_vcpu_is_preempted
>>
>> Making vcpu_is_preempted() a callee-save function therefore has a
>> pretty high cost associated with it. As vcpu_is_preempted() is only
>> called within the spinlock, mutex and rwsem slowpaths, there isn't
>> much to gain by making it callee-save, so it is changed to a normal
>> function call instead.
>>
> Numbers for bare metal too please.

I will run the test on bare metal, but I doubt there will be a
noticeable difference.

Cheers,
Longman

* Re: [PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
From: Peter Zijlstra @ 2017-02-08 19:05 UTC
  To: Waiman Long
  Cc: Jeremy Fitzhardinge, Chris Wright, Alok Kataria, Rusty Russell,
	Ingo Molnar, Thomas Gleixner, H. Peter Anvin, linux-arch, x86,
	linux-kernel, virtualization, xen-devel, kvm, Pan Xinhui,
	Paolo Bonzini, Radim Krčmář,
	Boris Ostrovsky, Juergen Gross

On Wed, Feb 08, 2017 at 01:00:24PM -0500, Waiman Long wrote:
> It was found that, when running a fio sequential write test with an
> XFS ramdisk on a 2-socket x86-64 system, the %CPU times as reported
> by perf were as follows:
> 
>  71.27%  0.28%  fio  [k] down_write
>  70.99%  0.01%  fio  [k] call_rwsem_down_write_failed
>  69.43%  1.18%  fio  [k] rwsem_down_write_failed
>  65.51% 54.57%  fio  [k] osq_lock
>   9.72%  7.99%  fio  [k] __raw_callee_save___kvm_vcpu_is_preempted
>   4.16%  4.16%  fio  [k] __kvm_vcpu_is_preempted
> 
> Making vcpu_is_preempted() a callee-save function therefore has a
> pretty high cost associated with it. As vcpu_is_preempted() is only
> called within the spinlock, mutex and rwsem slowpaths, there isn't
> much to gain by making it callee-save, so it is changed to a normal
> function call instead.
> 

Numbers for bare metal too please.
