From: Peter Zijlstra <peterz@infradead.org>
To: Linus Torvalds <torvalds@linux-foundation.org>,
	Ingo Molnar <mingo@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>, Peter Anvin <hpa@zytor.com>,
	Mike Galbraith <bitbucket@online.de>,
	Thomas Gleixner <tglx@linutronix.de>,
	Arjan van de Ven <arjan@linux.intel.com>,
	Frederic Weisbecker <fweisbec@gmail.com>,
	linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	Peter Zijlstra <peterz@infradead.org>
Subject: [PATCH 10/11] sched, x86: Provide a per-cpu preempt_count implementation
Date: Tue, 17 Sep 2013 11:10:56 +0200
Message-ID: <20130917091144.031795524@infradead.org>
In-Reply-To: <20130917082838.218329307@infradead.org>

[-- Attachment #1: peterz-x86-per-cpu-preempt_count.patch --]
[-- Type: text/plain, Size: 8120 bytes --]

Convert x86 to use a per-cpu preemption count. The reason for doing so
is that accessing per-cpu variables is a lot cheaper than accessing
thread_info variables.

Because of PREEMPT_ACTIVE we still need to save/restore the actual
preemption count across context switches, so we place the per-cpu
__preempt_count variable in the same cache line as the other hot
__switch_to() variables, such as current_task.

Also rename thread_info::preempt_count to ensure nobody is
'accidentally' still poking at it.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
---
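
A minimal user-space sketch of the inverted NEED_RESCHED scheme the
code below relies on (kept under the cut so it stays out of the
changelog). The names and the mask value are illustrative stand-ins
for what the earlier patches in this series introduce:

	#include <stdbool.h>

	/* Illustrative stand-in for the real PREEMPT_NEED_RESCHED. */
	#define SKETCH_NEED_RESCHED	0x80000000	/* stored inverted: bit clear => resched needed */

	static int sketch_count = SKETCH_NEED_RESCHED;	/* count 0, no resched pending */

	static void sketch_set_need_resched(void)
	{
		sketch_count &= ~SKETCH_NEED_RESCHED;	/* clearing the inverted bit marks "resched needed" */
	}

	static bool sketch_dec_and_test(void)
	{
		/*
		 * Hitting 0 means the low bits (the count) are 0, so we may
		 * preempt, AND the inverted bit is clear, so we must
		 * reschedule.  On x86 that is a single decl + conditional
		 * jump, which is what GEN_UNARY_RMWcc() emits below.
		 */
		return --sketch_count == 0;
	}
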
 arch/x86/include/asm/Kbuild        |    1 
 arch/x86/include/asm/preempt.h     |   98 +++++++++++++++++++++++++++++++++++++
 arch/x86/include/asm/thread_info.h |    5 -
 arch/x86/kernel/asm-offsets.c      |    1 
 arch/x86/kernel/cpu/common.c       |    5 +
 arch/x86/kernel/entry_32.S         |    7 --
 arch/x86/kernel/entry_64.S         |    4 -
 arch/x86/kernel/process_32.c       |   10 +++
 arch/x86/kernel/process_64.c       |   10 +++
 9 files changed, 128 insertions(+), 13 deletions(-)

--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -5,4 +5,3 @@ genhdr-y += unistd_64.h
 genhdr-y += unistd_x32.h
 
 generic-y += clkdev.h
-generic-y += preempt.h
--- /dev/null
+++ b/arch/x86/include/asm/preempt.h
@@ -0,0 +1,98 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/rmwcc.h>
+#include <asm/percpu.h>
+#include <linux/thread_info.h>
+
+DECLARE_PER_CPU(int, __preempt_count);
+
+/*
+ * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
+ * that think a non-zero value indicates we cannot preempt.
+ */
+static __always_inline int preempt_count(void)
+{
+	return __this_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline int *preempt_count_ptr(void)
+{
+	return &__raw_get_cpu_var(__preempt_count);
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define task_preempt_count(p) \
+	(task_thread_info(p)->saved_preempt_count & ~PREEMPT_NEED_RESCHED)
+
+#define init_task_preempt_count(p) do { \
+	task_thread_info(p)->saved_preempt_count = 1 | PREEMPT_NEED_RESCHED; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+	task_thread_info(p)->saved_preempt_count = 0; \
+	per_cpu(__preempt_count, (cpu)) = 0; \
+} while (0)
+
+/*
+ * We fold the NEED_RESCHED bit into the preempt count such that
+ * preempt_enable() can decrement and test for needing to reschedule with a
+ * single instruction.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know we both
+ * need to resched (the bit is cleared) and can resched (no preempt count).
+ */
+
+static __always_inline void set_preempt_need_resched(void)
+{
+	__this_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+	__this_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+	return !(__this_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+	__this_cpu_add_4(__preempt_count, val);
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+	__this_cpu_add_4(__preempt_count, -val);
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+}
+
+/*
+ * Returns true when we need to resched -- even if we cannot.
+ */
+static __always_inline bool need_resched(void)
+{
+	return unlikely(test_preempt_need_resched());
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+	return unlikely(!__this_cpu_read_4(__preempt_count));
+}
+
+#endif /* __ASM_PREEMPT_H */
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -28,8 +28,7 @@ struct thread_info {
 	__u32			flags;		/* low level flags */
 	__u32			status;		/* thread synchronous flags */
 	__u32			cpu;		/* current CPU */
-	int			preempt_count;	/* 0 => preemptable,
-						   <0 => BUG */
+	int			saved_preempt_count;
 	mm_segment_t		addr_limit;
 	struct restart_block    restart_block;
 	void __user		*sysenter_return;
@@ -49,7 +48,7 @@ struct thread_info {
 	.exec_domain	= &default_exec_domain,	\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.preempt_count	= INIT_PREEMPT_COUNT,	\
+	.saved_preempt_count = INIT_PREEMPT_COUNT,	\
 	.addr_limit	= KERNEL_DS,		\
 	.restart_block = {			\
 		.fn = do_no_restart_syscall,	\
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -32,7 +32,6 @@ void common(void) {
 	OFFSET(TI_flags, thread_info, flags);
 	OFFSET(TI_status, thread_info, status);
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
-	OFFSET(TI_preempt_count, thread_info, preempt_count);
 
 	BLANK();
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1095,6 +1095,9 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =
 
 DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 
+DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
+EXPORT_PER_CPU_SYMBOL(__preempt_count);
+
 DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
 
 /*
@@ -1169,6 +1172,8 @@ void debug_stack_reset(void)
 
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
+DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
+EXPORT_PER_CPU_SYMBOL(__preempt_count);
 DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -362,12 +362,9 @@ END(ret_from_exception)
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
-	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
-	jnz restore_all
 need_resched:
-	movl TI_flags(%ebp), %ecx	# need_resched set ?
-	testb $_TIF_NEED_RESCHED, %cl
-	jz restore_all
+	cmpl $0,PER_CPU_VAR(__preempt_count)
+	jnz restore_all
 	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
 	call preempt_schedule_irq
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1103,10 +1103,8 @@ ENTRY(native_iret)
 	/* Returning to kernel space. Check if we need preemption */
 	/* rcx:	 threadinfo. interrupts off. */
 ENTRY(retint_kernel)
-	cmpl $0,TI_preempt_count(%rcx)
+	cmpl $0,PER_CPU_VAR(__preempt_count)
 	jnz  retint_restore_args
-	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
-	jnc  retint_restore_args
 	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
 	jnc  retint_restore_args
 	call preempt_schedule_irq
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -291,6 +291,16 @@ __switch_to(struct task_struct *prev_p,
 	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
 		set_iopl_mask(next->iopl);
 
+#ifdef CONFIG_PREEMPT_COUNT
+	/*
+	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
+	 * preempt_count of all tasks was equal here and this would not be
+	 * needed.
+	 */
+	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
+	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
+#endif
+
 	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -363,6 +363,16 @@ __switch_to(struct task_struct *prev_p,
 	this_cpu_write(old_rsp, next->usersp);
 	this_cpu_write(current_task, next_p);
 
+#ifdef CONFIG_PREEMPT_COUNT
+	/*
+	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
+	 * preempt_count of all tasks was equal here and this would not be
+	 * needed.
+	 */
+	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
+	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
+#endif
+
 	this_cpu_write(kernel_stack,
 		  (unsigned long)task_stack_page(next_p) +
 		  THREAD_SIZE - KERNEL_STACK_OFFSET);

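For context, roughly how the generic preempt_enable() ends up using
these primitives once the series is applied -- a paraphrased sketch,
not quoted from patches 09/11 or 11/11, and the __preempt_schedule()
name is assumed from the follow-up patch:

	#define preempt_enable() \
	do { \
		barrier(); \
		if (unlikely(__preempt_count_dec_and_test())) \
			__preempt_schedule(); \
	} while (0)

With the per-cpu count and the folded NEED_RESCHED bit, this should
compile on x86 to a single decl of %gs:__preempt_count plus a
conditional call -- no thread_info access at all, which is the point
of this patch.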

