From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, Steven Rostedt <rostedt@goodmis.org>,
"Paul E. McKenney" <paulmck@linux.ibm.com>,
Masami Hiramatsu <mhiramat@kernel.org>,
Paolo Bonzini <pbonzini@redhat.com>
Subject: [patch 1/8] preempt: Use CONFIG_PREEMPTION where appropriate
Date: Fri, 26 Jul 2019 23:19:37 +0200
Message-ID: <20190726212124.117528401@linutronix.de>
In-Reply-To: <20190726211936.226129163@linutronix.de>

CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT.
Both PREEMPT and PREEMPT_RT require the same functionality which today
depends on CONFIG_PREEMPT.

Switch the preemption code, scheduler and init task over to use
CONFIG_PREEMPTION.

That's the first step towards RT in that area. The more complex changes
are coming separately.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
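[ Illustration, not part of the patch: since Kconfig selects
  CONFIG_PREEMPTION from both CONFIG_PREEMPT and CONFIG_PREEMPT_RT, a
  single guard now covers both full-preemption models. A minimal sketch
  of the resulting pattern: ]

#ifdef CONFIG_PREEMPTION
	/* compiled for PREEMPT=y and for PREEMPT_RT=y alike */
#else
	/* PREEMPT_NONE / PREEMPT_VOLUNTARY only */
#endif
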
 include/asm-generic/preempt.h |    4 ++--
 include/linux/preempt.h       |    6 +++---
 include/linux/sched.h         |    6 +++---
 init/init_task.c              |    2 +-
 init/main.c                   |    2 +-
 kernel/sched/core.c           |   14 +++++++-------
 kernel/sched/fair.c           |    2 +-
 kernel/sched/sched.h          |    4 ++--
 8 files changed, 20 insertions(+), 20 deletions(-)

--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -78,11 +78,11 @@ static __always_inline bool should_resch
tif_need_resched());
}
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
#endif /* __ASM_PREEMPT_H */
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -182,7 +182,7 @@ do { \
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
barrier(); \
@@ -203,7 +203,7 @@ do { \
__preempt_schedule(); \
} while (0)
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
barrier(); \
@@ -217,7 +217,7 @@ do { \
} while (0)
#define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
#define preempt_disable_notrace() \
do { \
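[ Illustration, not part of the patch: a hedged sketch of how these
  macros are typically used; the per-CPU variable and function names
  below are made up. On CONFIG_PREEMPTION kernels the preempt_enable()
  path calls __preempt_schedule() when the count drops to zero and a
  reschedule is pending. ]

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, my_counter);		/* hypothetical */

static void bump_my_counter(void)		/* hypothetical */
{
	preempt_disable();			/* raise preempt_count */
	__this_cpu_inc(my_counter);		/* safe: cannot migrate CPUs */
	preempt_enable();			/* may reschedule on the spot
						 * under CONFIG_PREEMPTION */
}
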
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1759,7 +1759,7 @@ static inline int test_tsk_need_resched(
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
*/
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
@@ -1788,12 +1788,12 @@ static inline void cond_resched_rcu(void
/*
* Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
* but a general need for low latency)
*/
static inline int spin_needbreak(spinlock_t *lock)
{
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
return spin_is_contended(lock);
#else
return 0;
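[ Illustration, not part of the patch: spin_needbreak() is typically
  paired with cond_resched_lock() to break up long spinlock-protected
  loops; the lock and helpers below are made up. ]

#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);		/* hypothetical */

static void drain_queue(void)			/* hypothetical */
{
	spin_lock(&my_lock);
	while (more_work()) {			/* hypothetical predicate */
		do_one_item();			/* hypothetical work unit */
		if (need_resched() || spin_needbreak(&my_lock))
			cond_resched_lock(&my_lock);	/* drop, resched, retake */
	}
	spin_unlock(&my_lock);
}
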
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -174,7 +174,7 @@ struct task_struct init_task
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.ret_stack = NULL,
#endif
-#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
+#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
.trace_recursion = 0,
#endif
#ifdef CONFIG_LIVEPATCH
--- a/init/main.c
+++ b/init/main.c
@@ -433,7 +433,7 @@ noinline void __ref rest_init(void)
/*
* Enable might_sleep() and smp_processor_id() checks.
- * They cannot be enabled earlier because with CONFIG_PREEMPT=y
+ * They cannot be enabled earlier because with CONFIG_PREEMPTION=y
* kernel_thread() would trigger might_sleep() splats. With
* CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
* already, but it's stuck on the kthreadd_done completion.
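[ Illustration, not part of the patch: the "splats" above come from the
  might_sleep() debug check; a hedged sketch with a made-up function. ]

#include <linux/delay.h>
#include <linux/kernel.h>

static void my_sleeping_helper(void)		/* hypothetical */
{
	might_sleep();				/* warns if entered from
						 * atomic context once the
						 * checks are enabled */
	msleep(10);				/* actually sleeps */
}
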
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3581,7 +3581,7 @@ static inline void sched_tick_start(int
static inline void sched_tick_stop(int cpu) { }
#endif
-#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_TRACE_PREEMPT_TOGGLE))
/*
* If the value passed in is equal to the current preempt count
@@ -3782,7 +3782,7 @@ pick_next_task(struct rq *rq, struct tas
* task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
* called on the nearest possible occasion:
*
- * - If the kernel is preemptible (CONFIG_PREEMPT=y):
+ * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
*
* - in syscall or exception context, at the next outmost
* preempt_enable(). (this might be as soon as the wake_up()'s
@@ -3791,7 +3791,7 @@ pick_next_task(struct rq *rq, struct tas
* - in IRQ context, return from interrupt-handler to
* preemptible context
*
- * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
+ * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
* then at the next:
*
* - cond_resched() call
@@ -4033,7 +4033,7 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
/*
* this is the entry point to schedule() from in-kernel preemption
* off of preempt_enable. Kernel preemptions off return from interrupt
@@ -4105,7 +4105,7 @@ asmlinkage __visible void __sched notrac
}
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
/*
* this is the entry point to schedule() from kernel preemption
@@ -5416,7 +5416,7 @@ SYSCALL_DEFINE0(sched_yield)
return 0;
}
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
int __sched _cond_resched(void)
{
if (should_resched(0)) {
@@ -5433,7 +5433,7 @@ EXPORT_SYMBOL(_cond_resched);
* __cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
* operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand).
*/
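[ Illustration, not part of the patch: the cond_resched() case above in
  a hedged sketch; loop body and bounds are made up. ]

	int i;

	for (i = 0; i < nr_items; i++) {	/* hypothetical bound */
		process_item(i);		/* hypothetical work */
		cond_resched();			/* voluntary scheduling point;
						 * nearly free when no resched
						 * is pending */
	}
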
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7376,7 +7376,7 @@ static int detach_tasks(struct lb_env *e
detached++;
env->imbalance -= load;
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
/*
* NEWIDLE balancing is a source of latency, so preemptible
* kernels will stop after the first task is detached to minimize
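[ The comment above is cut short by the hunk window. Paraphrasing the
  surrounding detach_tasks() logic from a best-effort reading, not part
  of the diff: ]

#ifdef CONFIG_PREEMPTION
		/* NEWIDLE balancing runs with rq locks held and is a
		 * latency source, so bail after the first detached task */
		if (env->idle == CPU_NEWLY_IDLE)
			break;
#endif
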
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1943,7 +1943,7 @@ unsigned long arch_scale_freq_capacity(i
#endif
#ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
@@ -1995,7 +1995,7 @@ static inline int _double_lock_balance(s
return ret;
}
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
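[ Illustration, not part of the patch: a hedged sketch of what the
  CONFIG_PREEMPTION variant of _double_lock_balance() does, paraphrased
  from a best-effort reading of the surrounding code: trade an
  unconditional unlock for bounded latency by retaking both runqueue
  locks in a fixed order. ]

static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	raw_spin_unlock(&this_rq->lock);	/* let contenders in */
	double_rq_lock(this_rq, busiest);	/* retake both, ordered */

	return 1;	/* non-zero: this_rq was unlocked, caller must
			 * revalidate its state */
}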