[05/16] lockdep: Cleanup PREEMPT_COUNT leftovers

Message ID: 20201029165019.14218-5-urezki@gmail.com
State: New
Series
  • [01/16] rcu/tree: Add a work to allocate pages from regular context

Commit Message

Uladzislau Rezki Oct. 29, 2020, 4:50 p.m. UTC
From: Thomas Gleixner <tglx@linutronix.de>

CONFIG_PREEMPT_COUNT is now unconditionally enabled, and the config option
will be removed. Clean up the leftovers before doing so.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
[ Rezki: Adapted for the 5.10.0-rc1 kernel. ]
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 include/linux/lockdep.h | 6 ++----
 lib/Kconfig.debug       | 1 -
 2 files changed, 2 insertions(+), 5 deletions(-)
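
For context, a minimal sketch (not part of the patch) of how the two assertion
macros touched below are typically used. The per-CPU counter and both function
names are hypothetical; only lockdep_assert_preemption_disabled() and
lockdep_assert_preemption_enabled() come from <linux/lockdep.h>:

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

/* Callers must have preemption disabled (e.g. via preempt_disable())
 * so the per-CPU access cannot be split across a CPU migration. */
static void my_stats_update(void)
{
        lockdep_assert_preemption_disabled();
        __this_cpu_inc(my_counter);
}

/* Callers must be preemptible with interrupts enabled, e.g. because
 * this path may sleep. */
static void my_sleepable_work(void)
{
        lockdep_assert_preemption_enabled();
        might_sleep();
        /* ... */
}

With the lockdep checks compiled in, these macros expand to the WARN_ON_ONCE()
conditions modified in the hunk below; otherwise they are no-ops.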

Patch

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f5594879175a..d05db575f60f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -580,16 +580,14 @@  do {									\
 
 #define lockdep_assert_preemption_enabled()				\
 do {									\
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
-		     __lockdep_enabled			&&		\
+	WARN_ON_ONCE(__lockdep_enabled			&&		\
 		     (preempt_count() != 0		||		\
 		      !this_cpu_read(hardirqs_enabled)));		\
 } while (0)
 
 #define lockdep_assert_preemption_disabled()				\
 do {									\
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
-		     __lockdep_enabled			&&		\
+	WARN_ON_ONCE(__lockdep_enabled			&&		\
 		     (preempt_count() == 0		&&		\
 		      this_cpu_read(hardirqs_enabled)));		\
 } while (0)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 89c9a177fb9b..03a85065805e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1159,7 +1159,6 @@  config PROVE_LOCKING
 	select DEBUG_RWSEMS
 	select DEBUG_WW_MUTEX_SLOWPATH
 	select DEBUG_LOCK_ALLOC
-	select PREEMPT_COUNT
 	select TRACE_IRQFLAGS
 	default n
 	help
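
For reference, a minimal sketch (not from the patch) of why dropping the check
is safe: once CONFIG_PREEMPT_COUNT is unconditionally set, the
IS_ENABLED(CONFIG_PREEMPT_COUNT) test is the compile-time constant 1, so the
assertion condition reduces as follows:

/*
 * With CONFIG_PREEMPT_COUNT=y always:
 *
 *     IS_ENABLED(CONFIG_PREEMPT_COUNT) && __lockdep_enabled && (cond)
 *  == 1                                && __lockdep_enabled && (cond)
 *  ==                                     __lockdep_enabled && (cond)
 *
 * which is exactly what the simplified macros in the lockdep.h hunk above
 * check. The "select PREEMPT_COUNT" under PROVE_LOCKING is likewise
 * redundant once the option is always enabled.
 */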