From: Peter Zijlstra <peterz@infradead.org>
To: torvalds@linux-foundation.org, keescook@chromium.org,
	gregkh@linuxfoundation.org, pbonzini@redhat.com
Cc: linux-kernel@vger.kernel.org, ojeda@kernel.org,
	ndesaulniers@google.com, peterz@infradead.org, mingo@redhat.com,
	will@kernel.org, longman@redhat.com, boqun.feng@gmail.com,
	juri.lelli@redhat.com, vincent.guittot@linaro.org,
	dietmar.eggemann@arm.com, rostedt@goodmis.org,
	bsegall@google.com, mgorman@suse.de, bristot@redhat.com,
	vschneid@redhat.com, paulmck@kernel.org, frederic@kernel.org,
	quic_neeraju@quicinc.com, joel@joelfernandes.org,
	josh@joshtriplett.org, mathieu.desnoyers@efficios.com,
	jiangshanlai@gmail.com, rcu@vger.kernel.org, tj@kernel.org,
	tglx@linutronix.de
Subject: [PATCH v2 1/2] locking: Introduce __cleanup__ based guards
Date: Fri, 26 May 2023 22:52:05 +0200	[thread overview]
Message-ID: <20230526205855.587341916@infradead.org> (raw)
In-Reply-To: <20230526205204.861311518@infradead.org>

Use __attribute__((__cleanup__(func))) to build pointer and lock
guards.
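
For reference, a minimal sketch (not part of this patch) of what the bare
attribute provides, using kmalloc()/kfree() purely for illustration:

	static inline void kfree_cleanup(void **p)
	{
		kfree(*p);	/* kfree(NULL) is a no-op */
	}

	void foo(void)
	{
		void *mem __cleanup(kfree_cleanup) = kmalloc(64, GFP_KERNEL);

		/* ... use mem; the compiler runs kfree_cleanup(&mem) on scope exit */
	}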

Actual usage follows in the next patch.
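
For illustration only (a sketch, not from this series; consume_task(), 'p'
and 'my_mutex' are hypothetical names), the guard types defined below can
be used like this:

	void consume_task(struct task_struct *p, struct mutex *my_mutex)
	{
		guard(mutex, G, my_mutex);	/* mutex_unlock() at end of scope */

		scoped (rcu) {
			/* rcu_read_lock() held for this block only */
		}

		ptr_guard(put_task, t) = get_task_struct(p);
		/* put_task_struct(t) runs on scope exit, if t is not NULL */
	}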

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/compiler_attributes.h |    6 +
 include/linux/guards.h              |  142 ++++++++++++++++++++++++++++++++++++
 include/linux/irqflags.h            |    7 +
 include/linux/mutex.h               |    5 +
 include/linux/preempt.h             |    4 +
 include/linux/rcupdate.h            |    3 
 include/linux/sched/task.h          |    2 
 include/linux/spinlock.h            |   27 ++++++
 scripts/checkpatch.pl               |    2 
 9 files changed, 197 insertions(+), 1 deletion(-)

--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -77,6 +77,12 @@
 #define __attribute_const__             __attribute__((__const__))
 
 /*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
+ */
+#define __cleanup(func)			__attribute__((__cleanup__(func)))
+
+/*
  * Optional: only supported since gcc >= 9
  * Optional: not supported by clang
  *
--- /dev/null
+++ b/include/linux/guards.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GUARDS_H
+#define __LINUX_GUARDS_H
+
+#include <linux/compiler_attributes.h>
+
+/*
+ * Pointer Guards are special pointers (variables) with a scope-bound cleanup
+ * function.
+ *
+ * Various guard types can be created using:
+ *
+ *   DEFINE_PTR_GUARD(guard_type, typename, cleanup-exp)
+ *
+ * After which they can be used like so:
+ *
+ *   ptr_guard(guard_type, name) = find_get_object(foo);
+ *
+ * Where the return type of find_get_object() should match the guard_type's
+ * 'typename *'. When @name goes out of scope, cleanup-exp is run (inserted
+ * by the compiler) if non-NULL at that time. Also see the __cleanup attribute.
+ */
+
+#define DEFINE_PTR_GUARD(_type, _Type, _Put)					\
+typedef _Type *ptr_guard_##_type##_t;						\
+										\
+static inline void ptr_guard_##_type##_cleanup(_Type **_ptr)			\
+{										\
+	_Type *_G = *_ptr;							\
+	if (_G)									\
+		_Put(_G);							\
+}
+
+#define ptr_guard(_type, _name)							\
+	ptr_guard_##_type##_t _name __cleanup(ptr_guard_##_type##_cleanup)
+
+
+/*
+ * Lock Guards are like the pointer guards above, except they also have
+ * a fixed initializer to cover both the Lock and Unlock of the lock type.
+ *
+ * Lock guards types can be created using:
+ *
+ *   DEFINE_LOCK_GUARD_0(guard_type, Lock, Unlock, [extra guard members])
+ *   DEFINE_LOCK_GUARD_1(guard_type, typename, Lock, Unlock, ...)
+ *   DEFINE_LOCK_GUARD_2(guard_type, typename, Lock, Unlock, ...)
+ *
+ * Where the _n suffix indicates the number of 'typename *' arguments the
+ * Lock function requires.
+ *
+ * Once defined, the lock guards can be used in one of two ways:
+ *
+ *	guard(guard_type, name, var...);
+ *
+ * or:
+ *
+ *	scoped (guard_type, var...) {
+ *		...
+ *	}
+ *
+ * The first creates a named variable that is initialized with the Lock
+ * function and will call the Unlock function when it goes out of scope.
+ *
+ * The second creates an explicit scope, using a for-loop with an implicit
+ * variable named _scope. Again, Lock is called before the scope is entered and
+ * Unlock will be called when the scope is left.
+ *
+ * Both Lock and Unlock are expressions and can access the guard object through
+ * the _G pointer. The guard object will have _n implicit members of type
+ * 'typename *', named 'lock' and 'lock2', as well as any additional
+ * members specified in the definition.
+ */
+
+#define DEFINE_LOCK_GUARD_0(_type, _Lock, _Unlock, ...)				\
+typedef struct {								\
+	__VA_ARGS__;								\
+} lock_guard_##_type##_t;							\
+										\
+static inline void lock_guard_##_type##_cleanup(lock_guard_##_type##_t *_G)	\
+{										\
+	_Unlock;								\
+}										\
+										\
+static inline lock_guard_##_type##_t lock_guard_##_type##_init(void)		\
+{										\
+	lock_guard_##_type##_t _g = { }, *_G __maybe_unused = &_g;		\
+	_Lock;									\
+	return _g;								\
+}
+
+#define DEFINE_LOCK_GUARD_1(_type, _Type, _Lock, _Unlock, ...)			\
+typedef struct {								\
+	_Type *lock;								\
+	__VA_ARGS__;								\
+} lock_guard_##_type##_t;							\
+										\
+static inline void lock_guard_##_type##_cleanup(lock_guard_##_type##_t *_G)	\
+{										\
+	_Unlock;								\
+}										\
+										\
+static inline lock_guard_##_type##_t lock_guard_##_type##_init(_Type *lock)	\
+{										\
+	lock_guard_##_type##_t _g = { .lock = lock }, *_G = &_g;		\
+	_Lock;									\
+	return _g;								\
+}
+
+#define DEFINE_LOCK_GUARD_2(_type, _Type, _Lock, _Unlock, ...)			\
+typedef struct {								\
+	_Type *lock;								\
+	_Type *lock2;								\
+	__VA_ARGS__;								\
+} lock_guard_##_type##_t;							\
+										\
+static inline void lock_guard_##_type##_cleanup(lock_guard_##_type##_t *_G)	\
+{										\
+	_Unlock;								\
+}										\
+										\
+static inline lock_guard_##_type##_t						\
+lock_guard_##_type##_init(_Type *lock, _Type *lock2)				\
+{										\
+	lock_guard_##_type##_t _g = { .lock = lock, .lock2 = lock2 }, *_G = &_g;\
+	_Lock;									\
+	return _g;								\
+}
+
+#define variable_scope(_type, _enter, _exit)					\
+	for (_type *_done = NULL, _scope __cleanup(_exit) = _enter;		\
+	     !_done; _done = (void *)8)
+
+#define scoped(_type, _var...)							\
+	variable_scope(lock_guard_##_type##_t,					\
+		       lock_guard_##_type##_init(_var),				\
+		       lock_guard_##_type##_cleanup)
+
+#define guard(_type, _name, _var...)						\
+	lock_guard_##_type##_t __cleanup(lock_guard_##_type##_cleanup) _name =	\
+		lock_guard_##_type##_init(_var)
+
+#endif /* __LINUX_GUARDS_H */
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -13,6 +13,7 @@
 #define _LINUX_TRACE_IRQFLAGS_H
 
 #include <linux/typecheck.h>
+#include <linux/guards.h>
 #include <asm/irqflags.h>
 #include <asm/percpu.h>
 
@@ -267,4 +268,10 @@ extern void warn_bogus_irq_restore(void)
 
 #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
 
+DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
+DEFINE_LOCK_GUARD_0(irqsave,
+		    local_irq_save(_G->flags),
+		    local_irq_restore(_G->flags),
+		    unsigned long flags)
+
 #endif
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -19,6 +19,7 @@
 #include <asm/processor.h>
 #include <linux/osq_lock.h>
 #include <linux/debug_locks.h>
+#include <linux/guards.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
@@ -219,4 +220,8 @@ extern void mutex_unlock(struct mutex *l
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
+DEFINE_LOCK_GUARD_1(mutex, struct mutex,
+		    mutex_lock(_G->lock),
+		    mutex_unlock(_G->lock))
+
 #endif /* __LINUX_MUTEX_H */
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,6 +8,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/guards.h>
 #include <linux/list.h>
 
 /*
@@ -463,4 +464,7 @@ static __always_inline void preempt_enab
 		preempt_enable();
 }
 
+DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
 #endif /* __LINUX_PREEMPT_H */
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -27,6 +27,7 @@
 #include <linux/preempt.h>
 #include <linux/bottom_half.h>
 #include <linux/lockdep.h>
+#include <linux/guards.h>
 #include <asm/processor.h>
 #include <linux/cpumask.h>
 #include <linux/context_tracking_irq.h>
@@ -1095,4 +1096,6 @@ rcu_head_after_call_rcu(struct rcu_head
 extern int rcu_expedited;
 extern int rcu_normal;
 
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+
 #endif /* __LINUX_RCUPDATE_H */
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -126,6 +126,8 @@ static inline void put_task_struct(struc
 		__put_task_struct(t);
 }
 
+DEFINE_PTR_GUARD(put_task, struct task_struct, put_task_struct)
+
 static inline void put_task_struct_many(struct task_struct *t, int nr)
 {
 	if (refcount_sub_and_test(nr, &t->usage))
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -61,6 +61,7 @@
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
 #include <linux/lockdep.h>
+#include <linux/guards.h>
 #include <asm/barrier.h>
 #include <asm/mmiowb.h>
 
@@ -502,5 +503,31 @@ int __alloc_bucket_spinlocks(spinlock_t
 
 void free_bucket_spinlocks(spinlock_t *locks);
 
+DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
+		    raw_spin_lock(_G->lock),
+		    raw_spin_unlock(_G->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
+		    raw_spin_lock_irq(_G->lock),
+		    raw_spin_unlock_irq(_G->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
+		    raw_spin_lock_irqsave(_G->lock, _G->flags),
+		    raw_spin_unlock_irqrestore(_G->lock, _G->flags),
+		    unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
+		    spin_lock(_G->lock),
+		    spin_unlock(_G->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
+		    spin_lock_irq(_G->lock),
+		    spin_unlock_irq(_G->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
+		    spin_lock_irqsave(_G->lock, _G->flags),
+		    spin_unlock_irqrestore(_G->lock, _G->flags),
+		    unsigned long flags)
+
 #undef __LINUX_INSIDE_SPINLOCK_H
 #endif /* __LINUX_SPINLOCK_H */
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5046,7 +5046,7 @@ sub process {
 				if|for|while|switch|return|case|
 				volatile|__volatile__|
 				__attribute__|format|__extension__|
-				asm|__asm__)$/x)
+				asm|__asm__|scoped)$/x)
 			{
 			# cpp #define statements have non-optional spaces, ie
 			# if there is a space between the name and the open



Thread overview: 42+ messages
2023-05-26 20:52 [PATCH v2 0/2] Lock and Pointer guards Peter Zijlstra
2023-05-26 20:52 ` Peter Zijlstra [this message]
2023-05-26 21:24   ` [PATCH v2 1/2] locking: Introduce __cleanup__ based guards Peter Zijlstra
2023-05-26 21:54     ` Linus Torvalds
2023-05-27  8:57       ` Peter Zijlstra
2023-05-26 20:52 ` [PATCH v2 2/2] sched: Use fancy new guards Peter Zijlstra
2023-05-27 17:21 ` [PATCH v2 0/2] Lock and Pointer guards Mathieu Desnoyers
2023-05-27 19:18 ` Linus Torvalds
2023-05-29 12:09   ` Paolo Bonzini
2023-05-29 19:04     ` Linus Torvalds
2023-05-29 21:27       ` Ian Lance Taylor
2023-05-30  0:06         ` Linus Torvalds
2023-05-30  9:23   ` Peter Zijlstra
2023-05-30  9:34     ` Peter Zijlstra
2023-05-30 13:58     ` Valentin Schneider
2023-06-06  9:42     ` Peter Zijlstra
2023-06-06 13:17       ` Linus Torvalds
2023-06-06 13:40         ` Peter Zijlstra
2023-06-06 14:50           ` Linus Torvalds
2023-06-06 16:06             ` Kees Cook
2023-06-06 18:08             ` Peter Zijlstra
2023-06-06 23:22               ` Linus Torvalds
2023-06-07  9:41                 ` Peter Zijlstra
2023-06-08  8:52                   ` Peter Zijlstra
2023-06-08  9:04                     ` Greg KH
2023-06-08 15:45                     ` Linus Torvalds
2023-06-08 16:47                       ` Kees Cook
2023-06-08 16:59                         ` Linus Torvalds
2023-06-08 17:20                         ` Nick Desaulniers
2023-06-08 18:51                           ` Peter Zijlstra
2023-06-08 20:14                             ` Nick Desaulniers
2023-06-09 10:20                               ` Paolo Bonzini
2023-06-08 20:06                       ` Peter Zijlstra
2023-06-09  2:25                         ` Linus Torvalds
2023-06-09  8:14                           ` Peter Zijlstra
2023-06-09 21:18                           ` Kees Cook
2023-06-09  8:27                       ` Rasmus Villemoes
2023-06-06 15:31       ` Kees Cook
2023-06-06 15:45         ` Linus Torvalds
2023-06-06 16:08           ` Kees Cook
2023-06-08 16:25           ` David Laight
2023-05-30  9:26   ` Peter Zijlstra
