From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: kent.overstreet@linux.dev, mhocko@suse.com, vbabka@suse.cz,
	hannes@cmpxchg.org, roman.gushchin@linux.dev, mgorman@suse.de,
	dave@stgolabs.net, willy@infradead.org, liam.howlett@oracle.com,
	void@manifault.com, peterz@infradead.org, juri.lelli@redhat.com,
	ldufour@linux.ibm.com, peterx@redhat.com, david@redhat.com,
	axboe@kernel.dk, mcgrof@kernel.org, masahiroy@kernel.org,
	nathan@kernel.org, changbin.du@intel.com, ytcoode@gmail.com,
	vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	rostedt@goodmis.org, bsegall@google.com, bristot@redhat.com,
	vschneid@redhat.com, cl@linux.com, penberg@kernel.org,
	iamjoonsoo.kim@lge.com, 42.hyeyoo@gmail.com, glider@google.com,
	elver@google.com, dvyukov@google.com, shakeelb@google.com,
	songmuchun@bytedance.com, arnd@arndb.de, jbaron@akamai.com,
	rientjes@google.com, minchan@google.com, kaleshsingh@google.com,
	surenb@google.com, kernel-team@android.com, linux-mm@kvack.org,
	iommu@lists.linux.dev, kasan-dev@googlegroups.com,
	io-uring@vger.kernel.org, linux-arch@vger.kernel.org,
	xen-devel@lists.xenproject.org, linux-bcache@vger.kernel.org,
	linux-modules@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 03/30] Lazy percpu counters
Date: Tue, 30 Aug 2022 14:48:52 -0700
Message-ID: <20220830214919.53220-4-surenb@google.com>
In-Reply-To: <20220830214919.53220-1-surenb@google.com>

From: Kent Overstreet <kent.overstreet@linux.dev>

Add lib/lazy-percpu-counter.c, which implements counters that start out
as atomics but lazily switch to percpu mode once the update rate crosses
a threshold (arbitrarily set at 256 updates per second).
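
For illustration, a minimal usage sketch (hypothetical caller code, not
part of this patch; the counter relies on being zero-initialized, as no
init helper is provided):

	static struct lazy_percpu_counter nr_events;

	static void record_event(void)
	{
		/* Atomic until the update rate crosses the threshold */
		lazy_percpu_counter_add(&nr_events, 1);
	}

	static s64 report_events(void)
	{
		/* Sums the percpu values if the counter switched modes */
		return lazy_percpu_counter_read(&nr_events);
	}

	static void teardown(void)
	{
		/* Frees the percpu allocation, if one was ever made */
		lazy_percpu_counter_exit(&nr_events);
	}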

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
---
 include/linux/lazy-percpu-counter.h |  67 +++++++++++++
 lib/Kconfig                         |   3 +
 lib/Makefile                        |   2 +
 lib/lazy-percpu-counter.c           | 141 ++++++++++++++++++++++++++++
 4 files changed, 213 insertions(+)
 create mode 100644 include/linux/lazy-percpu-counter.h
 create mode 100644 lib/lazy-percpu-counter.c

diff --git a/include/linux/lazy-percpu-counter.h b/include/linux/lazy-percpu-counter.h
new file mode 100644
index 000000000000..a22a2b9a9f32
--- /dev/null
+++ b/include/linux/lazy-percpu-counter.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Lazy percpu counters:
+ * (C) 2022 Kent Overstreet
+ *
+ * Lazy percpu counters start out in atomic mode, then switch to percpu mode if
+ * the update rate crosses some threshold.
+ *
+ * This means we don't have to decide between low memory overhead atomic
+ * counters and higher performance percpu counters - we can have our cake and
+ * eat it, too!
+ *
+ * Internally we use an atomic64_t, where the low bit indicates whether we're in
+ * percpu mode, and the high 8 bits are a secondary counter that's incremented
+ * when the counter is modified - meaning 55 bits of precision are available for
+ * the counter itself.
+ *
+ * lazy_percpu_counter is 16 bytes (on 64-bit machines);
+ * raw_lazy_percpu_counter is 8 bytes but requires a separate unsigned long
+ * to record when the counter wraps - because sometimes multiple counters
+ * are used together and can share the same timestamp.
+ */
+
+#ifndef _LINUX_LAZY_PERCPU_COUNTER_H
+#define _LINUX_LAZY_PERCPU_COUNTER_H
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+
+struct raw_lazy_percpu_counter {
+	atomic64_t			v;
+};
+
+void __lazy_percpu_counter_exit(struct raw_lazy_percpu_counter *c);
+void __lazy_percpu_counter_add(struct raw_lazy_percpu_counter *c,
+			       unsigned long *last_wrap, s64 i);
+s64 __lazy_percpu_counter_read(struct raw_lazy_percpu_counter *c);
+
+static inline void __lazy_percpu_counter_sub(struct raw_lazy_percpu_counter *c,
+					     unsigned long *last_wrap, s64 i)
+{
+	__lazy_percpu_counter_add(c, last_wrap, -i);
+}
+
+struct lazy_percpu_counter {
+	struct raw_lazy_percpu_counter	v;
+	unsigned long			last_wrap;
+};
+
+static inline void lazy_percpu_counter_exit(struct lazy_percpu_counter *c)
+{
+	__lazy_percpu_counter_exit(&c->v);
+}
+
+static inline void lazy_percpu_counter_add(struct lazy_percpu_counter *c, s64 i)
+{
+	__lazy_percpu_counter_add(&c->v, &c->last_wrap, i);
+}
+
+static inline void lazy_percpu_counter_sub(struct lazy_percpu_counter *c, s64 i)
+{
+	__lazy_percpu_counter_sub(&c->v, &c->last_wrap, i);
+}
+
+static inline s64 lazy_percpu_counter_read(struct lazy_percpu_counter *c)
+{
+	return __lazy_percpu_counter_read(&c->v);
+}
+
+#endif /* _LINUX_LAZY_PERCPU_COUNTER_H */
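
To make the encoding described in the header comment concrete, here is
an illustrative decode of an atomic-mode value, mirroring what
lazy_percpu_counter_atomic_val() does in lib/lazy-percpu-counter.c (a
sketch, not code from the patch):

	u64 raw = atomic64_read(&c->v);

	bool is_pcpu   = raw & 1;			/* bit 0: percpu-mode flag */
	u64  mod_count = raw >> 56;			/* bits 56-63: mod counter */
	s64  value     = ((s64)(raw << 8)) >> 9;	/* bits 1-55, sign extended */
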
diff --git a/lib/Kconfig b/lib/Kconfig
index dc1ab2ed1dc6..fc6dbc425728 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -498,6 +498,9 @@ config ASSOCIATIVE_ARRAY
 
 	  for more information.
 
+config LAZY_PERCPU_COUNTER
+	bool
+
 config HAS_IOMEM
 	bool
 	depends on !NO_IOMEM
diff --git a/lib/Makefile b/lib/Makefile
index ffabc30a27d4..cc7762748708 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -163,6 +163,8 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
+obj-$(CONFIG_LAZY_PERCPU_COUNTER) += lazy-percpu-counter.o
+
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_LINEAR_RANGES) += linear_ranges.o
 obj-$(CONFIG_PACKING)	+= packing.o
diff --git a/lib/lazy-percpu-counter.c b/lib/lazy-percpu-counter.c
new file mode 100644
index 000000000000..299ef36137ee
--- /dev/null
+++ b/lib/lazy-percpu-counter.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/atomic.h>
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/lazy-percpu-counter.h>
+#include <linux/percpu.h>
+
+/*
+ * We use the high bits of the atomic counter for a secondary counter, which is
+ * incremented every time the counter is touched. When the secondary counter
+ * wraps, we check the time of the previous wrap; if it was recent enough, the
+ * update frequency has crossed our threshold and we switch to percpu mode:
+ */
+#define COUNTER_MOD_BITS		8
+#define COUNTER_MOD_MASK		~(~0ULL >> COUNTER_MOD_BITS)
+#define COUNTER_MOD_BITS_START		(64 - COUNTER_MOD_BITS)
+
+/*
+ * We use the low bit of the counter to indicate whether we're in atomic mode
+ * (low bit clear) or percpu mode (low bit set, and the counter is a pointer
+ * to the actual percpu counters):
+ */
+#define COUNTER_IS_PCPU_BIT		1
+
+static inline u64 __percpu *lazy_percpu_counter_is_pcpu(u64 v)
+{
+	if (!(v & COUNTER_IS_PCPU_BIT))
+		return NULL;
+
+	v ^= COUNTER_IS_PCPU_BIT;
+	return (u64 __percpu *)(unsigned long)v;
+}
+
+static inline s64 lazy_percpu_counter_atomic_val(s64 v)
+{
+	/* Ensure output is sign extended properly: */
+	return (v << COUNTER_MOD_BITS) >>
+		(COUNTER_MOD_BITS + COUNTER_IS_PCPU_BIT);
+}
+
+static void lazy_percpu_counter_switch_to_pcpu(struct raw_lazy_percpu_counter *c)
+{
+	u64 __percpu *pcpu_v = alloc_percpu_gfp(u64, GFP_ATOMIC|__GFP_NOWARN);
+	u64 old, new, v;
+
+	if (!pcpu_v)
+		return;
+
+	preempt_disable();
+	v = atomic64_read(&c->v);
+	do {
+		if (lazy_percpu_counter_is_pcpu(v)) {
+			/* Raced with another updater: drop our allocation */
+			free_percpu(pcpu_v);
+			preempt_enable();
+			return;
+		}
+
+		old = v;
+		new = (unsigned long)pcpu_v | COUNTER_IS_PCPU_BIT;
+
+		/* Seed this CPU's slot with the accumulated atomic value */
+		*this_cpu_ptr(pcpu_v) = lazy_percpu_counter_atomic_val(v);
+	} while ((v = atomic64_cmpxchg(&c->v, old, new)) != old);
+	preempt_enable();
+}
+
+/**
+ * __lazy_percpu_counter_exit() - free resources associated with a
+ * raw_lazy_percpu_counter
+ *
+ * @c: counter to exit
+ */
+void __lazy_percpu_counter_exit(struct raw_lazy_percpu_counter *c)
+{
+	free_percpu(lazy_percpu_counter_is_pcpu(atomic64_read(&c->v)));
+}
+EXPORT_SYMBOL_GPL(__lazy_percpu_counter_exit);
+
+/**
+ * __lazy_percpu_counter_read() - read the current value of a
+ * raw_lazy_percpu_counter
+ *
+ * @c: counter to read
+ */
+s64 __lazy_percpu_counter_read(struct raw_lazy_percpu_counter *c)
+{
+	s64 v = atomic64_read(&c->v);
+	u64 __percpu *pcpu_v = lazy_percpu_counter_is_pcpu(v);
+
+	if (pcpu_v) {
+		int cpu;
+
+		v = 0;
+		for_each_possible_cpu(cpu)
+			v += *per_cpu_ptr(pcpu_v, cpu);
+	} else {
+		v = lazy_percpu_counter_atomic_val(v);
+	}
+
+	return v;
+}
+EXPORT_SYMBOL_GPL(__lazy_percpu_counter_read);
+
+/**
+ * __lazy_percpu_counter_add() - add a value to a raw_lazy_percpu_counter
+ *
+ * @c: counter to modify
+ * @last_wrap: pointer to a timestamp, updated when the mod counter wraps
+ * @i: value to add
+ */
+void __lazy_percpu_counter_add(struct raw_lazy_percpu_counter *c,
+			       unsigned long *last_wrap, s64 i)
+{
+	u64 atomic_i;
+	u64 old, v = atomic64_read(&c->v);
+	u64 __percpu *pcpu_v;
+
+	atomic_i  = i << COUNTER_IS_PCPU_BIT;
+	atomic_i &= ~COUNTER_MOD_MASK;
+	atomic_i |= 1ULL << COUNTER_MOD_BITS_START;
+
+	do {
+		pcpu_v = lazy_percpu_counter_is_pcpu(v);
+		if (pcpu_v) {
+			this_cpu_add(*pcpu_v, i);
+			return;
+		}
+
+		old = v;
+	} while ((v = atomic64_cmpxchg(&c->v, old, old + atomic_i)) != old);
+
+	if (unlikely(!(v & COUNTER_MOD_MASK))) {
+		unsigned long now = jiffies;
+
+		if (*last_wrap &&
+		    unlikely(time_after(*last_wrap + HZ, now)))
+			lazy_percpu_counter_switch_to_pcpu(c);
+		else
+			*last_wrap = now;
+	}
+}
+EXPORT_SYMBOL_GPL(__lazy_percpu_counter_add);
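
To see the mode switch in action, here is a smoke-test sketch
(hypothetical code, not part of this patch): the 8-bit mod counter wraps
every 256 updates, so a few hundred back-to-back updates produce two
wraps within one second and should flip the counter into percpu mode.

	static struct lazy_percpu_counter demo_c;

	static void lazy_percpu_counter_demo(void)
	{
		int i;

		/* Two mod-counter wraps in well under a second */
		for (i = 0; i < 512; i++)
			lazy_percpu_counter_add(&demo_c, 1);

		/* Reads 512 regardless of which mode the counter is in */
		pr_info("counter = %lld\n", lazy_percpu_counter_read(&demo_c));

		lazy_percpu_counter_exit(&demo_c);
	}
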
-- 
2.37.2.672.g94769d06f0-goog

