From: Roberto Sassu <roberto.sassu@huawei.com>
To: <corbet@lwn.net>, <viro@zeniv.linux.org.uk>, <ast@kernel.org>,
	<daniel@iogearbox.net>, <andrii@kernel.org>, <kpsingh@kernel.org>,
	<shuah@kernel.org>, <mcoquelin.stm32@gmail.com>,
	<alexandre.torgue@foss.st.com>, <zohar@linux.ibm.com>
Cc: <linux-doc@vger.kernel.org>, <linux-fsdevel@vger.kernel.org>,
	<netdev@vger.kernel.org>, <bpf@vger.kernel.org>,
	<linux-kselftest@vger.kernel.org>,
	<linux-stm32@st-md-mailman.stormreply.com>,
	<linux-arm-kernel@lists.infradead.org>,
	<linux-integrity@vger.kernel.org>,
	<linux-security-module@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	Roberto Sassu <roberto.sassu@huawei.com>
Subject: [PATCH 08/18] bpf-preload: Generate load_skel()
Date: Mon, 28 Mar 2022 19:50:23 +0200
Message-ID: <20220328175033.2437312-9-roberto.sassu@huawei.com>
In-Reply-To: <20220328175033.2437312-1-roberto.sassu@huawei.com>

Generate load_skel() to open, load and attach the eBPF programs in the
skeleton, and to retrieve the objects (links) to be pinned.
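
As an illustration of what codegen_preload_load() emits, below is a rough
sketch of the generated function for a hypothetical skeleton object named
foo_bpf with a single program dump_foo (made-up names; the sketch also
assumes the static link variable and free_objs_and_skel() generated by the
earlier patches of this series). The iterators.lskel.h hunk in this patch
shows the real output for the in-tree iterators object:

  /* Hypothetical output for an object named foo_bpf with one program, dump_foo. */
  static int load_skel(void)
  {
  	int err;

  	skel = foo_bpf__open();
  	if (!skel)
  		return -ENOMEM;

  	err = foo_bpf__load(skel);
  	if (err)
  		goto out;

  	err = foo_bpf__attach(skel);
  	if (err)
  		goto out;

  	/* Take a reference to the program's link so that it can later be
  	 * pinned in bpffs.
  	 */
  	dump_foo_link = bpf_link_get_from_fd(skel->links.dump_foo_fd);
  	if (IS_ERR(dump_foo_link)) {
  		err = PTR_ERR(dump_foo_link);
  		goto out;
  	}

  	/* Avoid taking over stdin/stdout/stderr of init process. */
  	close_fd(skel->links.dump_foo_fd);
  	skel->links.dump_foo_fd = 0;

  	return 0;
  out:
  	free_objs_and_skel();
  	return err;
  }

The per-program blocks (link retrieval and fd cleanup) are emitted once for
each program found by bpf_object__for_each_program().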

Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
---
 kernel/bpf/preload/bpf_preload_kern.c         | 36 -----------
 .../bpf/preload/iterators/iterators.lskel.h   | 42 ++++++++++++
 tools/bpf/bpftool/gen.c                       | 64 +++++++++++++++++++
 3 files changed, 106 insertions(+), 36 deletions(-)

diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
index 0869c889255c..35e9abd1a668 100644
--- a/kernel/bpf/preload/bpf_preload_kern.c
+++ b/kernel/bpf/preload/bpf_preload_kern.c
@@ -10,42 +10,6 @@ static struct bpf_preload_ops ops = {
 	.owner = THIS_MODULE,
 };
 
-static int load_skel(void)
-{
-	int err;
-
-	skel = iterators_bpf__open();
-	if (!skel)
-		return -ENOMEM;
-	err = iterators_bpf__load(skel);
-	if (err)
-		goto out;
-	err = iterators_bpf__attach(skel);
-	if (err)
-		goto out;
-	dump_bpf_map_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);
-	if (IS_ERR(dump_bpf_map_link)) {
-		err = PTR_ERR(dump_bpf_map_link);
-		goto out;
-	}
-	dump_bpf_prog_link = bpf_link_get_from_fd(skel->links.dump_bpf_prog_fd);
-	if (IS_ERR(dump_bpf_prog_link)) {
-		err = PTR_ERR(dump_bpf_prog_link);
-		goto out;
-	}
-	/* Avoid taking over stdin/stdout/stderr of init process. Zeroing out
-	 * makes skel_closenz() a no-op later in iterators_bpf__destroy().
-	 */
-	close_fd(skel->links.dump_bpf_map_fd);
-	skel->links.dump_bpf_map_fd = 0;
-	close_fd(skel->links.dump_bpf_prog_fd);
-	skel->links.dump_bpf_prog_fd = 0;
-	return 0;
-out:
-	free_objs_and_skel();
-	return err;
-}
-
 static int __init load(void)
 {
 	int err;
diff --git a/kernel/bpf/preload/iterators/iterators.lskel.h b/kernel/bpf/preload/iterators/iterators.lskel.h
index 75b2e94b7547..6faf3708be01 100644
--- a/kernel/bpf/preload/iterators/iterators.lskel.h
+++ b/kernel/bpf/preload/iterators/iterators.lskel.h
@@ -474,4 +474,46 @@ static int preload(struct dentry *parent)
 	return err;
 }
 
+static int load_skel(void)
+{
+	int err;
+
+	skel = iterators_bpf__open();
+	if (!skel)
+		return -ENOMEM;
+
+	err = iterators_bpf__load(skel);
+	if (err)
+		goto out;
+
+	err = iterators_bpf__attach(skel);
+	if (err)
+		goto out;
+
+	dump_bpf_map_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);
+	if (IS_ERR(dump_bpf_map_link)) {
+		err = PTR_ERR(dump_bpf_map_link);
+		goto out;
+	}
+
+	dump_bpf_prog_link = bpf_link_get_from_fd(skel->links.dump_bpf_prog_fd);
+	if (IS_ERR(dump_bpf_prog_link)) {
+		err = PTR_ERR(dump_bpf_prog_link);
+		goto out;
+	}
+
+	/* Avoid taking over stdin/stdout/stderr of init process. Zeroing out
+	 * makes skel_closenz() a no-op later in iterators_bpf__destroy().
+	 */
+	close_fd(skel->links.dump_bpf_map_fd);
+	skel->links.dump_bpf_map_fd = 0;
+	close_fd(skel->links.dump_bpf_prog_fd);
+	skel->links.dump_bpf_prog_fd = 0;
+
+	return 0;
+out:
+	free_objs_and_skel();
+	return err;
+}
+
 #endif /* __ITERATORS_BPF_SKEL_H__ */
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index fa2c6022b80d..ad948f1c90b5 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -764,6 +764,69 @@ static void codegen_preload(struct bpf_object *obj, const char *obj_name)
 		");
 }
 
+static void codegen_preload_load(struct bpf_object *obj, const char *obj_name)
+{
+	struct bpf_program *prog;
+
+	codegen("\
+		\n\
+		\n\
+		static int load_skel(void)				    \n\
+		{							    \n\
+			int err;					    \n\
+		\n\
+			skel = %1$s__open();				    \n\
+			if (!skel)					    \n\
+				return -ENOMEM;				    \n\
+		\n\
+			err = %1$s__load(skel);				    \n\
+			if (err)					    \n\
+				goto out;				    \n\
+		\n\
+			err = %1$s__attach(skel);			    \n\
+			if (err)					    \n\
+				goto out;				    \n\
+		", obj_name);
+
+	bpf_object__for_each_program(prog, obj) {
+		codegen("\
+			\n\
+			\n\
+				%1$s_link = bpf_link_get_from_fd(skel->links.%1$s_fd);		\n\
+				if (IS_ERR(%1$s_link)) {					\n\
+					err = PTR_ERR(%1$s_link);				\n\
+					goto out;						\n\
+				}								\n\
+			", bpf_program__name(prog));
+	}
+
+	codegen("\
+		\n\
+		\n\
+			/* Avoid taking over stdin/stdout/stderr of init process. Zeroing out	\n\
+			 * makes skel_closenz() a no-op later in %1$s__destroy().		\n\
+			 */									\n\
+		", obj_name);
+
+	bpf_object__for_each_program(prog, obj) {
+		codegen("\
+			\n\
+				close_fd(skel->links.%1$s_fd);		    \n\
+				skel->links.%1$s_fd = 0;		    \n\
+			", bpf_program__name(prog));
+	}
+
+	codegen("\
+		\n\
+		\n\
+			return 0;					    \n\
+		out:							    \n\
+			free_objs_and_skel();				    \n\
+			return err;					    \n\
+		}							    \n\
+		");
+}
+
 static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
 {
 	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
@@ -916,6 +979,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
 		codegen_preload_vars(obj, obj_name);
 		codegen_preload_free(obj, obj_name);
 		codegen_preload(obj, obj_name);
+		codegen_preload_load(obj, obj_name);
 	}
 
 	codegen("\
-- 
2.32.0


Thread overview: 90+ messages
2022-03-28 17:50 [PATCH 00/18] bpf: Secure and authenticated preloading of eBPF programs Roberto Sassu
2022-03-28 17:50 ` [PATCH 01/18] bpf: Export bpf_link_inc() Roberto Sassu
2022-03-28 17:50 ` [PATCH 02/18] bpf-preload: Move bpf_preload.h to include/linux Roberto Sassu
2022-03-28 17:50 ` [PATCH 03/18] bpf-preload: Generalize object pinning from the kernel Roberto Sassu
2022-03-28 17:50 ` [PATCH 04/18] bpf-preload: Export and call bpf_obj_do_pin_kernel() Roberto Sassu
2022-03-28 17:50 ` [PATCH 05/18] bpf-preload: Generate static variables Roberto Sassu
2022-03-29 23:51   ` Andrii Nakryiko
2022-03-30  7:44     ` Roberto Sassu
2022-04-04  0:22       ` Andrii Nakryiko
2022-03-30 15:12     ` Roberto Sassu
2022-03-28 17:50 ` [PATCH 06/18] bpf-preload: Generate free_objs_and_skel() Roberto Sassu
2022-03-28 17:50 ` [PATCH 07/18] bpf-preload: Generate preload() Roberto Sassu
2022-03-28 17:50 ` [PATCH 08/18] bpf-preload: Generate load_skel() Roberto Sassu [this message]
2022-03-28 17:50 ` [PATCH 09/18] bpf-preload: Generate code to pin non-internal maps Roberto Sassu
2022-03-28 17:50 ` [PATCH 10/18] bpf-preload: Generate bpf_preload_ops Roberto Sassu
2022-03-28 17:50 ` [PATCH 11/18] bpf-preload: Store multiple bpf_preload_ops structures in a linked list Roberto Sassu
2022-03-28 17:50 ` [PATCH 12/18] bpf-preload: Implement new registration method for preloading eBPF programs Roberto Sassu
2022-03-28 17:50 ` [PATCH 13/18] bpf-preload: Move pinned links and maps to a dedicated directory in bpffs Roberto Sassu
2022-03-28 17:50 ` [PATCH 14/18] bpf-preload: Switch to new preload registration method Roberto Sassu
2022-03-29  2:35   ` kernel test robot
2022-03-29  3:27   ` kernel test robot
2022-03-28 17:50 ` [PATCH 15/18] bpf-preload: Generate code of kernel module to preload Roberto Sassu
2022-03-28 17:50 ` [PATCH 16/18] bpf-preload: Do kernel mount to ensure that pinned objects don't disappear Roberto Sassu
2022-03-29  2:15   ` kernel test robot
2022-03-29  4:08   ` kernel test robot
2022-03-28 17:50 ` [PATCH 17/18] bpf-preload/selftests: Add test for automatic generation of preload methods Roberto Sassu
2022-03-28 17:50 ` [PATCH 18/18] bpf-preload/selftests: Preload a test eBPF program and check pinned objects Roberto Sassu
2022-03-29 23:51 ` [PATCH 00/18] bpf: Secure and authenticated preloading of eBPF programs Andrii Nakryiko
2022-03-30  7:21   ` Roberto Sassu
2022-03-31  2:27 ` Alexei Starovoitov
2022-03-31  8:25   ` Roberto Sassu
2022-04-01 23:55     ` Alexei Starovoitov
2022-04-02  1:03       ` KP Singh
2022-04-04  7:44         ` Djalal Harouni
2022-04-04 17:20           ` Roberto Sassu
2022-04-04 22:49             ` Alexei Starovoitov
2022-04-05  0:00               ` KP Singh
2022-04-05 13:11                 ` [POC][USER SPACE][PATCH] Introduce LSM to protect pinned objects Roberto Sassu
2022-04-05 22:47                   ` Casey Schaufler
2022-04-06  6:55                     ` Roberto Sassu
2022-04-05 14:49             ` [PATCH 00/18] bpf: Secure and authenticated preloading of eBPF programs Casey Schaufler
2022-04-05 15:29               ` Roberto Sassu
2022-04-05 16:21                 ` Casey Schaufler
2022-04-05 16:37                   ` KP Singh
2022-04-04 17:41         ` Roberto Sassu
