From: Ian Rogers <irogers@google.com>
To: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@redhat.com>, Namhyung Kim <namhyung@kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Masahiro Yamada <yamada.masahiro@socionext.com>,
	Kees Cook <keescook@chromium.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Petr Mladek <pmladek@suse.com>,
	Mauro Carvalho Chehab <mchehab+samsung@kernel.org>,
	Qian Cai <cai@lca.pw>, Joe Lawrence <joe.lawrence@redhat.com>,
	Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>,
	"Uladzislau Rezki (Sony)" <urezki@gmail.com>,
	Andy Shevchenko <andriy.shevchenko@linux.intel.com>,
	Ard Biesheuvel <ardb@kernel.org>,
	"David S. Miller" <davem@davemloft.net>,
	Kent Overstreet <kent.overstreet@gmail.com>,
	Gary Hook <Gary.Hook@amd.com>, Arnd Bergmann <arnd@arndb.de>,
	Kan Liang <kan.liang@linux.intel.com>,
	linux-kernel@vger.kernel.org
Cc: Stephane Eranian <eranian@google.com>,
	Andi Kleen <ak@linux.intel.com>, Ian Rogers <irogers@google.com>
Subject: [PATCH v5 10/10] perf/cgroup: Do not switch system-wide events in cgroup switch
Date: Fri,  6 Dec 2019 15:15:39 -0800
Message-ID: <20191206231539.227585-11-irogers@google.com>
In-Reply-To: <20191206231539.227585-1-irogers@google.com>

From: Kan Liang <kan.liang@linux.intel.com>

When counting system-wide events and cgroup events simultaneously, the
system-wide events are always scheduled out and then back in during
cgroup switches, adding overhead and possibly losing events. Switching
out system-wide flexible events is only necessary when the scheduled-in
task's cgroups have pinned events that must be scheduled in at a higher
priority than the system-wide flexible events.
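
To make the intended filtering concrete, here is a minimal user-space
sketch that mirrors the new perf_cgroup_skip_switch() logic (the flag
values match the patch below; skip_switch() and everything else here
is illustrative, not the kernel code):

   #include <stdbool.h>
   #include <stdio.h>

   /* Event-type flags, values copied from the patch below. */
   enum event_type {
           EVENT_FLEXIBLE             = 0x1,
           EVENT_PINNED               = 0x2,
           EVENT_CGROUP_FLEXIBLE_ONLY = 0x10,
           EVENT_CGROUP_PINNED_ONLY   = 0x20,
   };

   /*
    * A system-wide (non-cgroup) event is skipped when the switch only
    * needs to touch cgroup events of the matching class.
    */
   static bool skip_switch(unsigned int type, bool is_cgroup, bool is_pinned)
   {
           if (is_cgroup)
                   return false;   /* cgroup events always switch */
           if (is_pinned)
                   return type & EVENT_CGROUP_PINNED_ONLY;
           return type & EVENT_CGROUP_FLEXIBLE_ONLY;
   }

   int main(void)
   {
           unsigned int t = EVENT_PINNED | EVENT_FLEXIBLE |
                            EVENT_CGROUP_PINNED_ONLY |
                            EVENT_CGROUP_FLEXIBLE_ONLY;

           /* System-wide events are skipped; cgroup events are not. */
           printf("system-wide pinned skipped: %d\n", skip_switch(t, false, true));
           printf("cgroup pinned skipped:      %d\n", skip_switch(t, true, true));
           return 0;
   }

Compiled and run, this prints 1 for the system-wide pinned event (left
alone during a cgroup-only switch) and 0 for the cgroup event (always
switched).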

Here is a test with 6 child cgroups (sibling cgroups), 1 parent cgroup
and system-wide events. A specjbb benchmark runs in each child cgroup.
The perf command is as below:
   perf stat -e cycles,instructions -e cycles,instructions
   -e cycles,instructions -e cycles,instructions
   -e cycles,instructions -e cycles,instructions
   -e cycles,instructions -e cycles,instructions
   -G cgroup1,cgroup1,cgroup2,cgroup2,cgroup3,cgroup3
   -G cgroup4,cgroup4,cgroup5,cgroup5,cgroup6,cgroup6
   -G cgroup_parent,cgroup_parent
   -a -e cycles,instructions -I 1000
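
Each -G list attaches the preceding -e event list to the named cgroups
(the entries map positionally onto the events), the final
-a -e cycles,instructions adds the system-wide events, and -I 1000
prints counts every 1000 ms.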

The average RT (Response Time) reported by specjbb is used as the key
performance metric (lower is better).
                                        RT(us)              Overhead
Baseline (no perf stat):                4286.9
Use cgroup perf, no patches:            4537.1                5.84%
Use cgroup perf, apply the patch:       4440.7                3.59%
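
Overhead is computed relative to the baseline RT, e.g.
(4537.1 - 4286.9) / 4286.9 = 5.84% and
(4440.7 - 4286.9) / 4286.9 = 3.59%.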

Fixes: e5d1367f17ba ("perf: Add cgroup support")
---
This patch was rebased on https://lkml.org/lkml/2019/8/7/771 with some
minor changes to comments made by Ian Rogers <irogers@google.com>.

Signed-off-by: Ian Rogers <irogers@google.com>
---
 include/linux/perf_event.h |   1 +
 kernel/events/core.c       | 133 ++++++++++++++++++++++++++++++++++---
 2 files changed, 123 insertions(+), 11 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a29a38df909e..7aa5df2a33eb 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -892,6 +892,7 @@ struct perf_cgroup_info {
 struct perf_cgroup {
 	struct cgroup_subsys_state	css;
 	struct perf_cgroup_info	__percpu *info;
+	unsigned int			nr_pinned_event;
 	/*
 	 * A cache of the first event with the perf_cpu_context's
 	 * perf_event_context for the first event in pinned_groups or
diff --git a/kernel/events/core.c b/kernel/events/core.c
index bcaf100d8167..fc7e9e4b8e3c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -362,8 +362,18 @@ enum event_type_t {
 	/* see ctx_resched() for details */
 	EVENT_CPU = 0x8,
 	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+
+	/* see perf_cgroup_switch() for details */
+	EVENT_CGROUP_FLEXIBLE_ONLY = 0x10,
+	EVENT_CGROUP_PINNED_ONLY = 0x20,
+	EVENT_CGROUP_ALL_ONLY = EVENT_CGROUP_FLEXIBLE_ONLY |
+				EVENT_CGROUP_PINNED_ONLY,
+
 };
 
+#define CGROUP_PINNED(type)	((type) & EVENT_CGROUP_PINNED_ONLY)
+#define CGROUP_FLEXIBLE(type)	((type) & EVENT_CGROUP_FLEXIBLE_ONLY)
+
 /*
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
@@ -668,6 +678,20 @@ perf_event_set_state(struct perf_event *event, enum perf_event_state state)
 
 #ifdef CONFIG_CGROUP_PERF
 
+/* Skip system-wide CPU events if only cgroup events are required. */
+static inline bool
+perf_cgroup_skip_switch(enum event_type_t event_type,
+			struct perf_event *event,
+			bool is_pinned)
+{
+	if (event->cgrp)
+		return false;
+	if (is_pinned)
+		return !!CGROUP_PINNED(event_type);
+	else
+		return !!CGROUP_FLEXIBLE(event_type);
+}
+
 static inline bool
 perf_cgroup_match(struct perf_event *event)
 {
@@ -694,6 +718,8 @@ perf_cgroup_match(struct perf_event *event)
 
 static inline void perf_detach_cgroup(struct perf_event *event)
 {
+	if (event->attr.pinned)
+		event->cgrp->nr_pinned_event--;
 	css_put(&event->cgrp->css);
 	event->cgrp = NULL;
 }
@@ -781,6 +807,22 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 	}
 }
 
+/* Check if the cgroup or any of its ancestors has pinned events attached */
+static bool
+cgroup_has_pinned_events(struct perf_cgroup *cgrp)
+{
+	struct cgroup_subsys_state *css;
+	struct perf_cgroup *tmp_cgrp;
+
+	for (css = &cgrp->css; css; css = css->parent) {
+		tmp_cgrp = container_of(css, struct perf_cgroup, css);
+		if (tmp_cgrp->nr_pinned_event > 0)
+			return true;
+	}
+
+	return false;
+}
+
 static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
 
 #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
@@ -812,7 +854,22 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 		perf_pmu_disable(cpuctx->ctx.pmu);
 
 		if (mode & PERF_CGROUP_SWOUT) {
-			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+			/*
+			 * The system-wide events and cgroup events share the
+			 * same cpuctx groups. Decide which events to
+			 * schedule out based on the event type:
+			 * - EVENT_FLEXIBLE | EVENT_CGROUP_FLEXIBLE_ONLY:
+			 *   Only switch cgroup events from EVENT_FLEXIBLE
+			 *   groups.
+			 * - EVENT_PINNED | EVENT_CGROUP_PINNED_ONLY:
+			 *   Only switch cgroup events from EVENT_PINNED
+			 *   groups.
+			 * - EVENT_ALL | EVENT_CGROUP_ALL_ONLY:
+			 *   Only switch cgroup events from both EVENT_FLEXIBLE
+			 *   and EVENT_PINNED groups.
+			 */
+			cpu_ctx_sched_out(cpuctx,
+					EVENT_ALL | EVENT_CGROUP_ALL_ONLY);
 			/*
 			 * must not be done before ctxswout due
 			 * to event_filter_match() in event_sched_out()
@@ -831,7 +888,23 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 			 */
 			cpuctx->cgrp = perf_cgroup_from_task(task,
 							     &cpuctx->ctx);
-			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+
+			/*
+			 * To keep the priority order of cpu pinned then cpu
+			 * flexible, if the new cgroup has pinned events then
+			 * schedule out all system-wide flexible events
+			 * before scheduling in all events.
+			 */
+			if (cgroup_has_pinned_events(cpuctx->cgrp)) {
+				cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+				cpu_ctx_sched_in(cpuctx,
+					EVENT_ALL | EVENT_CGROUP_PINNED_ONLY,
+					task);
+			} else {
+				cpu_ctx_sched_in(cpuctx,
+					EVENT_ALL | EVENT_CGROUP_ALL_ONLY,
+					task);
+			}
 		}
 		perf_pmu_enable(cpuctx->ctx.pmu);
 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -959,6 +1032,9 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 	cgrp = container_of(css, struct perf_cgroup, css);
 	event->cgrp = cgrp;
 
+	if (event->attr.pinned)
+		cgrp->nr_pinned_event++;
+
 	/*
 	 * all events in a group must monitor
 	 * the same cgroup because a task belongs
@@ -1032,6 +1108,14 @@ list_update_cgroup_event(struct perf_event *event,
 
 #else /* !CONFIG_CGROUP_PERF */
 
+static inline bool
+perf_cgroup_skip_switch(enum event_type_t event_type,
+			struct perf_event *event,
+			bool pinned)
+{
+	return false;
+}
+
 static inline bool
 perf_cgroup_match(struct perf_event *event)
 {
@@ -3236,13 +3320,25 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 
 	perf_pmu_disable(ctx->pmu);
 	if (is_active & EVENT_PINNED) {
-		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
+		list_for_each_entry_safe(event, tmp, &ctx->pinned_active,
+					active_list) {
+			if (perf_cgroup_skip_switch(event_type, event, true)) {
+				ctx->is_active |= EVENT_PINNED;
+				continue;
+			}
 			group_sched_out(event, cpuctx, ctx);
+		}
 	}
 
 	if (is_active & EVENT_FLEXIBLE) {
-		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
+		list_for_each_entry_safe(event, tmp, &ctx->flexible_active,
+					active_list) {
+			if (perf_cgroup_skip_switch(event_type, event, false)) {
+				ctx->is_active |= EVENT_FLEXIBLE;
+				continue;
+			}
 			group_sched_out(event, cpuctx, ctx);
+		}
 	}
 	perf_pmu_enable(ctx->pmu);
 }
@@ -3573,6 +3669,7 @@ static int merge_sched_in(struct perf_event_context *ctx,
 			struct perf_cpu_context *cpuctx,
 			struct perf_event *event,
 			bool is_pinned,
+			enum event_type_t event_type,
 			int *can_add_hw)
 {
 	WARN_ON_ONCE(event->ctx != ctx);
@@ -3580,6 +3677,9 @@ static int merge_sched_in(struct perf_event_context *ctx,
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
+	if (perf_cgroup_skip_switch(event_type, event, is_pinned))
+		return 0;
+
 	/*
 	 * Avoid full event_filter_match as the caller verified the CPU and
 	 * cgroup before calling.
@@ -3608,7 +3708,8 @@ static int merge_sched_in(struct perf_event_context *ctx,
 
 static int ctx_groups_sched_in(struct perf_event_context *ctx,
 			struct perf_cpu_context *cpuctx,
-			bool is_pinned)
+			bool is_pinned,
+			enum event_type_t event_type)
 {
 #ifdef CONFIG_CGROUP_PERF
 	struct cgroup_subsys_state *css = NULL;
@@ -3669,7 +3770,8 @@ static int ctx_groups_sched_in(struct perf_event_context *ctx,
 	min_heapify_all(&event_heap, &perf_min_heap);
 
 	while (event_heap.size) {
-		ret = merge_sched_in(ctx, cpuctx, *evt, is_pinned, &can_add_hw);
+		ret = merge_sched_in(ctx, cpuctx, *evt, is_pinned, event_type,
+				&can_add_hw);
 
 		if (ret)
 			return ret;
@@ -3690,6 +3792,7 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     enum event_type_t event_type,
 	     struct task_struct *task)
 {
+	enum event_type_t ctx_event_type = event_type & EVENT_ALL;
 	int is_active = ctx->is_active;
 	u64 now;
 
@@ -3698,7 +3801,7 @@ ctx_sched_in(struct perf_event_context *ctx,
 	if (likely(!ctx->nr_events))
 		return;
 
-	ctx->is_active |= (event_type | EVENT_TIME);
+	ctx->is_active |= (ctx_event_type | EVENT_TIME);
 	if (ctx->task) {
 		if (!is_active)
 			cpuctx->task_ctx = ctx;
@@ -3718,14 +3821,22 @@ ctx_sched_in(struct perf_event_context *ctx,
 	/*
 	 * First go through the list and put on any pinned groups
 	 * in order to give them the best chance of going on.
+	 *
+	 * System-wide events may not have been scheduled out for a cgroup
+	 * switch.  Unconditionally call sched_in() for cgroup events;
+	 * it will filter the events.
 	 */
-	if (is_active & EVENT_PINNED)
-		ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/true);
+	if ((is_active & EVENT_PINNED) || CGROUP_PINNED(event_type)) {
+		ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/true,
+				CGROUP_PINNED(event_type));
+	}
 
 
 	/* Then walk through the lower prio flexible groups */
-	if (is_active & EVENT_FLEXIBLE)
-		ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/false);
+	if ((is_active & EVENT_FLEXIBLE) || CGROUP_FLEXIBLE(event_type)) {
+		ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/false,
+				CGROUP_FLEXIBLE(event_type));
+	}
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-- 
2.24.0.393.g34dc348eaf-goog



Thread overview: 80+ messages
2019-11-14  0:30 [PATCH v3 00/10] Optimize cgroup context switch Ian Rogers
2019-11-14  0:30 ` [PATCH v3 01/10] perf/cgroup: Reorder perf_cgroup_connect() Ian Rogers
2019-11-14  8:50   ` Peter Zijlstra
2019-11-14  0:30 ` [PATCH v3 02/10] lib: introduce generic min max heap Ian Rogers
2019-11-14  9:32   ` Peter Zijlstra
2019-11-14  9:35   ` Peter Zijlstra
2019-11-17 18:28   ` Joe Perches
2019-11-18  8:40     ` Peter Zijlstra
2019-11-18 11:50       ` Joe Perches
2019-11-18 12:21         ` Peter Zijlstra
2019-11-14  0:30 ` [PATCH v3 03/10] perf: Use min_max_heap in visit_groups_merge Ian Rogers
2019-11-14  9:39   ` Peter Zijlstra
2019-11-14  0:30 ` [PATCH v3 04/10] perf: Add per perf_cpu_context min_heap storage Ian Rogers
2019-11-14  9:51   ` Peter Zijlstra
2019-11-16  1:19     ` Ian Rogers
2019-11-14  0:30 ` [PATCH v3 05/10] perf/cgroup: Grow per perf_cpu_context heap storage Ian Rogers
2019-11-14  9:54   ` Peter Zijlstra
2019-11-14  0:30 ` [PATCH v3 06/10] perf/cgroup: Order events in RB tree by cgroup id Ian Rogers
2019-11-14  0:30 ` [PATCH v3 07/10] perf: simplify and rename visit_groups_merge Ian Rogers
2019-11-14 10:03   ` Peter Zijlstra
2019-11-16  1:20     ` Ian Rogers
2019-11-14  0:30 ` [PATCH v3 08/10] perf: cache perf_event_groups_first for cgroups Ian Rogers
2019-11-14 10:25   ` Peter Zijlstra
2019-11-16  1:20     ` Ian Rogers
2019-11-18  8:37       ` Peter Zijlstra
2019-11-14  0:30 ` [PATCH v3 09/10] perf: optimize event_filter_match during sched_in Ian Rogers
2019-11-14  0:30 ` [PATCH v3 10/10] perf/cgroup: Do not switch system-wide events in cgroup switch Ian Rogers
2019-11-14 10:43   ` Peter Zijlstra
2019-11-14 13:46     ` Liang, Kan
2019-11-14 13:57       ` Peter Zijlstra
2019-11-14 15:16         ` Liang, Kan
2019-11-14 15:24           ` Liang, Kan
2019-11-14 20:49             ` Liang, Kan
2019-11-14  0:42 ` [PATCH v3 00/10] Optimize cgroup context switch Ian Rogers
2019-11-14 10:45 ` Peter Zijlstra
2019-11-14 18:17   ` Ian Rogers
2019-12-06 23:16     ` Ian Rogers
2019-11-16  1:18 ` [PATCH v4 " Ian Rogers
2019-11-16  1:18   ` [PATCH v4 01/10] perf/cgroup: Reorder perf_cgroup_connect() Ian Rogers
2019-11-16  1:18   ` [PATCH v4 02/10] lib: introduce generic min max heap Ian Rogers
2019-11-21 11:11     ` Joe Perches
2019-11-16  1:18   ` [PATCH v4 03/10] perf: Use min_max_heap in visit_groups_merge Ian Rogers
2019-11-16  1:18   ` [PATCH v4 04/10] perf: Add per perf_cpu_context min_heap storage Ian Rogers
2019-11-16  1:18   ` [PATCH v4 05/10] perf/cgroup: Grow per perf_cpu_context heap storage Ian Rogers
2019-11-16  1:18   ` [PATCH v4 06/10] perf/cgroup: Order events in RB tree by cgroup id Ian Rogers
2019-11-16  1:18   ` [PATCH v4 07/10] perf: simplify and rename visit_groups_merge Ian Rogers
2019-11-16  1:18   ` [PATCH v4 08/10] perf: cache perf_event_groups_first for cgroups Ian Rogers
2019-11-16  1:18   ` [PATCH v4 09/10] perf: optimize event_filter_match during sched_in Ian Rogers
2019-11-16  1:18   ` [PATCH v4 10/10] perf/cgroup: Do not switch system-wide events in cgroup switch Ian Rogers
2019-12-06 23:15   ` [PATCH v5 00/10] Optimize cgroup context switch Ian Rogers
2019-12-06 23:15     ` [PATCH v5 01/10] perf/cgroup: Reorder perf_cgroup_connect() Ian Rogers
2019-12-06 23:15     ` [PATCH v5 02/10] lib: introduce generic min-heap Ian Rogers
2019-12-06 23:15     ` [PATCH v5 03/10] perf: Use min_max_heap in visit_groups_merge Ian Rogers
2019-12-08  7:10       ` kbuild test robot
2019-12-06 23:15     ` [PATCH v5 04/10] perf: Add per perf_cpu_context min_heap storage Ian Rogers
2019-12-06 23:15     ` [PATCH v5 05/10] perf/cgroup: Grow per perf_cpu_context heap storage Ian Rogers
2019-12-06 23:15     ` [PATCH v5 06/10] perf/cgroup: Order events in RB tree by cgroup id Ian Rogers
2019-12-06 23:15     ` [PATCH v5 07/10] perf: simplify and rename visit_groups_merge Ian Rogers
2019-12-06 23:15     ` [PATCH v5 08/10] perf: cache perf_event_groups_first for cgroups Ian Rogers
2019-12-06 23:15     ` [PATCH v5 09/10] perf: optimize event_filter_match during sched_in Ian Rogers
2019-12-06 23:15     ` Ian Rogers [this message]
2020-02-14  7:51     ` [PATCH v6 0/6] Optimize cgroup context switch Ian Rogers
2020-02-14  7:51       ` [PATCH v6 1/6] perf/cgroup: Reorder perf_cgroup_connect() Ian Rogers
2020-02-14 16:11         ` Shuah Khan
2020-02-14 17:37           ` Peter Zijlstra
2020-03-06 14:42         ` [tip: perf/core] " tip-bot2 for Peter Zijlstra
2020-02-14  7:51       ` [PATCH v6 2/6] lib: introduce generic min-heap Ian Rogers
2020-02-14 22:06         ` Randy Dunlap
2020-02-17 16:29         ` Peter Zijlstra
2020-03-06 14:42         ` [tip: perf/core] lib: Introduce " tip-bot2 for Ian Rogers
2020-02-14  7:51       ` [PATCH v6 3/6] perf: Use min_heap in visit_groups_merge Ian Rogers
2020-02-17 17:23         ` Peter Zijlstra
2020-03-06 14:42         ` [tip: perf/core] perf/core: Use min_heap in visit_groups_merge() tip-bot2 for Ian Rogers
2020-02-14  7:51       ` [PATCH v6 4/6] perf: Add per perf_cpu_context min_heap storage Ian Rogers
2020-03-06 14:42         ` [tip: perf/core] perf/core: " tip-bot2 for Ian Rogers
2020-02-14  7:51       ` [PATCH v6 5/6] perf/cgroup: Grow per perf_cpu_context heap storage Ian Rogers
2020-03-06 14:42         ` [tip: perf/core] " tip-bot2 for Ian Rogers
2020-02-14  7:51       ` [PATCH v6 6/6] perf/cgroup: Order events in RB tree by cgroup id Ian Rogers
2020-02-14 19:32       ` [PATCH v6 0/6] Optimize cgroup context switch Ian Rogers
2020-02-17 16:18       ` Peter Zijlstra
