From: Peter Zijlstra <peterz@infradead.org>
To: x86@kernel.org
Cc: peterz@infradead.org, linux-kernel@vger.kernel.org,
	rostedt@goodmis.org, mhiramat@kernel.org, bristot@redhat.com,
	jbaron@akamai.com, torvalds@linux-foundation.org,
	tglx@linutronix.de, mingo@kernel.org, namit@vmware.com,
	hpa@zytor.com, luto@kernel.org, ard.biesheuvel@linaro.org,
	jpoimboe@redhat.com
Subject: [RFC][PATCH v2 13/13] x86/perf, static_call: Optimize x86_pmu methods
Date: Mon, 07 Oct 2019 10:27:21 +0200	[thread overview]
Message-ID: <20191007083831.26880701.6@infradead.org> (raw)
In-Reply-To: <20191007082708.01393931.1@infradead.org>

Replace many of the x86_pmu indirect calls with static_call(), turning
them into patched direct calls on the perf fast paths.

XXX run performance numbers
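
For readers new to the API, here is a minimal sketch (not part of the
patch) of the conversion pattern applied below, using the v2 names;
static_cond_call() was later merged upstream as static_call_cond(). The
pmu_ops structure and all function names in the sketch are hypothetical
stand-ins for struct x86_pmu and its methods.

	#include <linux/perf_event.h>
	#include <linux/static_call.h>

	/* Hypothetical method table standing in for struct x86_pmu. */
	struct pmu_ops {
		int  (*handle_irq)(struct pt_regs *regs);
		void (*sched_task)(struct perf_event_context *ctx, bool sched_in);
	};

	/*
	 * Defaults: a real fallback for a mandatory method, an empty stub
	 * for an optional one, so each call site has a valid target from boot.
	 */
	static int _pmu_handle_irq(struct pt_regs *regs) { return 0; }
	static void _pmu_sched_task(struct perf_event_context *ctx, bool sched_in) { }

	DEFINE_STATIC_CALL(pmu_handle_irq, _pmu_handle_irq);
	DEFINE_STATIC_CALL(pmu_sched_task, _pmu_sched_task);

	/* Driver init: patch every call site to jump straight at the method. */
	static void pmu_static_call_update(struct pmu_ops *ops)
	{
		static_call_update(pmu_handle_irq, ops->handle_irq);
		static_call_update(pmu_sched_task, ops->sched_task);
	}

	static int pmu_irq(struct pt_regs *regs)
	{
		/* Mandatory method: a direct call, no indirect branch. */
		return static_call(pmu_handle_irq)(regs);
	}

	static void pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
	{
		/*
		 * Optional method; replaces the old
		 *	if (ops->sched_task) ops->sched_task(ctx, sched_in);
		 * idiom: with a NULL target the site is skipped entirely.
		 */
		static_cond_call(pmu_sched_task)(ctx, sched_in);
	}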

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/events/core.c |  136 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 98 insertions(+), 38 deletions(-)

--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -28,6 +28,7 @@
 #include <linux/bitops.h>
 #include <linux/device.h>
 #include <linux/nospec.h>
+#include <linux/static_call.h>
 
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
@@ -51,6 +52,45 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu
 
 DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
 
+static void _x86_pmu_add(struct perf_event *event) { }
+static void _x86_pmu_del(struct perf_event *event) { }
+static void _x86_pmu_read(struct perf_event *event) { x86_perf_event_update(event); }
+static void _x86_pmu_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { }
+static void _x86_pmu_drain_pebs(struct pt_regs *regs) { }
+static void _x86_pmu_pebs_aliases(struct perf_event *event) { }
+static void _x86_pmu_start_scheduling(struct cpu_hw_events *cpuc) { }
+static void _x86_pmu_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) { }
+static void _x86_pmu_stop_scheduling(struct cpu_hw_events *cpuc) { }
+static void _x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) { }
+
+DEFINE_STATIC_CALL(x86_pmu_handle_irq, x86_pmu_handle_irq);
+DEFINE_STATIC_CALL(x86_pmu_disable_all, x86_pmu_disable_all);
+DEFINE_STATIC_CALL(x86_pmu_enable_all, x86_pmu_enable_all);
+DEFINE_STATIC_CALL(x86_pmu_enable, x86_pmu_enable_event);
+DEFINE_STATIC_CALL(x86_pmu_disable, x86_pmu_disable_event);
+
+DEFINE_STATIC_CALL(x86_pmu_add, _x86_pmu_add);
+DEFINE_STATIC_CALL(x86_pmu_del, _x86_pmu_del);
+DEFINE_STATIC_CALL(x86_pmu_read, _x86_pmu_read);
+
+DEFINE_STATIC_CALL(x86_pmu_schedule_events, x86_schedule_events);
+
+// addr_offset
+// rdpmc_index
+// event_map
+
+DEFINE_STATIC_CALL(x86_pmu_get_event_constraints, x86_get_event_constraints);
+DEFINE_STATIC_CALL(x86_pmu_put_event_constraints, _x86_pmu_put_event_constraints);
+
+DEFINE_STATIC_CALL(x86_pmu_drain_pebs, _x86_pmu_drain_pebs);
+DEFINE_STATIC_CALL(x86_pmu_pebs_aliases, _x86_pmu_pebs_aliases);
+
+DEFINE_STATIC_CALL(x86_pmu_start_scheduling, _x86_pmu_start_scheduling);
+DEFINE_STATIC_CALL(x86_pmu_commit_scheduling, _x86_pmu_commit_scheduling);
+DEFINE_STATIC_CALL(x86_pmu_stop_scheduling, _x86_pmu_stop_scheduling);
+
+DEFINE_STATIC_CALL(x86_pmu_sched_task, _x86_pmu_sched_task);
+
 u64 __read_mostly hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -651,7 +691,7 @@ static void x86_pmu_disable(struct pmu *
 	cpuc->enabled = 0;
 	barrier();
 
-	x86_pmu.disable_all();
+	static_call(x86_pmu_disable_all)();
 }
 
 void x86_pmu_enable_all(int added)
@@ -884,8 +924,7 @@ int x86_schedule_events(struct cpu_hw_ev
 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
 		n0 -= cpuc->n_txn;
 
-	if (x86_pmu.start_scheduling)
-		x86_pmu.start_scheduling(cpuc);
+	static_cond_call(x86_pmu_start_scheduling)(cpuc);
 
 	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
 		c = cpuc->event_constraint[i];
@@ -902,7 +941,7 @@ int x86_schedule_events(struct cpu_hw_ev
 		 * change due to external factors (sibling state, allow_tfa).
 		 */
 		if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
-			c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
+			c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]);
 			cpuc->event_constraint[i] = c;
 		}
 
@@ -969,8 +1008,7 @@ int x86_schedule_events(struct cpu_hw_ev
 	if (!unsched && assign) {
 		for (i = 0; i < n; i++) {
 			e = cpuc->event_list[i];
-			if (x86_pmu.commit_scheduling)
-				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
+			static_cond_call(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
 		}
 	} else {
 		for (i = n0; i < n; i++) {
@@ -979,15 +1017,13 @@ int x86_schedule_events(struct cpu_hw_ev
 			/*
 			 * release events that failed scheduling
 			 */
-			if (x86_pmu.put_event_constraints)
-				x86_pmu.put_event_constraints(cpuc, e);
+			static_cond_call(x86_pmu_put_event_constraints)(cpuc, e);
 
 			cpuc->event_constraint[i] = NULL;
 		}
 	}
 
-	if (x86_pmu.stop_scheduling)
-		x86_pmu.stop_scheduling(cpuc);
+	static_cond_call(x86_pmu_stop_scheduling)(cpuc);
 
 	return unsched ? -EINVAL : 0;
 }
@@ -1174,7 +1210,7 @@ static void x86_pmu_enable(struct pmu *p
 	cpuc->enabled = 1;
 	barrier();
 
-	x86_pmu.enable_all(added);
+	static_call(x86_pmu_enable_all)(added);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -1288,7 +1324,7 @@ static int x86_pmu_add(struct perf_event
 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
 		goto done_collect;
 
-	ret = x86_pmu.schedule_events(cpuc, n, assign);
+	ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
 	if (ret)
 		goto out;
 	/*
@@ -1306,13 +1342,11 @@ static int x86_pmu_add(struct perf_event
 	cpuc->n_added += n - n0;
 	cpuc->n_txn += n - n0;
 
-	if (x86_pmu.add) {
-		/*
-		 * This is before x86_pmu_enable() will call x86_pmu_start(),
-		 * so we enable LBRs before an event needs them etc..
-		 */
-		x86_pmu.add(event);
-	}
+	/*
+	 * This is before x86_pmu_enable() will call x86_pmu_start(),
+	 * so we enable LBRs before an event needs them etc..
+	 */
+	static_cond_call(x86_pmu_add)(event);
 
 	ret = 0;
 out:
@@ -1340,7 +1374,7 @@ static void x86_pmu_start(struct perf_ev
 	cpuc->events[idx] = event;
 	__set_bit(idx, cpuc->active_mask);
 	__set_bit(idx, cpuc->running);
-	x86_pmu.enable(event);
+	static_call(x86_pmu_enable)(event);
 	perf_event_update_userpage(event);
 }
 
@@ -1410,7 +1444,7 @@ void x86_pmu_stop(struct perf_event *eve
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (test_bit(hwc->idx, cpuc->active_mask)) {
-		x86_pmu.disable(event);
+		static_call(x86_pmu_disable)(event);
 		__clear_bit(hwc->idx, cpuc->active_mask);
 		cpuc->events[hwc->idx] = NULL;
 		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
@@ -1460,8 +1494,7 @@ static void x86_pmu_del(struct perf_even
 	if (i >= cpuc->n_events - cpuc->n_added)
 		--cpuc->n_added;
 
-	if (x86_pmu.put_event_constraints)
-		x86_pmu.put_event_constraints(cpuc, event);
+	static_cond_call(x86_pmu_put_event_constraints)(cpuc, event);
 
 	/* Delete the array entry. */
 	while (++i < cpuc->n_events) {
@@ -1474,13 +1507,12 @@ static void x86_pmu_del(struct perf_even
 	perf_event_update_userpage(event);
 
 do_del:
-	if (x86_pmu.del) {
-		/*
-		 * This is after x86_pmu_stop(); so we disable LBRs after any
-		 * event can need them etc..
-		 */
-		x86_pmu.del(event);
-	}
+
+	/*
+	 * This is after x86_pmu_stop(); so we disable LBRs after any
+	 * event can need them etc..
+	 */
+	static_cond_call(x86_pmu_del)(event);
 }
 
 int x86_pmu_handle_irq(struct pt_regs *regs)
@@ -1558,7 +1590,7 @@ perf_event_nmi_handler(unsigned int cmd,
 		return NMI_DONE;
 
 	start_clock = sched_clock();
-	ret = x86_pmu.handle_irq(regs);
+	ret = static_call(x86_pmu_handle_irq)(regs);
 	finish_clock = sched_clock();
 
 	perf_sample_event_took(finish_clock - start_clock);
@@ -1765,6 +1797,32 @@ ssize_t x86_event_sysfs_show(char *page,
 static struct attribute_group x86_pmu_attr_group;
 static struct attribute_group x86_pmu_caps_group;
 
+static void x86_pmu_static_call_update(void)
+{
+	static_call_update(x86_pmu_handle_irq, x86_pmu.handle_irq);
+	static_call_update(x86_pmu_disable_all, x86_pmu.disable_all);
+	static_call_update(x86_pmu_enable_all, x86_pmu.enable_all);
+	static_call_update(x86_pmu_enable, x86_pmu.enable);
+	static_call_update(x86_pmu_disable, x86_pmu.disable);
+
+	static_call_update(x86_pmu_add, x86_pmu.add);
+	static_call_update(x86_pmu_del, x86_pmu.del);
+	static_call_update(x86_pmu_read, x86_pmu.read);
+
+	static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events);
+	static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints);
+	static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints);
+
+	static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
+	static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
+
+	static_call_update(x86_pmu_start_scheduling, x86_pmu.start_scheduling);
+	static_call_update(x86_pmu_commit_scheduling, x86_pmu.commit_scheduling);
+	static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
+
+	static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
+}
+
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;
@@ -1829,6 +1887,11 @@ static int __init init_hw_perf_events(vo
 	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
+	if (!x86_pmu.read)
+		x86_pmu.read = _x86_pmu_read;
+
+	x86_pmu_static_call_update();
+
 	/*
 	 * Install callbacks. Core will call them for each online
 	 * cpu.
@@ -1865,11 +1928,9 @@ static int __init init_hw_perf_events(vo
 }
 early_initcall(init_hw_perf_events);
 
-static inline void x86_pmu_read(struct perf_event *event)
+static void x86_pmu_read(struct perf_event *event)
 {
-	if (x86_pmu.read)
-		return x86_pmu.read(event);
-	x86_perf_event_update(event);
+	static_call(x86_pmu_read)(event);
 }
 
 /*
@@ -1946,7 +2007,7 @@ static int x86_pmu_commit_txn(struct pmu
 	if (!x86_pmu_initialized())
 		return -EAGAIN;
 
-	ret = x86_pmu.schedule_events(cpuc, n, assign);
+	ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
 	if (ret)
 		return ret;
 
@@ -2239,8 +2300,7 @@ static const struct attribute_group *x86
 
 static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
 {
-	if (x86_pmu.sched_task)
-		x86_pmu.sched_task(ctx, sched_in);
+	static_cond_call(x86_pmu_sched_task)(ctx, sched_in);
 }
 
 void perf_check_microcode(void)
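
A note on the fallback wiring (my reading of the hunks above): the empty
_x86_pmu_*() stubs give every optional method's static_call a valid
default target, so the call sites are safe before
x86_pmu_static_call_update() installs the real methods. x86_pmu_read()
is the one special case: reading must always fall back to
x86_perf_event_update(), so init_hw_perf_events() plugs _x86_pmu_read()
into a NULL x86_pmu.read before the update runs, which lets that call
site stay a plain static_call() rather than static_cond_call():

	/* From the init hunk above: guarantee a real target for read. */
	if (!x86_pmu.read)
		x86_pmu.read = _x86_pmu_read;	/* x86_perf_event_update() */

	x86_pmu_static_call_update();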


