From: kan.liang@linux.intel.com
To: peterz@infradead.org, acme@kernel.org, mingo@kernel.org,
	linux-kernel@vger.kernel.org
Cc: tglx@linutronix.de, bp@alien8.de, namhyung@kernel.org,
	jolsa@redhat.com, ak@linux.intel.com, yao.jin@linux.intel.com,
	alexander.shishkin@linux.intel.com, adrian.hunter@intel.com,
	Kan Liang <kan.liang@linux.intel.com>
Subject: [PATCH 16/49] perf/x86: Register hybrid PMUs
Date: Mon,  8 Feb 2021 07:25:13 -0800
Message-ID: <1612797946-18784-17-git-send-email-kan.liang@linux.intel.com>
In-Reply-To: <1612797946-18784-1-git-send-email-kan.liang@linux.intel.com>

From: Kan Liang <kan.liang@linux.intel.com>

Different hybrid PMUs have different PMU capabilities and events. Perf
should register a dedicated PMU for each of them.

To check whether an event is an X86 event, perf now has to iterate over
all possible hybrid PMUs.

Only the PMU for the boot CPU is registered in init_hw_perf_events(),
because the boot CPU is the only online CPU at that moment.
init_hybrid_pmu() is introduced to register and initialize the PMU of
the other type when the first CPU of that type comes online.

All hybrid PMUs have the capability PERF_PMU_CAP_HETEROGENEOUS_CPUS.
The PMU name for hybrid PMUs will be "cpu_XXX", which will be assigned
later in a separate patch.

The PMU type id for the core PMU is still PERF_TYPE_RAW. For the other
hybrid PMUs, the PMU type id is not hard-coded; perf_pmu_register()
assigns it dynamically.
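
Because those ids are assigned dynamically, user space cannot hard-code
them either; it has to read the id from sysfs before opening an event.
A minimal user-space sketch (illustrative only: it assumes a hybrid PMU
named "cpu_atom", whose name is only assigned later in this series, and
that CPU 0 is one of that PMU's supported CPUs):

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  /* Read the dynamically assigned type id of a PMU from sysfs. */
  static int read_pmu_type(const char *name)
  {
          char path[128];
          int type = -1;
          FILE *f;

          snprintf(path, sizeof(path),
                   "/sys/bus/event_source/devices/%s/type", name);
          f = fopen(path, "r");
          if (!f)
                  return -1;
          if (fscanf(f, "%d", &type) != 1)
                  type = -1;
          fclose(f);
          return type;
  }

  int main(void)
  {
          struct perf_event_attr attr = { 0 };
          int type, fd;

          type = read_pmu_type("cpu_atom");     /* assumed PMU name */
          if (type < 0)
                  return 1;

          attr.size = sizeof(attr);
          attr.type = type;                     /* not PERF_TYPE_RAW */
          attr.config = 0x3c;                   /* unhalted core cycles */

          /* CPU 0 is assumed to be one of the PMU's supported CPUs. */
          fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }
          close(fd);
          return 0;
  }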

The event->cpu must be compatible with the supported CPUs of the PMU.
Add a check in x86_pmu_event_init().

The events in a group must be from the same type of hybrid PMU.
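
Both checks surface to user space as perf_event_open() failures. A
hedged sketch of the rejected cases, continuing the snippet above and
reusing its hypothetical read_pmu_type() helper (the "cpu_core" name
and the CPU numbering are likewise assumptions):

  /*
   * Opening a cpu_atom event on a CPU the PMU does not support fails
   * in x86_pmu_event_init() with -ENOENT. CPU 4 is assumed here to be
   * a Core-only CPU.
   */
  attr.type = read_pmu_type("cpu_atom");
  fd = syscall(__NR_perf_event_open, &attr, -1, 4, -1, 0);  /* ENOENT */

  /*
   * Grouping events from different hybrid PMUs is rejected by
   * validate_group(), so the second call fails with -EINVAL.
   */
  attr.type = read_pmu_type("cpu_core");
  leader = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  attr.type = read_pmu_type("cpu_atom");
  fd = syscall(__NR_perf_event_open, &attr, 0, -1, leader, 0); /* EINVAL */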

Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
 arch/x86/events/core.c       |  98 +++++++++++++++++++++++++++++++++-------
 arch/x86/events/intel/core.c | 105 ++++++++++++++++++++++++++++++++++++++++++-
 arch/x86/events/perf_event.h |  24 ++++++++++
 3 files changed, 211 insertions(+), 16 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index bbd87b7..44ad8dc 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -480,7 +480,7 @@ int x86_setup_perfctr(struct perf_event *event)
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
 
-	if (attr->type == PERF_TYPE_RAW)
+	if (attr->type == event->pmu->type)
 		return x86_pmu_extra_regs(event->attr.config, event);
 
 	if (attr->type == PERF_TYPE_HW_CACHE)
@@ -615,7 +615,7 @@ int x86_pmu_hw_config(struct perf_event *event)
 	if (!event->attr.exclude_kernel)
 		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
 
-	if (event->attr.type == PERF_TYPE_RAW)
+	if (event->attr.type == event->pmu->type)
 		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
 	if (event->attr.sample_period && x86_pmu.limit_period) {
@@ -746,7 +746,16 @@ static struct pmu pmu;
 
 static inline int is_x86_event(struct perf_event *event)
 {
-	return event->pmu == &pmu;
+	int bit;
+
+	if (!IS_X86_HYBRID)
+		return event->pmu == &pmu;
+
+	for_each_set_bit(bit, &x86_pmu.hybrid_pmu_bitmap, X86_HYBRID_PMU_MAX_INDEX) {
+		if (event->pmu == &x86_pmu.hybrid_pmu[bit].pmu)
+			return true;
+	}
+	return false;
 }
 
 struct pmu *x86_get_pmu(void)
@@ -2053,8 +2062,11 @@ static int __init init_hw_perf_events(void)
 
 	pmu.attr_update = x86_pmu.attr_update;
 
-	x86_pmu_show_pmu_cap(x86_pmu.num_counters, x86_pmu.num_counters_fixed,
-			     x86_pmu.intel_ctrl);
+	if (!IS_X86_HYBRID) {
+		x86_pmu_show_pmu_cap(x86_pmu.num_counters,
+				     x86_pmu.num_counters_fixed,
+				     x86_pmu.intel_ctrl);
+	}
 
 	if (!x86_pmu.read)
 		x86_pmu.read = _x86_pmu_read;
@@ -2084,9 +2096,36 @@ static int __init init_hw_perf_events(void)
 	if (err)
 		goto out1;
 
-	err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-	if (err)
-		goto out2;
+	if (!IS_X86_HYBRID) {
+		err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+		if (err)
+			goto out2;
+	} else {
+		struct x86_hybrid_pmu *hybrid_pmu;
+		int bit;
+
+		for_each_set_bit(bit, &x86_pmu.hybrid_pmu_bitmap, X86_HYBRID_PMU_MAX_INDEX) {
+			hybrid_pmu = &x86_pmu.hybrid_pmu[bit];
+
+			hybrid_pmu->pmu = pmu;
+			hybrid_pmu->pmu.type = -1;
+			hybrid_pmu->pmu.attr_update = x86_pmu.attr_update;
+			hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
+
+			/* Only register the PMU for the boot CPU */
+			if (bit != x86_hybrid_get_idx_from_cpu(smp_processor_id()))
+				continue;
+
+			if (X86_HYBRID_PMU_CORE_IDX == bit)
+				err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name, PERF_TYPE_RAW);
+			else
+				err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name, -1);
+			if (err)
+				clear_bit(bit, &x86_pmu.hybrid_pmu_bitmap);
+		}
+		if (!x86_pmu.hybrid_pmu_bitmap)
+			goto out2;
+	}
 
 	return 0;
 
@@ -2221,6 +2260,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
 		return ERR_PTR(-ENOMEM);
 	cpuc->is_fake = 1;
 
+	if (IS_X86_HYBRID)
+		cpuc->hybrid_pmu_idx = x86_hybrid_get_idx_from_cpu(cpu);
+	else
+		cpuc->hybrid_pmu_idx = X86_NON_HYBRID_PMU;
+
 	if (intel_cpuc_prepare(cpuc, cpu))
 		goto error;
 
@@ -2273,6 +2317,28 @@ static int validate_group(struct perf_event *event)
 	struct cpu_hw_events *fake_cpuc;
 	int ret = -EINVAL, n;
 
+	/*
+	 * Reject events from different hybrid PMUs.
+	 */
+	if (IS_X86_HYBRID) {
+		struct perf_event *sibling;
+		struct pmu *pmu = NULL;
+
+		if (leader->pmu->task_ctx_nr == perf_hw_context)
+			pmu = leader->pmu;
+		else {
+			for_each_sibling_event(sibling, leader) {
+				if (sibling->pmu->task_ctx_nr == perf_hw_context) {
+					pmu = sibling->pmu;
+					break;
+				}
+			}
+		}
+
+		if (pmu && pmu != event->pmu)
+			return ret;
+	}
+
 	fake_cpuc = allocate_fake_cpuc();
 	if (IS_ERR(fake_cpuc))
 		return PTR_ERR(fake_cpuc);
@@ -2301,16 +2367,18 @@ static int validate_group(struct perf_event *event)
 
 static int x86_pmu_event_init(struct perf_event *event)
 {
+	struct x86_hybrid_pmu *hybrid_pmu = NULL;
 	int err;
 
-	switch (event->attr.type) {
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-		break;
-
-	default:
+	if ((event->attr.type != event->pmu->type) &&
+	    (event->attr.type != PERF_TYPE_HARDWARE) &&
+	    (event->attr.type != PERF_TYPE_HW_CACHE))
 		return -ENOENT;
+
+	if (IS_X86_HYBRID && (event->cpu != -1)) {
+		hybrid_pmu = container_of(event->pmu, struct x86_hybrid_pmu, pmu);
+		if (!cpumask_test_cpu(event->cpu, &hybrid_pmu->supported_cpus))
+			return -ENOENT;
 	}
 
 	err = __x86_pmu_event_init(event);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 559b4e9..d2de342 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3721,7 +3721,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
 	}
 
-	if (event->attr.type != PERF_TYPE_RAW)
+	if ((event->attr.type == PERF_TYPE_HARDWARE) ||
+	    (event->attr.type == PERF_TYPE_HW_CACHE))
 		return 0;
 
 	/*
@@ -4304,12 +4305,97 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
 	}
 }
 
+static void init_hybrid_pmu(int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	int idx = x86_hybrid_get_idx_from_cpu(cpu);
+	struct x86_hybrid_pmu *pmu;
+	struct perf_cpu_context *cpuctx;
+	unsigned int fixed_mask, unused_eax, unused_ebx, unused_edx;
+
+	if (WARN_ON(!IS_VALID_HYBRID_PMU_IDX(idx)))
+		return;
+
+	if (!test_bit(idx, &x86_pmu.hybrid_pmu_bitmap))
+		return;
+
+	cpuc->hybrid_pmu_idx = idx;
+	pmu = &x86_pmu.hybrid_pmu[idx];
+
+	/* Only register PMU for the first CPU */
+	if (!cpumask_empty(&pmu->supported_cpus)) {
+		cpumask_set_cpu(cpu, &pmu->supported_cpus);
+
+		/*
+		 * The cpuctx of all CPUs are allocated when registering the
+		 * boot CPU's PMU. At that time, the PMU for other hybrid CPUs
+		 * is not registered yet. The boot CPU's PMU was
+		 * unconditionally assigned to each cpuctx->ctx.pmu.
+		 * Update the cpuctx->ctx.pmu when the PMU for other hybrid
+		 * CPUs is known.
+		 */
+		cpuctx = per_cpu_ptr(pmu->pmu.pmu_cpu_context, cpu);
+		cpuctx->ctx.pmu = &pmu->pmu;
+		return;
+	}
+
+	if ((pmu->pmu.type == -1) &&
+	    perf_pmu_register(&pmu->pmu, pmu->name,
+			      (idx == X86_HYBRID_PMU_CORE_IDX) ? PERF_TYPE_RAW : -1))
+		return;
+
+	cpuctx = per_cpu_ptr(pmu->pmu.pmu_cpu_context, cpu);
+	cpuctx->ctx.pmu = &pmu->pmu;
+
+	/*
+	 * Except for ECX, other fields have been stored in the x86 struct
+	 * at boot time.
+	 */
+	cpuid(10, &unused_eax, &unused_ebx, &fixed_mask, &unused_edx);
+
+	intel_pmu_check_num_counters(&pmu->num_counters,
+				     &pmu->num_counters_fixed,
+				     &pmu->intel_ctrl,
+				     (u64)fixed_mask);
+
+	pr_info("%s PMU driver: ", pmu->name);
+
+	if (pmu->intel_cap.perf_metrics) {
+		pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
+		pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS;
+	}
+
+	if (pmu->intel_cap.pebs_output_pt_available) {
+		pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+		pr_cont("PEBS-via-PT ");
+	}
+
+	intel_pmu_check_event_constraints(pmu->event_constraints,
+					  pmu->num_counters,
+					  pmu->num_counters_fixed,
+					  pmu->intel_ctrl);
+
+	intel_pmu_check_extra_regs(pmu->extra_regs);
+
+	pr_cont("\n");
+
+	x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
+			     pmu->intel_ctrl);
+
+	cpumask_set_cpu(cpu, &pmu->supported_cpus);
+
+	return;
+}
+
 static void intel_pmu_cpu_starting(int cpu)
 {
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	int core_id = topology_core_id(cpu);
 	int i;
 
+	if (IS_X86_HYBRID)
+		init_hybrid_pmu(cpu);
+
 	init_debug_store_on_cpu(cpu);
 	/*
 	 * Deal with CPUs that don't clear their LBRs on power-up.
@@ -4424,6 +4510,23 @@ void intel_cpuc_finish(struct cpu_hw_events *cpuc)
 static void intel_pmu_cpu_dead(int cpu)
 {
 	intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
+
+	if (IS_X86_HYBRID) {
+		struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+		int idx = x86_hybrid_get_idx_from_cpu(cpu);
+		struct x86_hybrid_pmu *hybrid_pmu;
+
+		if (WARN_ON(!IS_VALID_HYBRID_PMU_IDX(idx)))
+			return;
+
+		hybrid_pmu = &x86_pmu.hybrid_pmu[idx];
+		cpumask_clear_cpu(cpu, &hybrid_pmu->supported_cpus);
+		cpuc->hybrid_pmu_idx = X86_NON_HYBRID_PMU;
+		if (cpumask_empty(&hybrid_pmu->supported_cpus)) {
+			perf_pmu_unregister(&hybrid_pmu->pmu);
+			hybrid_pmu->pmu.type = -1;
+		}
+	}
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index d5fcc15..740ba48 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -643,9 +643,14 @@ enum x86_hybrid_pmu_type_idx {
 	X86_HYBRID_PMU_MAX_INDEX
 };
 
+#define X86_HYBRID_ATOM_CPU_TYPE	0x20
+#define X86_HYBRID_CORE_CPU_TYPE	0x40
 
 struct x86_hybrid_pmu {
 	struct pmu			pmu;
+	const char			*name;
+	u32				cpu_type;
+	cpumask_t			supported_cpus;
 	union perf_capabilities		intel_cap;
 	u64				intel_ctrl;
 	int				max_pebs_events;
@@ -679,6 +684,25 @@ struct x86_hybrid_pmu {
 #define X86_HYBRID_READ_FROM_EVENT(_name, _event)			\
 	(IS_X86_HYBRID ? ((struct x86_hybrid_pmu *)(_event->pmu))->_name : x86_pmu._name)
 
+#define IS_VALID_HYBRID_PMU_IDX(idx)					\
+	(idx < X86_HYBRID_PMU_MAX_INDEX && idx > X86_NON_HYBRID_PMU)
+
+static inline enum x86_hybrid_pmu_type_idx
+x86_hybrid_get_idx_from_cpu(unsigned int cpu)
+{
+	unsigned int cpu_type = cpu_data(cpu).x86_cpu_type >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
+
+	switch (cpu_type) {
+	case X86_HYBRID_ATOM_CPU_TYPE:
+		return X86_HYBRID_PMU_ATOM_IDX;
+	case X86_HYBRID_CORE_CPU_TYPE:
+		return X86_HYBRID_PMU_CORE_IDX;
+	default:
+		pr_warn("CPU %u: invalid cpu type %u\n", cpu, cpu_type);
+	}
+	return X86_NON_HYBRID_PMU;
+}
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
-- 
2.7.4

