From: Vincent Guittot <vincent.guittot@linaro.org>
To: peterz@infradead.org, mingo@kernel.org,
	linux-kernel@vger.kernel.org, preeti@linux.vnet.ibm.com,
	linux@arm.linux.org.uk, linux-arm-kernel@lists.infradead.org
Cc: riel@redhat.com, Morten.Rasmussen@arm.com, efault@gmx.de,
	nicolas.pitre@linaro.org, linaro-kernel@lists.linaro.org,
	daniel.lezcano@linaro.org, dietmar.eggemann@arm.com,
	Vincent Guittot <vincent.guittot@linaro.org>
Subject: [PATCH v5 08/12] sched: move cfs task on a CPU with higher capacity
Date: Tue, 26 Aug 2014 13:06:51 +0200
Message-ID: <1409051215-16788-9-git-send-email-vincent.guittot@linaro.org>
In-Reply-To: <1409051215-16788-1-git-send-email-vincent.guittot@linaro.org>

If the CPU is used for handling a lot of IRQs, trigger a load balance to
check whether it's worth moving its tasks to another CPU that has more capacity.

As a side note, this will not generate more spurious ILBs because we already
trigger an ILB if there is more than one busy CPU. If this CPU is the only one
that has a task, we will trigger the ILB once to migrate the task.

The nohz_kick_needed() function has been cleaned up a bit while adding the
new test.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
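
A minimal standalone sketch of the capacity test this patch adds (plain C,
not kernel code; the helper name capacity_reduced() and the sample values
are hypothetical): a CPU or group is treated as capacity-reduced when the
capacity left for CFS, scaled by imbalance_pct, falls below the original
capacity, i.e. below ~80% for the common imbalance_pct value of 125.

	#include <stdio.h>
	#include <stdbool.h>

	static bool capacity_reduced(unsigned long capacity_orig,
				     unsigned long capacity,
				     unsigned int imbalance_pct)
	{
		/*
		 * Same comparison as in the patch below: true when
		 * capacity < capacity_orig * 100 / imbalance_pct,
		 * i.e. below ~80% of original for imbalance_pct = 125.
		 */
		return capacity_orig * 100 > capacity * imbalance_pct;
	}

	int main(void)
	{
		/* RT tasks/IRQs left only 700 of 1024 for CFS: reduced */
		printf("%d\n", capacity_reduced(1024, 700, 125));	/* 1 */
		/* 900 of 1024 left: within tolerance, do nothing */
		printf("%d\n", capacity_reduced(1024, 900, 125));	/* 0 */
		return 0;
	}

Both update_sd_pick_busiest() and need_active_balance() in the diff apply
this same comparison, at the group level and the CPU level respectively.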
 kernel/sched/fair.c | 69 +++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 49 insertions(+), 20 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 18db43e..60ae1ce 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6049,6 +6049,14 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 			return true;
 	}
 
+	/*
+	 * The group capacity is probably reduced because of activity from
+	 * other sched classes or interrupts using part of the available capacity
+	 */
+	if ((sg->sgc->capacity_orig * 100) > (sgs->group_capacity *
+				env->sd->imbalance_pct))
+		return true;
+
 	return false;
 }
 
@@ -6534,13 +6542,23 @@ static int need_active_balance(struct lb_env *env)
 	struct sched_domain *sd = env->sd;
 
 	if (env->idle == CPU_NEWLY_IDLE) {
+		int src_cpu = env->src_cpu;
 
 		/*
 		 * ASYM_PACKING needs to force migrate tasks from busy but
 		 * higher numbered CPUs in order to pack all tasks in the
 		 * lowest numbered CPUs.
 		 */
-		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
+		if ((sd->flags & SD_ASYM_PACKING) && src_cpu > env->dst_cpu)
+			return 1;
+
+		/*
+		 * If the CPUs share their cache and the src_cpu's capacity is
+		 * reduced because of other sched_class activity or IRQs, we
+		 * trigger an active balance to move the task
+		 */
+		if ((capacity_orig_of(src_cpu) * 100) > (capacity_of(src_cpu) *
+				sd->imbalance_pct))
 			return 1;
 	}
 
@@ -6643,6 +6661,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
 
+	env.src_cpu = busiest->cpu;
+
 	ld_moved = 0;
 	if (busiest->nr_running > 1) {
 		/*
@@ -6652,7 +6672,6 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
-		env.src_cpu   = busiest->cpu;
 		env.src_rq    = busiest;
 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
@@ -7359,10 +7378,12 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 
 /*
  * Current heuristic for kicking the idle load balancer in the presence
- * of an idle cpu is the system.
+ * of an idle cpu in the system.
  *   - This rq has more than one task.
- *   - At any scheduler domain level, this cpu's scheduler group has multiple
- *     busy cpu's exceeding the group's capacity.
+ *   - This rq has at least one CFS task and the capacity of the CPU is
+ *     significantly reduced because of RT tasks or IRQs.
+ *   - At the parent of the LLC scheduler domain level, this cpu's scheduler
+ *     group has multiple busy cpus.
  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
  *     domain span are idle.
  */
@@ -7372,9 +7393,10 @@ static inline int nohz_kick_needed(struct rq *rq)
 	struct sched_domain *sd;
 	struct sched_group_capacity *sgc;
 	int nr_busy, cpu = rq->cpu;
+	bool kick = false;
 
 	if (unlikely(rq->idle_balance))
-		return 0;
+		return false;
 
        /*
 	* We may be recently in ticked or tickless idle mode. At the first
@@ -7388,38 +7410,45 @@ static inline int nohz_kick_needed(struct rq *rq)
 	 * balancing.
 	 */
 	if (likely(!atomic_read(&nohz.nr_cpus)))
-		return 0;
+		return false;
 
 	if (time_before(now, nohz.next_balance))
-		return 0;
+		return false;
 
 	if (rq->nr_running >= 2)
-		goto need_kick;
+		return true;
 
 	rcu_read_lock();
 	sd = rcu_dereference(per_cpu(sd_busy, cpu));
-
 	if (sd) {
 		sgc = sd->groups->sgc;
 		nr_busy = atomic_read(&sgc->nr_busy_cpus);
 
-		if (nr_busy > 1)
-			goto need_kick_unlock;
+		if (nr_busy > 1) {
+			kick = true;
+			goto unlock;
+		}
+
 	}
 
-	sd = rcu_dereference(per_cpu(sd_asym, cpu));
+	sd = rcu_dereference(rq->sd);
+	if (sd) {
+		if ((rq->cfs.h_nr_running >= 1) &&
+				((rq->cpu_capacity * sd->imbalance_pct) <
+				(rq->cpu_capacity_orig * 100))) {
+			kick = true;
+			goto unlock;
+		}
+	}
 
+	sd = rcu_dereference(per_cpu(sd_asym, cpu));
 	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
 				  sched_domain_span(sd)) < cpu))
-		goto need_kick_unlock;
+		kick = true;
 
+unlock:
 	rcu_read_unlock();
-	return 0;
-
-need_kick_unlock:
-	rcu_read_unlock();
-need_kick:
-	return 1;
+	return kick;
 }
 #else
 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
-- 
1.9.1
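
For readers skimming the archive, a simplified sketch of the decision order
in nohz_kick_needed() after this patch. The struct and field names below are
hypothetical stand-ins (the real function reads struct rq and walks the
sched domains under rcu_read_lock()), and the SD_ASYM_PACKING test is
omitted:

	#include <stdbool.h>

	struct rq_snapshot {
		unsigned int nr_running;	/* all runnable tasks on this rq */
		unsigned int cfs_nr_running;	/* CFS tasks only */
		unsigned long cpu_capacity;	/* capacity left for CFS */
		unsigned long cpu_capacity_orig;/* original full capacity */
		unsigned int imbalance_pct;	/* e.g. 125 */
		unsigned int llc_busy_cpus;	/* busy cpus in the LLC group */
	};

	static bool nohz_kick_needed_sketch(const struct rq_snapshot *rq)
	{
		if (rq->nr_running >= 2)
			return true;	/* more than one task */

		if (rq->llc_busy_cpus > 1)
			return true;	/* busy siblings sharing the LLC */

		if (rq->cfs_nr_running >= 1 &&
		    rq->cpu_capacity * rq->imbalance_pct <
		    rq->cpu_capacity_orig * 100)
			return true;	/* capacity eaten by RT tasks/IRQs */

		return false;
	}

	int main(void)
	{
		/* one CFS task, 700 of 1024 capacity left: kick the ILB */
		struct rq_snapshot rq = { 1, 1, 700, 1024, 125, 1 };
		return nohz_kick_needed_sketch(&rq) ? 0 : 1;
	}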

