From: Odin Ugedal <odin@uged.al>
To: "Michal Koutný" <mkoutny@suse.com>
Cc: linux-kernel@vger.kernel.org,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Phil Auld <pauld@redhat.com>, Ingo Molnar <mingo@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Juri Lelli <juri.lelli@redhat.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Ben Segall <bsegall@google.com>, Mel Gorman <mgorman@suse.de>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	Odin Ugedal <odin@uged.al>, Rik van Riel <riel@surriel.com>,
	Giovanni Gherdovich <ggherdovich@suse.cz>
Subject: Re: [RFC PATCH v2 3/5] sched/fair: Rename leaf_list to more fitting load_list
Date: Tue, 21 Sep 2021 20:06:40 +0100	[thread overview]
Message-ID: <CAFpoUr36x44xRpU0CH3sVYNNt9n3JyUuPpWGSzPyH4OWbX8VpQ@mail.gmail.com> (raw)
In-Reply-To: <20210819175034.4577-4-mkoutny@suse.com>

On Thu, 19 Aug 2021 at 18:50, Michal Koutný <mkoutny@suse.com> wrote:
>
> The leaf_list name is obsolete and misleading. The list is nowadays used
> to hold cfs_rqs with non-zero PELT that has to be decayed to properly
> account for blocked tasks. Those can be inner nodes of the task_group
> tree as well.
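
As a toy illustration of "non-zero PELT that has to be decayed"
(simplified -- the kernel does this via a precomputed inverse table
in decay_load(), not this arithmetic):

    /*
     * Blocked tasks leave their contribution in cfs_rq->avg.  Each
     * 1024us period that contribution is scaled by y = 0.5^(1/32),
     * so the cfs_rq can only drop off this list once every such sum
     * has decayed to zero (cf. cfs_rq_is_decayed()).
     */
    static unsigned long decay_one_period(unsigned long sum)
    {
        return (sum * 4008) >> 12;  /* 4008/4096 ~ y = 0.5^(1/32) */
    }
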
>
> Signed-off-by: Michal Koutný <mkoutny@suse.com>
> ---
>  kernel/sched/core.c  |   4 +-
>  kernel/sched/fair.c  | 114 +++++++++++++++++++++----------------------
>  kernel/sched/sched.h |  17 +++----
>  3 files changed, 65 insertions(+), 70 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 20ffcc044134..e55a7c898cd9 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -8961,8 +8961,8 @@ void __init sched_init(void)
>                 init_rt_rq(&rq->rt);
>                 init_dl_rq(&rq->dl);
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> -               INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
> -               rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
> +               INIT_LIST_HEAD(&rq->load_cfs_rq_list);
> +               rq->tmp_alone_branch = &rq->load_cfs_rq_list;
>                 /*
>                  * How much CPU bandwidth does root_task_group get?
>                  *
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 905f95b91a7a..6f4d5d4dcdd9 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -286,13 +286,13 @@ static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
>                 strlcpy(path, "(null)", len);
>  }
>
> -static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
> +static inline bool list_add_load_cfs_rq(struct cfs_rq *cfs_rq)
>  {
>         struct rq *rq = rq_of(cfs_rq);
>         int cpu = cpu_of(rq);
>
>         if (cfs_rq->on_list)
> -               return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
> +               return rq->tmp_alone_branch == &rq->load_cfs_rq_list;
>
>         cfs_rq->on_list = 1;
>
> @@ -313,14 +313,14 @@ static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
>                  * the list, this means to put the child at the tail
>                  * of the list that starts by parent.
>                  */
> -               list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
> -                       &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
> +               list_add_tail_rcu(&cfs_rq->load_cfs_rq_list,
> +                       &(cfs_rq->tg->parent->cfs_rq[cpu]->load_cfs_rq_list));
>                 /*
>                  * The branch is now connected to its tree so we can
>                  * reset tmp_alone_branch to the beginning of the
>                  * list.
>                  */
> -               rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
> +               rq->tmp_alone_branch = &rq->load_cfs_rq_list;
>                 return true;
>         }
>
> @@ -329,13 +329,13 @@ static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
>                  * cfs rq without parent should be put
>                  * at the tail of the list.
>                  */
> -               list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
> -                       &rq->leaf_cfs_rq_list);
> +               list_add_tail_rcu(&cfs_rq->load_cfs_rq_list,
> +                       &rq->load_cfs_rq_list);
>                 /*
>                  * We have reach the top of a tree so we can reset
>                  * tmp_alone_branch to the beginning of the list.
>                  */
> -               rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
> +               rq->tmp_alone_branch = &rq->load_cfs_rq_list;
>                 return true;
>         }
>
> @@ -345,44 +345,44 @@ static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
>          * tmp_alone_branch points to the begin of the branch
>          * where we will add parent.
>          */
> -       list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
> +       list_add_rcu(&cfs_rq->load_cfs_rq_list, rq->tmp_alone_branch);
>         /*
>          * update tmp_alone_branch to points to the new begin
>          * of the branch
>          */
> -       rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
> +       rq->tmp_alone_branch = &cfs_rq->load_cfs_rq_list;
>         return false;
>  }
>
> -static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
> +static inline void list_del_load_cfs_rq(struct cfs_rq *cfs_rq)
>  {
>         if (cfs_rq->on_list) {
>                 struct rq *rq = rq_of(cfs_rq);
>
>                 /*
>                  * With cfs_rq being unthrottled/throttled during an enqueue,
> -                * it can happen the tmp_alone_branch points the a leaf that
> +                * it can happen the tmp_alone_branch points the cfs_rq that
>                  * we finally want to del. In this case, tmp_alone_branch moves
> -                * to the prev element but it will point to rq->leaf_cfs_rq_list
> +                * to the prev element but it will point to rq->load_cfs_rq_list
>                  * at the end of the enqueue.
>                  */
> -               if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
> -                       rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
> +               if (rq->tmp_alone_branch == &cfs_rq->load_cfs_rq_list)
> +                       rq->tmp_alone_branch = cfs_rq->load_cfs_rq_list.prev;
>
> -               list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
> +               list_del_rcu(&cfs_rq->load_cfs_rq_list);
>                 cfs_rq->on_list = 0;
>         }
>  }
>
> -static inline void assert_list_leaf_cfs_rq(struct rq *rq)
> +static inline void assert_list_load_cfs_rq(struct rq *rq)
>  {
> -       SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
> +       SCHED_WARN_ON(rq->tmp_alone_branch != &rq->load_cfs_rq_list);
>  }
>
> -/* Iterate thr' all leaf cfs_rq's on a runqueue */
> -#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)                     \
> -       list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,    \
> -                                leaf_cfs_rq_list)
> +/* Iterate thr' all loaded cfs_rq's on a runqueue */
> +#define for_each_load_cfs_rq_safe(rq, cfs_rq, pos)                     \
> +       list_for_each_entry_safe(cfs_rq, pos, &rq->load_cfs_rq_list,    \
> +                                load_cfs_rq_list)
>
>  /* Do the two (enqueued) entities belong to the same group ? */
>  static inline struct cfs_rq *
> @@ -442,20 +442,20 @@ static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
>                 strlcpy(path, "(null)", len);
>  }
>
> -static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
> +static inline bool list_add_load_cfs_rq(struct cfs_rq *cfs_rq)
>  {
>         return true;
>  }
>
> -static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
> +static inline void list_del_load_cfs_rq(struct cfs_rq *cfs_rq)
>  {
>  }
>
> -static inline void assert_list_leaf_cfs_rq(struct rq *rq)
> +static inline void assert_list_load_cfs_rq(struct rq *rq)
>  {
>  }
>
> -#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)     \
> +#define for_each_load_cfs_rq_safe(rq, cfs_rq, pos)     \
>                 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
>
>  static inline struct sched_entity *parent_entity(struct sched_entity *se)
> @@ -3257,12 +3257,12 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
>  #ifdef CONFIG_SMP
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>  /*
> - * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
> + * Because list_add_load_cfs_rq always places a child cfs_rq on the list
>   * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
>   * bottom-up, we only have to test whether the cfs_rq before us on the list
>   * is our child.
>   * If cfs_rq is not on the list, test whether a child needs its to be added to
> - * connect a branch to the tree  * (see list_add_leaf_cfs_rq() for details).
> + * connect a branch to the tree (see list_add_load_cfs_rq() for details).
>   */
>  static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
>  {
> @@ -3270,14 +3270,14 @@ static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
>         struct list_head *prev;
>
>         if (cfs_rq->on_list) {
> -               prev = cfs_rq->leaf_cfs_rq_list.prev;
> +               prev = cfs_rq->load_cfs_rq_list.prev;
>         } else {
>                 struct rq *rq = rq_of(cfs_rq);
>
>                 prev = rq->tmp_alone_branch;
>         }
>
> -       prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
> +       prev_cfs_rq = container_of(prev, struct cfs_rq, load_cfs_rq_list);
>
>         return (prev_cfs_rq->tg->parent == cfs_rq->tg);
>  }
> @@ -4298,7 +4298,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>          * add it unconditionally.
>          */
>         if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
> -               list_add_leaf_cfs_rq(cfs_rq);
> +               list_add_load_cfs_rq(cfs_rq);
>
>         if (cfs_rq->nr_running == 1)
>                 check_enqueue_throttle(cfs_rq);
> @@ -4775,7 +4775,7 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
>
>                 /* Add cfs_rq with load or one or more already running entities to the list */
>                 if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
> -                       list_add_leaf_cfs_rq(cfs_rq);
> +                       list_add_load_cfs_rq(cfs_rq);
>         }
>
>         return 0;
> @@ -4789,7 +4789,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
>         /* group is entering throttled state, stop time */
>         if (!cfs_rq->throttle_count) {
>                 cfs_rq->throttled_clock_task = rq_clock_task(rq);
> -               list_del_leaf_cfs_rq(cfs_rq);
> +               list_del_load_cfs_rq(cfs_rq);
>         }
>         cfs_rq->throttle_count++;
>
> @@ -4900,10 +4900,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
>                 /* Nothing to run but something to decay? Complete the branch */
>                 if (cfs_rq->on_list)
>                         for_each_sched_entity(se) {
> -                               if (list_add_leaf_cfs_rq(group_cfs_rq(se)))
> +                               if (list_add_load_cfs_rq(group_cfs_rq(se)))
>                                         break;
>                         }
> -               assert_list_leaf_cfs_rq(rq);
> +               assert_list_load_cfs_rq(rq);
>                 return;
>         }
>
> @@ -4939,10 +4939,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
>
>                 /*
>                  * One parent has been throttled and cfs_rq removed from the
> -                * list. Add it back to not break the leaf list.
> +                * list. Add it back to not break the load list.
>                  */
>                 if (throttled_hierarchy(cfs_rq))
> -                       list_add_leaf_cfs_rq(cfs_rq);
> +                       list_add_load_cfs_rq(cfs_rq);
>         }
>
>         /* At this point se is NULL and we are at root level*/
> @@ -4951,17 +4951,17 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
>  unthrottle_throttle:
>         /*
>          * The cfs_rq_throttled() breaks in the above iteration can result in
> -        * incomplete leaf list maintenance, resulting in triggering the
> +        * incomplete load list maintenance, resulting in triggering the
>          * assertion below.
>          */
>         for_each_sched_entity(se) {
>                 cfs_rq = cfs_rq_of(se);
>
> -               if (list_add_leaf_cfs_rq(cfs_rq))
> +               if (list_add_load_cfs_rq(cfs_rq))
>                         break;
>         }
>
> -       assert_list_leaf_cfs_rq(rq);
> +       assert_list_load_cfs_rq(rq);
>
>         /* Determine whether we need to wake up potentially idle CPU: */
>         if (rq->curr == rq->idle && rq->cfs.nr_running)
> @@ -5601,12 +5601,12 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
>                 if (cfs_rq_throttled(cfs_rq))
>                         goto enqueue_throttle;
>
> -               /*
> -                * One parent has been throttled and cfs_rq removed from the
> -                * list. Add it back to not break the leaf list.
> -                */
> -               if (throttled_hierarchy(cfs_rq))
> -                       list_add_leaf_cfs_rq(cfs_rq);
> +               /*
> +                * One parent has been throttled and cfs_rq removed from the
> +                * list. Add it back to not break the load list.
> +                */
> +               if (throttled_hierarchy(cfs_rq))
> +                       list_add_load_cfs_rq(cfs_rq);
>         }
>
>         /* At this point se is NULL and we are at root level*/
> @@ -5634,18 +5634,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
>                 /*
>                  * When bandwidth control is enabled; the cfs_rq_throttled()
>                  * breaks in the above iteration can result in incomplete
> -                * leaf list maintenance, resulting in triggering the assertion
> +                * load list maintenance, resulting in triggering the assertion
>                  * below.
>                  */
>                 for_each_sched_entity(se) {
>                         cfs_rq = cfs_rq_of(se);
>
> -                       if (list_add_leaf_cfs_rq(cfs_rq))
> +                       if (list_add_load_cfs_rq(cfs_rq))
>                                 break;
>                 }
>         }
>
> -       assert_list_leaf_cfs_rq(rq);
> +       assert_list_load_cfs_rq(rq);
>
>         hrtick_update(rq);
>  }
> @@ -8122,9 +8122,9 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
>
>         /*
>          * Iterates the task_group tree in a bottom up fashion, see
> -        * list_add_leaf_cfs_rq() for details.
> +        * list_add_load_cfs_rq() for details.
>          */
> -       for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
> +       for_each_load_cfs_rq_safe(rq, cfs_rq, pos) {
>                 struct sched_entity *se;
>
>                 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
> @@ -8144,7 +8144,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
>                  * decayed cfs_rqs linger on the list.
>                  */
>                 if (cfs_rq_is_decayed(cfs_rq))
> -                       list_del_leaf_cfs_rq(cfs_rq);
> +                       list_del_load_cfs_rq(cfs_rq);
>
>                 /* Don't need periodic decay once load/util_avg are null */
>                 if (cfs_rq_has_blocked(cfs_rq))
> @@ -11111,7 +11111,7 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
>  {
>         struct cfs_rq *cfs_rq;
>
> -       list_add_leaf_cfs_rq(cfs_rq_of(se));
> +       list_add_load_cfs_rq(cfs_rq_of(se));
>
>         /* Start to propagate at parent */
>         se = se->parent;
> @@ -11121,11 +11121,11 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
>
>                 if (!cfs_rq_throttled(cfs_rq)){
>                         update_load_avg(cfs_rq, se, UPDATE_TG);
> -                       list_add_leaf_cfs_rq(cfs_rq);
> +                       list_add_load_cfs_rq(cfs_rq);
>                         continue;
>                 }
>
> -               if (list_add_leaf_cfs_rq(cfs_rq))
> +               if (list_add_load_cfs_rq(cfs_rq))
>                         break;
>         }
>  }
> @@ -11383,7 +11383,7 @@ void unregister_fair_sched_group(struct task_group *tg)
>                 rq = cpu_rq(cpu);
>
>                 raw_spin_rq_lock_irqsave(rq, flags);
> -               list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
> +               list_del_load_cfs_rq(tg->cfs_rq[cpu]);
>                 raw_spin_rq_unlock_irqrestore(rq, flags);
>         }
>  }
> @@ -11543,7 +11543,7 @@ void print_cfs_stats(struct seq_file *m, int cpu)
>         struct cfs_rq *cfs_rq, *pos;
>
>         rcu_read_lock();
> -       for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
> +       for_each_load_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
>                 print_cfs_rq(m, cpu, cfs_rq);
>         rcu_read_unlock();
>  }
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 219ee463fe64..dc9382295ec9 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -587,16 +587,8 @@ struct cfs_rq {
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>         struct rq               *rq;    /* CPU runqueue to which this cfs_rq is attached */
>
> -       /*
> -        * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
> -        * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
> -        * (like users, containers etc.)
> -        *
> -        * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
> -        * This list is used during load balance.
> -        */
>         int                     on_list;
> -       struct list_head        leaf_cfs_rq_list;
> +       struct list_head        load_cfs_rq_list;
>         struct task_group       *tg;    /* group that "owns" this runqueue */
>
>  #ifdef CONFIG_CFS_BANDWIDTH
> @@ -950,8 +942,11 @@ struct rq {
>         struct dl_rq            dl;
>
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> -       /* list of leaf cfs_rq on this CPU: */
> -       struct list_head        leaf_cfs_rq_list;
> +       /* Bottom up ordered list of cfs_rqs with load (see
> +        * cfs_rq_is_decayed()) on this CPU.
> +        * This list is used during load balance.
> +        */

Fully agree that a rename of some sort is good, as I struggled to
understand the name in the beginning as well. However, I am not sure
"load_list" is the best one (though it is definitely better): a cfs_rq
will be on the list even if it has no load itself, as long as some of
its descendants have. Also, "load->weight" is not _really_ load, but
that all depends on definition, I guess.
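
To illustrate the first point, a toy sketch (not the kernel code --
it compresses the net effect of the list_add loops in the patch
above):

    struct toy_cfs_rq {
        struct toy_cfs_rq *parent;
        unsigned long load_avg;     /* can be zero for an ancestor */
        int on_list;
    };

    /*
     * Link every not-yet-linked ancestor so the list stays a
     * connected, bottom-up ordered branch -- an ancestor is added
     * as a pure connector even when its own load_avg is zero.
     */
    static void toy_list_add(struct toy_cfs_rq *cfs_rq)
    {
        for (; cfs_rq && !cfs_rq->on_list; cfs_rq = cfs_rq->parent)
            cfs_rq->on_list = 1;
    }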

I have no better suggestion right now, other than maybe "active" (but
I am not 100% sure about that one either), so "load list" might be a
good one after all.

> +       struct list_head        load_cfs_rq_list;
>         struct list_head        *tmp_alone_branch;
>  #endif /* CONFIG_FAIR_GROUP_SCHED */
>
> --
> 2.32.0
>

Thanks,
Odin

Thread overview: 20+ messages
2021-08-19 17:50 [RFC PATCH v2 0/5] leaf_cfs_rq_list cleanups and fix Michal Koutný
2021-08-19 17:50 ` [RFC PATCH v2 1/5] sched/fair: Add ancestors of unthrottled undecayed cfs_rq Michal Koutný
2021-09-09 13:57   ` Vincent Guittot
2021-09-10 11:35     ` Michal Koutný
2021-09-10 13:17       ` Vincent Guittot
2021-08-19 17:50 ` [RFC PATCH v2 2/5] sched: Add group_se() helper Michal Koutný
2021-08-19 17:50 ` [RFC PATCH v2 3/5] sched/fair: Rename leaf_list to more fitting load_list Michal Koutný
2021-09-21 19:06   ` Odin Ugedal [this message]
2021-08-19 17:50 ` [RFC PATCH v2 4/5] sched/fair: Simplify load_cfs_rq_list maintenance Michal Koutný
2021-09-10 14:19   ` Vincent Guittot
2021-09-14  9:22     ` Michal Koutný
2021-09-14  9:45       ` Vincent Guittot
2021-09-15 12:30         ` Vincent Guittot
2021-09-15 18:59           ` Odin Ugedal
2021-09-21 19:21             ` Odin Ugedal
2021-09-21 21:40               ` Michal Koutný
2021-08-19 17:50 ` [RFC PATCH v2 5/5] sched/fair: Simplify ancestor enqueue loops Michal Koutný
2021-09-09 14:04   ` Vincent Guittot
2021-09-10 11:35     ` Michal Koutný
2021-09-10 11:43       ` Vincent Guittot
