From: "Li, Aubrey" <aubrey.li@linux.intel.com>
To: "Joel Fernandes (Google)" <joel@joelfernandes.org>,
	Nishanth Aravamudan <naravamudan@digitalocean.com>,
	Julien Desfossez <jdesfossez@digitalocean.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Tim Chen <tim.c.chen@linux.intel.com>,
	Vineeth Pillai <viremana@linux.microsoft.com>,
	Aaron Lu <aaron.lwe@gmail.com>,
	Aubrey Li <aubrey.intel@gmail.com>,
	tglx@linutronix.de, linux-kernel@vger.kernel.org
Cc: mingo@kernel.org, torvalds@linux-foundation.org,
	fweisbec@gmail.com, keescook@chromium.org, kerrnel@google.com,
	Phil Auld <pauld@redhat.com>,
	Valentin Schneider <valentin.schneider@arm.com>,
	Mel Gorman <mgorman@techsingularity.net>,
	Pawan Gupta <pawan.kumar.gupta@linux.intel.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	vineeth@bitbyteword.org, Chen Yu <yu.c.chen@intel.com>,
	Christian Brauner <christian.brauner@ubuntu.com>,
	Agata Gruza <agata.gruza@intel.com>,
	Antonio Gomez Iglesias <antonio.gomez.iglesias@intel.com>,
	graf@amazon.com, konrad.wilk@oracle.com, dfaggioli@suse.com,
	pjt@google.com, rostedt@goodmis.org, derkling@google.com,
	benbjiang@tencent.com,
	Alexandre Chartre <alexandre.chartre@oracle.com>,
	James.Bottomley@hansenpartnership.com, OWeisse@umich.edu,
	Dhaval Giani <dhaval.giani@oracle.com>,
	Junaid Shahid <junaids@google.com>,
	jsbarnes@google.com, chris.hyser@oracle.com,
	"Paul E. McKenney" <paulmck@kernel.org>,
	Tim Chen <tim.c.chen@intel.com>
Subject: Re: [PATCH v8 -tip 24/26] sched: Move core-scheduler interfacing code to a new file
Date: Mon, 26 Oct 2020 09:05:52 +0800	[thread overview]
Message-ID: <c7466b5d-9850-9eff-3e67-f0cb1b578cc3@linux.intel.com> (raw)
In-Reply-To: <20201020014336.2076526-25-joel@joelfernandes.org>

On 2020/10/20 9:43, Joel Fernandes (Google) wrote:
> core.c is already huge. The core-tagging interface code is largely
> independent of it. Move it to its own file to make both files easier to
> maintain.
> 
> Tested-by: Julien Desfossez <jdesfossez@digitalocean.com>
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> ---
>  kernel/sched/Makefile  |   1 +
>  kernel/sched/core.c    | 481 +----------------------------------------
>  kernel/sched/coretag.c | 468 +++++++++++++++++++++++++++++++++++++++
>  kernel/sched/sched.h   |  56 ++++-
>  4 files changed, 523 insertions(+), 483 deletions(-)
>  create mode 100644 kernel/sched/coretag.c
> 
> diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
> index 5fc9c9b70862..c526c20adf9d 100644
> --- a/kernel/sched/Makefile
> +++ b/kernel/sched/Makefile
> @@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
>  obj-$(CONFIG_MEMBARRIER) += membarrier.o
>  obj-$(CONFIG_CPU_ISOLATION) += isolation.o
>  obj-$(CONFIG_PSI) += psi.o
> +obj-$(CONFIG_SCHED_CORE) += coretag.o
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index b3afbba5abe1..211e0784675f 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -162,11 +162,6 @@ static bool sched_core_empty(struct rq *rq)
>  	return RB_EMPTY_ROOT(&rq->core_tree);
>  }
>  
> -static bool sched_core_enqueued(struct task_struct *task)
> -{
> -	return !RB_EMPTY_NODE(&task->core_node);
> -}
> -
>  static struct task_struct *sched_core_first(struct rq *rq)
>  {
>  	struct task_struct *task;
> @@ -188,7 +183,7 @@ static void sched_core_flush(int cpu)
>  	rq->core->core_task_seq++;
>  }
>  
> -static void sched_core_enqueue(struct rq *rq, struct task_struct *p)
> +void sched_core_enqueue(struct rq *rq, struct task_struct *p)
>  {
>  	struct rb_node *parent, **node;
>  	struct task_struct *node_task;
> @@ -215,7 +210,7 @@ static void sched_core_enqueue(struct rq *rq, struct task_struct *p)
>  	rb_insert_color(&p->core_node, &rq->core_tree);
>  }
>  
> -static void sched_core_dequeue(struct rq *rq, struct task_struct *p)
> +void sched_core_dequeue(struct rq *rq, struct task_struct *p)
>  {
>  	rq->core->core_task_seq++;
>  
> @@ -310,7 +305,6 @@ static int __sched_core_stopper(void *data)
>  }
>  
>  static DEFINE_MUTEX(sched_core_mutex);
> -static DEFINE_MUTEX(sched_core_tasks_mutex);
>  static int sched_core_count;
>  
>  static void __sched_core_enable(void)
> @@ -346,16 +340,6 @@ void sched_core_put(void)
>  		__sched_core_disable();
>  	mutex_unlock(&sched_core_mutex);
>  }
> -
> -static int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2);
> -
> -#else /* !CONFIG_SCHED_CORE */
> -
> -static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
> -static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }
> -static bool sched_core_enqueued(struct task_struct *task) { return false; }
> -static int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2) { }
> -
>  #endif /* CONFIG_SCHED_CORE */
>  
>  /*
> @@ -8505,9 +8489,6 @@ void sched_offline_group(struct task_group *tg)
>  	spin_unlock_irqrestore(&task_group_lock, flags);
>  }
>  
> -#define SCHED_CORE_GROUP_COOKIE_MASK ((1UL << (sizeof(unsigned long) * 4)) - 1)
> -static unsigned long cpu_core_get_group_cookie(struct task_group *tg);
> -
>  static void sched_change_group(struct task_struct *tsk, int type)
>  {
>  	struct task_group *tg;
> @@ -8583,11 +8564,6 @@ void sched_move_task(struct task_struct *tsk)
>  	task_rq_unlock(rq, tsk, &rf);
>  }
>  
> -static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
> -{
> -	return css ? container_of(css, struct task_group, css) : NULL;
> -}
> -
>  static struct cgroup_subsys_state *
>  cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
>  {
> @@ -9200,459 +9176,6 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
>  }
>  #endif /* CONFIG_RT_GROUP_SCHED */
>  
> -#ifdef CONFIG_SCHED_CORE
> -/*
> - * A simple wrapper around refcount. An allocated sched_core_cookie's
> - * address is used to compute the cookie of the task.
> - */
> -struct sched_core_cookie {
> -	refcount_t refcnt;
> -};
> -
> -/*
> - * sched_core_tag_requeue - Common helper for all interfaces to set a cookie.
> - * @p: The task to assign a cookie to.
> - * @cookie: The cookie to assign.
> - * @group: is it a group interface or a per-task interface.
> - *
> - * This function is typically called from a stop-machine handler.
> - */
> -void sched_core_tag_requeue(struct task_struct *p, unsigned long cookie, bool group)
> -{
> -	if (!p)
> -		return;
> -
> -	if (group)
> -		p->core_group_cookie = cookie;
> -	else
> -		p->core_task_cookie = cookie;
> -
> -	/* Use up half of the cookie's bits for task cookie and remaining for group cookie. */
> -	p->core_cookie = (p->core_task_cookie <<
> -				(sizeof(unsigned long) * 4)) + p->core_group_cookie;
> -
> -	if (sched_core_enqueued(p)) {
> -		sched_core_dequeue(task_rq(p), p);
> -		if (!p->core_cookie)
> -			return;
> -	}
> -
> -	if (sched_core_enabled(task_rq(p)) &&
> -			p->core_cookie && task_on_rq_queued(p))
> -		sched_core_enqueue(task_rq(p), p);
> -}
> -
> -/* Per-task interface */
> -static unsigned long sched_core_alloc_task_cookie(void)
> -{
> -	struct sched_core_cookie *ptr =
> -		kmalloc(sizeof(struct sched_core_cookie), GFP_KERNEL);
> -
> -	if (!ptr)
> -		return 0;
> -	refcount_set(&ptr->refcnt, 1);
> -
> -	/*
> -	 * NOTE: sched_core_put() is not done by put_task_cookie(). Instead, it
> -	 * is done after the stopper runs.
> -	 */
> -	sched_core_get();
> -	return (unsigned long)ptr;
> -}
> -
> -static bool sched_core_get_task_cookie(unsigned long cookie)
> -{
> -	struct sched_core_cookie *ptr = (struct sched_core_cookie *)cookie;
> -
> -	/*
> -	 * NOTE: sched_core_put() is not done by put_task_cookie(). Instead, it
> -	 * is done after the stopper runs.
> -	 */
> -	sched_core_get();
> -	return refcount_inc_not_zero(&ptr->refcnt);
> -}
> -
> -static void sched_core_put_task_cookie(unsigned long cookie)
> -{
> -	struct sched_core_cookie *ptr = (struct sched_core_cookie *)cookie;
> -
> -	if (refcount_dec_and_test(&ptr->refcnt))
> -		kfree(ptr);
> -}
> -
> -struct sched_core_task_write_tag {
> -	struct task_struct *tasks[2];
> -	unsigned long cookies[2];
> -};
> -
> -/*
> - * Ensure that the task has been requeued. The stopper ensures that the task cannot
> - * be migrated to a different CPU while its core scheduler queue state is being updated.
> - * It also makes sure to requeue a task if it was running actively on another CPU.
> - */
> -static int sched_core_task_join_stopper(void *data)
> -{
> -	struct sched_core_task_write_tag *tag = (struct sched_core_task_write_tag *)data;
> -	int i;
> -
> -	for (i = 0; i < 2; i++)
> -		sched_core_tag_requeue(tag->tasks[i], tag->cookies[i], false /* !group */);
> -
> -	return 0;
> -}
> -
> -static int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2)
> -{
> -	struct sched_core_task_write_tag wr = {}; /* for stop machine. */
> -	bool sched_core_put_after_stopper = false;
> -	unsigned long cookie;
> -	int ret = -ENOMEM;
> -
> -	mutex_lock(&sched_core_tasks_mutex);
> -
> -	/*
> -	 * NOTE: sched_core_get() is done by sched_core_alloc_task_cookie() or
> -	 *       sched_core_put_task_cookie(). However, sched_core_put() is done
> -	 *       by this function *after* the stopper removes the tasks from the
> -	 *       core queue, and not before. This is just to play it safe.
> -	 */
> -	if (t2 == NULL) {
> -		if (t1->core_task_cookie) {
> -			sched_core_put_task_cookie(t1->core_task_cookie);
> -			sched_core_put_after_stopper = true;
> -			wr.tasks[0] = t1; /* Keep wr.cookies[0] reset for t1. */
> -		}
> -	} else if (t1 == t2) {
> -		/* Assign a unique per-task cookie solely for t1. */
> -
> -		cookie = sched_core_alloc_task_cookie();
> -		if (!cookie)
> -			goto out_unlock;
> -
> -		if (t1->core_task_cookie) {
> -			sched_core_put_task_cookie(t1->core_task_cookie);
> -			sched_core_put_after_stopper = true;
> -		}
> -		wr.tasks[0] = t1;
> -		wr.cookies[0] = cookie;
> -	} else
> -	/*
> -	 * 		t1		joining		t2
> -	 * CASE 1:
> -	 * before	0				0
> -	 * after	new cookie			new cookie
> -	 *
> -	 * CASE 2:
> -	 * before	X (non-zero)			0
> -	 * after	0				0
> -	 *
> -	 * CASE 3:
> -	 * before	0				X (non-zero)
> -	 * after	X				X
> -	 *
> -	 * CASE 4:
> -	 * before	Y (non-zero)			X (non-zero)
> -	 * after	X				X
> -	 */
> -	if (!t1->core_task_cookie && !t2->core_task_cookie) {
> -		/* CASE 1. */
> -		cookie = sched_core_alloc_task_cookie();
> -		if (!cookie)
> -			goto out_unlock;
> -
> -		/* Add another reference for the other task. */
> -		if (!sched_core_get_task_cookie(cookie)) {
> -			return -EINVAL;
> -			goto out_unlock;
> -		}
> -
> -		wr.tasks[0] = t1;
> -		wr.tasks[1] = t2;
> -		wr.cookies[0] = wr.cookies[1] = cookie;
> -
> -	} else if (t1->core_task_cookie && !t2->core_task_cookie) {
> -		/* CASE 2. */
> -		sched_core_put_task_cookie(t1->core_task_cookie);
> -		sched_core_put_after_stopper = true;
> -
> -		wr.tasks[0] = t1; /* Reset cookie for t1. */
> -
> -	} else if (!t1->core_task_cookie && t2->core_task_cookie) {
> -		/* CASE 3. */
> -		if (!sched_core_get_task_cookie(t2->core_task_cookie)) {
> -			ret = -EINVAL;
> -			goto out_unlock;
> -		}
> -
> -		wr.tasks[0] = t1;
> -		wr.cookies[0] = t2->core_task_cookie;
> -
> -	} else {
> -		/* CASE 4. */
> -		if (!sched_core_get_task_cookie(t2->core_task_cookie)) {
> -			ret = -EINVAL;
> -			goto out_unlock;
> -		}
> -		sched_core_put_task_cookie(t1->core_task_cookie);
> -		sched_core_put_after_stopper = true;
> -
> -		wr.tasks[0] = t1;
> -		wr.cookies[0] = t2->core_task_cookie;
> -	}
> -
> -	stop_machine(sched_core_task_join_stopper, (void *)&wr, NULL);
> -
> -	if (sched_core_put_after_stopper)
> -		sched_core_put();
> -
> -	ret = 0;
> -out_unlock:
> -	mutex_unlock(&sched_core_tasks_mutex);
> -	return ret;
> -}
> -
> -/* Called from prctl interface: PR_SCHED_CORE_SHARE */
> -int sched_core_share_pid(pid_t pid)
> -{
> -	struct task_struct *task;
> -	int err;
> -
> -	if (pid == 0) { /* Recent current task's cookie. */
> -		/* Resetting a cookie requires privileges. */
> -		if (current->core_task_cookie)
> -			if (!capable(CAP_SYS_ADMIN))
> -				return -EPERM;
> -		task = NULL;
> -	} else {
> -		rcu_read_lock();
> -		task = pid ? find_task_by_vpid(pid) : current;
> -		if (!task) {
> -			rcu_read_unlock();
> -			return -ESRCH;
> -		}
> -
> -		get_task_struct(task);
> -
> -		/*
> -		 * Check if this process has the right to modify the specified
> -		 * process. Use the regular "ptrace_may_access()" checks.
> -		 */
> -		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
> -			rcu_read_unlock();
> -			err = -EPERM;
> -			goto out_put;
> -		}
> -		rcu_read_unlock();
> -	}
> -
> -	err = sched_core_share_tasks(current, task);
> -out_put:
> -	if (task)
> -		put_task_struct(task);
> -	return err;
> -}
> -
> -/* CGroup interface */
> -
> -/*
> - * Helper to get the cookie in a hierarchy.
> - * The cookie is a combination of a tag and color. Any ancestor
> - * can have a tag/color. tag is the first-level cookie setting
> - * with color being the second. Atmost one color and one tag is
> - * allowed.
> - */
> -static unsigned long cpu_core_get_group_cookie(struct task_group *tg)
> -{
> -	unsigned long color = 0;
> -
> -	if (!tg)
> -		return 0;
> -
> -	for (; tg; tg = tg->parent) {
> -		if (tg->core_tag_color) {
> -			WARN_ON_ONCE(color);
> -			color = tg->core_tag_color;
> -		}
> -
> -		if (tg->core_tagged) {
> -			unsigned long cookie = ((unsigned long)tg << 8) | color;
> -			cookie &= SCHED_CORE_GROUP_COOKIE_MASK;
> -			return cookie;
> -		}
> -	}
> -
> -	return 0;
> -}
> -
> -/* Determine if any group in @tg's children are tagged or colored. */
> -static bool cpu_core_check_descendants(struct task_group *tg, bool check_tag,
> -					bool check_color)
> -{
> -	struct task_group *child;
> -
> -	rcu_read_lock();
> -	list_for_each_entry_rcu(child, &tg->children, siblings) {
> -		if ((child->core_tagged && check_tag) ||
> -		    (child->core_tag_color && check_color)) {
> -			rcu_read_unlock();
> -			return true;
> -		}
> -
> -		rcu_read_unlock();
> -		return cpu_core_check_descendants(child, check_tag, check_color);
> -	}
> -
> -	rcu_read_unlock();
> -	return false;
> -}
> -
> -static u64 cpu_core_tag_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
> -{
> -	struct task_group *tg = css_tg(css);
> -
> -	return !!tg->core_tagged;
> -}
> -
> -static u64 cpu_core_tag_color_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
> -{
> -	struct task_group *tg = css_tg(css);
> -
> -	return tg->core_tag_color;
> -}
> -
> -#ifdef CONFIG_SCHED_DEBUG
> -static u64 cpu_core_group_cookie_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
> -{
> -	return cpu_core_get_group_cookie(css_tg(css));
> -}
> -#endif
> -
> -struct write_core_tag {
> -	struct cgroup_subsys_state *css;
> -	unsigned long cookie;
> -};
> -
> -static int __sched_write_tag(void *data)
> -{
> -	struct write_core_tag *tag = (struct write_core_tag *) data;
> -	struct task_struct *p;
> -	struct cgroup_subsys_state *css;
> -
> -	rcu_read_lock();
> -	css_for_each_descendant_pre(css, tag->css) {
> -		struct css_task_iter it;
> -
> -		css_task_iter_start(css, 0, &it);
> -		/*
> -		 * Note: css_task_iter_next will skip dying tasks.
> -		 * There could still be dying tasks left in the core queue
> -		 * when we set cgroup tag to 0 when the loop is done below.
> -		 */
> -		while ((p = css_task_iter_next(&it)))
> -			sched_core_tag_requeue(p, tag->cookie, true /* group */);
> -
> -		css_task_iter_end(&it);
> -	}
> -	rcu_read_unlock();
> -
> -	return 0;
> -}
> -
> -static int cpu_core_tag_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, u64 val)
> -{
> -	struct task_group *tg = css_tg(css);
> -	struct write_core_tag wtag;
> -
> -	if (val > 1)
> -		return -ERANGE;
> -
> -	if (!static_branch_likely(&sched_smt_present))
> -		return -EINVAL;
> -
> -	if (!tg->core_tagged && val) {
> -		/* Tag is being set. Check ancestors and descendants. */
> -		if (cpu_core_get_group_cookie(tg) ||
> -		    cpu_core_check_descendants(tg, true /* tag */, true /* color */))
> -			return -EBUSY;
> -	} else if (tg->core_tagged && !val) {
> -		/* Tag is being reset. Check descendants. */
> -		if (cpu_core_check_descendants(tg, true /* tag */, true /* color */))
> -			return -EBUSY;
> -	} else {
> -		return 0;
> -	}
> -
> -	if (!!val)
> -		sched_core_get();
> -
> -	wtag.css = css;
> -	wtag.cookie = (unsigned long)tg << 8; /* Reserve lower 8 bits for color. */
> -
> -	/* Truncate the upper 32-bits - those are used by the per-task cookie. */
> -	wtag.cookie &= (1UL << (sizeof(unsigned long) * 4)) - 1;
> -
> -	tg->core_tagged = val;
> -
> -	stop_machine(__sched_write_tag, (void *) &wtag, NULL);
> -	if (!val)
> -		sched_core_put();
> -
> -	return 0;
> -}
> -
> -static int cpu_core_tag_color_write_u64(struct cgroup_subsys_state *css,
> -					struct cftype *cft, u64 val)
> -{
> -	struct task_group *tg = css_tg(css);
> -	struct write_core_tag wtag;
> -	u64 cookie;
> -
> -	if (val > 255)
> -		return -ERANGE;
> -
> -	if (!static_branch_likely(&sched_smt_present))
> -		return -EINVAL;
> -
> -	cookie = cpu_core_get_group_cookie(tg);
> -	/* Can't set color if nothing in the ancestors were tagged. */
> -	if (!cookie)
> -		return -EINVAL;
> -
> -	/*
> -	 * Something in the ancestors already colors us. Can't change the color
> -	 * at this level.
> -	 */
> -	if (!tg->core_tag_color && (cookie & 255))
> -		return -EINVAL;
> -
> -	/*
> -	 * Check if any descendants are colored. If so, we can't recolor them.
> -	 * Don't need to check if descendants are tagged, since we don't allow
> -	 * tagging when already tagged.
> -	 */
> -	if (cpu_core_check_descendants(tg, false /* tag */, true /* color */))
> -		return -EINVAL;
> -
> -	cookie &= ~255;
> -	cookie |= val;
> -	wtag.css = css;
> -	wtag.cookie = cookie;
> -	tg->core_tag_color = val;
> -
> -	stop_machine(__sched_write_tag, (void *) &wtag, NULL);
> -
> -	return 0;
> -}
> -
> -void sched_tsk_free(struct task_struct *tsk)
> -{
> -	if (!tsk->core_task_cookie)
> -		return;
> -	sched_core_put_task_cookie(tsk->core_task_cookie);
> -	sched_core_put();
> -}
> -#endif
> -
>  static struct cftype cpu_legacy_files[] = {
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>  	{
> diff --git a/kernel/sched/coretag.c b/kernel/sched/coretag.c
> new file mode 100644
> index 000000000000..3333c9b0afc5
> --- /dev/null
> +++ b/kernel/sched/coretag.c
> @@ -0,0 +1,468 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * kernel/sched/core-tag.c
> + *
> + * Core-scheduling tagging interface support.
> + *
> + * Copyright(C) 2020, Joel Fernandes.
> + * Initial interfacing code  by Peter Ziljstra.
> + */
> +
> +#include "sched.h"
> +
> +/*
> + * A simple wrapper around refcount. An allocated sched_core_cookie's
> + * address is used to compute the cookie of the task.
> + */
> +struct sched_core_cookie {
> +	refcount_t refcnt;
> +};
> +
> +static DEFINE_MUTEX(sched_core_tasks_mutex);
> +
> +/*
> + * sched_core_tag_requeue - Common helper for all interfaces to set a cookie.
> + * @p: The task to assign a cookie to.
> + * @cookie: The cookie to assign.
> + * @group: is it a group interface or a per-task interface.
> + *
> + * This function is typically called from a stop-machine handler.
> + */
> +void sched_core_tag_requeue(struct task_struct *p, unsigned long cookie, bool group)
> +{
> +	if (!p)
> +		return;
> +
> +	if (group)
> +		p->core_group_cookie = cookie;
> +	else
> +		p->core_task_cookie = cookie;
> +
> +	/* Use up half of the cookie's bits for task cookie and remaining for group cookie. */
> +	p->core_cookie = (p->core_task_cookie <<
> +				(sizeof(unsigned long) * 4)) + p->core_group_cookie;
> +
> +	if (sched_core_enqueued(p)) {
> +		sched_core_dequeue(task_rq(p), p);
> +		if (!p->core_cookie)
> +			return;
> +	}
> +
> +	if (sched_core_enabled(task_rq(p)) &&
> +			p->core_cookie && task_on_rq_queued(p))
> +		sched_core_enqueue(task_rq(p), p);
> +}
> +
> +/* Per-task interface: Used by fork(2) and prctl(2). */
> +static unsigned long sched_core_alloc_task_cookie(void)
> +{
> +	struct sched_core_cookie *ptr =
> +		kmalloc(sizeof(struct sched_core_cookie), GFP_KERNEL);
> +
> +	if (!ptr)
> +		return 0;
> +	refcount_set(&ptr->refcnt, 1);
> +
> +	/*
> +	 * NOTE: sched_core_put() is not done by put_task_cookie(). Instead, it
> +	 * is done after the stopper runs.
> +	 */
> +	sched_core_get();
> +	return (unsigned long)ptr;
> +}
> +
> +static bool sched_core_get_task_cookie(unsigned long cookie)
> +{
> +	struct sched_core_cookie *ptr = (struct sched_core_cookie *)cookie;
> +
> +	/*
> +	 * NOTE: sched_core_put() is not done by put_task_cookie(). Instead, it
> +	 * is done after the stopper runs.
> +	 */
> +	sched_core_get();
> +	return refcount_inc_not_zero(&ptr->refcnt);
> +}
> +
> +static void sched_core_put_task_cookie(unsigned long cookie)
> +{
> +	struct sched_core_cookie *ptr = (struct sched_core_cookie *)cookie;
> +
> +	if (refcount_dec_and_test(&ptr->refcnt))
> +		kfree(ptr);
> +}
> +
> +struct sched_core_task_write_tag {
> +	struct task_struct *tasks[2];
> +	unsigned long cookies[2];
> +};
> +
> +/*
> + * Ensure that the task has been requeued. The stopper ensures that the task cannot
> + * be migrated to a different CPU while its core scheduler queue state is being updated.
> + * It also makes sure to requeue a task if it was running actively on another CPU.
> + */
> +static int sched_core_task_join_stopper(void *data)
> +{
> +	struct sched_core_task_write_tag *tag = (struct sched_core_task_write_tag *)data;
> +	int i;
> +
> +	for (i = 0; i < 2; i++)
> +		sched_core_tag_requeue(tag->tasks[i], tag->cookies[i], false /* !group */);
> +
> +	return 0;
> +}
> +
> +int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2)
> +{
> +	struct sched_core_task_write_tag wr = {}; /* for stop machine. */
> +	bool sched_core_put_after_stopper = false;
> +	unsigned long cookie;
> +	int ret = -ENOMEM;
> +
> +	mutex_lock(&sched_core_tasks_mutex);
> +
> +	/*
> +	 * NOTE: sched_core_get() is done by sched_core_alloc_task_cookie() or
> +	 *       sched_core_put_task_cookie(). However, sched_core_put() is done
> +	 *       by this function *after* the stopper removes the tasks from the
> +	 *       core queue, and not before. This is just to play it safe.
> +	 */
> +	if (t2 == NULL) {
> +		if (t1->core_task_cookie) {
> +			sched_core_put_task_cookie(t1->core_task_cookie);
> +			sched_core_put_after_stopper = true;
> +			wr.tasks[0] = t1; /* Keep wr.cookies[0] reset for t1. */
> +		}
> +	} else if (t1 == t2) {
> +		/* Assign a unique per-task cookie solely for t1. */
> +
> +		cookie = sched_core_alloc_task_cookie();
> +		if (!cookie)
> +			goto out_unlock;
> +
> +		if (t1->core_task_cookie) {
> +			sched_core_put_task_cookie(t1->core_task_cookie);
> +			sched_core_put_after_stopper = true;
> +		}
> +		wr.tasks[0] = t1;
> +		wr.cookies[0] = cookie;
> +	} else
> +	/*
> +	 * 		t1		joining		t2
> +	 * CASE 1:
> +	 * before	0				0
> +	 * after	new cookie			new cookie
> +	 *
> +	 * CASE 2:
> +	 * before	X (non-zero)			0
> +	 * after	0				0
> +	 *
> +	 * CASE 3:
> +	 * before	0				X (non-zero)
> +	 * after	X				X
> +	 *
> +	 * CASE 4:
> +	 * before	Y (non-zero)			X (non-zero)
> +	 * after	X				X
> +	 */
> +	if (!t1->core_task_cookie && !t2->core_task_cookie) {
> +		/* CASE 1. */
> +		cookie = sched_core_alloc_task_cookie();
> +		if (!cookie)
> +			goto out_unlock;
> +
> +		/* Add another reference for the other task. */
> +		if (!sched_core_get_task_cookie(cookie)) {
> +			return -EINVAL;

This should be "ret = -EINVAL;" rather than "return -EINVAL;"; returning here bypasses out_unlock, so sched_core_tasks_mutex is never released.
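
Something like this (untested) would keep the error path going through
out_unlock, the same way the CASE 3 and CASE 4 branches below already do:

		/* Add another reference for the other task. */
		if (!sched_core_get_task_cookie(cookie)) {
			ret = -EINVAL;
			goto out_unlock;
		}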

> +			goto out_unlock;
> +		}
> +
> +		wr.tasks[0] = t1;
> +		wr.tasks[1] = t2;
> +		wr.cookies[0] = wr.cookies[1] = cookie;
> +
> +	} else if (t1->core_task_cookie && !t2->core_task_cookie) {
> +		/* CASE 2. */
> +		sched_core_put_task_cookie(t1->core_task_cookie);
> +		sched_core_put_after_stopper = true;
> +
> +		wr.tasks[0] = t1; /* Reset cookie for t1. */
> +
> +	} else if (!t1->core_task_cookie && t2->core_task_cookie) {
> +		/* CASE 3. */
> +		if (!sched_core_get_task_cookie(t2->core_task_cookie)) {
> +			ret = -EINVAL;
> +			goto out_unlock;
> +		}
> +
> +		wr.tasks[0] = t1;
> +		wr.cookies[0] = t2->core_task_cookie;
> +
> +	} else {
> +		/* CASE 4. */
> +		if (!sched_core_get_task_cookie(t2->core_task_cookie)) {
> +			ret = -EINVAL;
> +			goto out_unlock;
> +		}
> +		sched_core_put_task_cookie(t1->core_task_cookie);
> +		sched_core_put_after_stopper = true;
> +
> +		wr.tasks[0] = t1;
> +		wr.cookies[0] = t2->core_task_cookie;
> +	}
> +
> +	stop_machine(sched_core_task_join_stopper, (void *)&wr, NULL);
> +
> +	if (sched_core_put_after_stopper)
> +		sched_core_put();
> +
> +	ret = 0;
> +out_unlock:
> +	mutex_unlock(&sched_core_tasks_mutex);
> +	return ret;
> +}
> +
> +/* Called from prctl interface: PR_SCHED_CORE_SHARE */
> +int sched_core_share_pid(pid_t pid)
> +{
> +	struct task_struct *task;
> +	int err;
> +
> +	if (pid == 0) { /* Recent current task's cookie. */
> +		/* Resetting a cookie requires privileges. */
> +		if (current->core_task_cookie)
> +			if (!capable(CAP_SYS_ADMIN))
> +				return -EPERM;
> +		task = NULL;
> +	} else {
> +		rcu_read_lock();
> +		task = pid ? find_task_by_vpid(pid) : current;
> +		if (!task) {
> +			rcu_read_unlock();
> +			return -ESRCH;
> +		}
> +
> +		get_task_struct(task);
> +
> +		/*
> +		 * Check if this process has the right to modify the specified
> +		 * process. Use the regular "ptrace_may_access()" checks.
> +		 */
> +		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
> +			rcu_read_unlock();
> +			err = -EPERM;
> +			goto out_put;
> +		}
> +		rcu_read_unlock();
> +	}
> +
> +	err = sched_core_share_tasks(current, task);
> +out_put:
> +	if (task)
> +		put_task_struct(task);
> +	return err;
> +}
> +
> +/* CGroup core-scheduling interface support. */
> +
> +/*
> + * Helper to get the cookie in a hierarchy.
> + * The cookie is a combination of a tag and color. Any ancestor
> + * can have a tag/color. tag is the first-level cookie setting
> + * with color being the second. Atmost one color and one tag is
> + * allowed.
> + */
> +unsigned long cpu_core_get_group_cookie(struct task_group *tg)
> +{
> +	unsigned long color = 0;
> +
> +	if (!tg)
> +		return 0;
> +
> +	for (; tg; tg = tg->parent) {
> +		if (tg->core_tag_color) {
> +			WARN_ON_ONCE(color);
> +			color = tg->core_tag_color;
> +		}
> +
> +		if (tg->core_tagged) {
> +			unsigned long cookie = ((unsigned long)tg << 8) | color;
> +			cookie &= SCHED_CORE_GROUP_COOKIE_MASK;
> +			return cookie;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +/* Determine if any group in @tg's children are tagged or colored. */
> +static bool cpu_core_check_descendants(struct task_group *tg, bool check_tag,
> +				       bool check_color)
> +{
> +	struct task_group *child;
> +
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(child, &tg->children, siblings) {
> +		if ((child->core_tagged && check_tag) ||
> +		    (child->core_tag_color && check_color)) {
> +			rcu_read_unlock();
> +			return true;
> +		}
> +
> +		rcu_read_unlock();
> +		return cpu_core_check_descendants(child, check_tag, check_color);
> +	}
> +
> +	rcu_read_unlock();
> +	return false;
> +}
> +
> +u64 cpu_core_tag_read_u64(struct cgroup_subsys_state *css,
> +			  struct cftype *cft)
> +{
> +	struct task_group *tg = css_tg(css);
> +
> +	return !!tg->core_tagged;
> +}
> +
> +u64 cpu_core_tag_color_read_u64(struct cgroup_subsys_state *css,
> +				struct cftype *cft)
> +{
> +	struct task_group *tg = css_tg(css);
> +
> +	return tg->core_tag_color;
> +}
> +
> +#ifdef CONFIG_SCHED_DEBUG
> +u64 cpu_core_group_cookie_read_u64(struct cgroup_subsys_state *css,
> +				   struct cftype *cft)
> +{
> +	return cpu_core_get_group_cookie(css_tg(css));
> +}
> +#endif
> +
> +struct write_core_tag {
> +	struct cgroup_subsys_state *css;
> +	unsigned long cookie;
> +};
> +
> +static int __sched_write_tag(void *data)
> +{
> +	struct write_core_tag *tag = (struct write_core_tag *) data;
> +	struct task_struct *p;
> +	struct cgroup_subsys_state *css;
> +
> +	rcu_read_lock();
> +	css_for_each_descendant_pre(css, tag->css) {
> +		struct css_task_iter it;
> +
> +		css_task_iter_start(css, 0, &it);
> +		/*
> +		 * Note: css_task_iter_next will skip dying tasks.
> +		 * There could still be dying tasks left in the core queue
> +		 * when we set cgroup tag to 0 when the loop is done below.
> +		 */
> +		while ((p = css_task_iter_next(&it)))
> +			sched_core_tag_requeue(p, tag->cookie, true /* group */);
> +
> +		css_task_iter_end(&it);
> +	}
> +	rcu_read_unlock();
> +
> +	return 0;
> +}
> +
> +int cpu_core_tag_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
> +			   u64 val)
> +{
> +	struct task_group *tg = css_tg(css);
> +	struct write_core_tag wtag;
> +
> +	if (val > 1)
> +		return -ERANGE;
> +
> +	if (!static_branch_likely(&sched_smt_present))
> +		return -EINVAL;
> +
> +	if (!tg->core_tagged && val) {
> +		/* Tag is being set. Check ancestors and descendants. */
> +		if (cpu_core_get_group_cookie(tg) ||
> +		    cpu_core_check_descendants(tg, true /* tag */, true /* color */))
> +			return -EBUSY;
> +	} else if (tg->core_tagged && !val) {
> +		/* Tag is being reset. Check descendants. */
> +		if (cpu_core_check_descendants(tg, true /* tag */, true /* color */))
> +			return -EBUSY;
> +	} else {
> +		return 0;
> +	}
> +
> +	if (!!val)
> +		sched_core_get();
> +
> +	wtag.css = css;
> +	wtag.cookie = (unsigned long)tg << 8; /* Reserve lower 8 bits for color. */
> +
> +	/* Truncate the upper 32-bits - those are used by the per-task cookie. */
> +	wtag.cookie &= (1UL << (sizeof(unsigned long) * 4)) - 1;
> +
> +	tg->core_tagged = val;
> +
> +	stop_machine(__sched_write_tag, (void *) &wtag, NULL);
> +	if (!val)
> +		sched_core_put();
> +
> +	return 0;
> +}
> +
> +int cpu_core_tag_color_write_u64(struct cgroup_subsys_state *css,
> +				 struct cftype *cft, u64 val)
> +{
> +	struct task_group *tg = css_tg(css);
> +	struct write_core_tag wtag;
> +	u64 cookie;
> +
> +	if (val > 255)
> +		return -ERANGE;
> +
> +	if (!static_branch_likely(&sched_smt_present))
> +		return -EINVAL;
> +
> +	cookie = cpu_core_get_group_cookie(tg);
> +	/* Can't set color if nothing in the ancestors were tagged. */
> +	if (!cookie)
> +		return -EINVAL;
> +
> +	/*
> +	 * Something in the ancestors already colors us. Can't change the color
> +	 * at this level.
> +	 */
> +	if (!tg->core_tag_color && (cookie & 255))
> +		return -EINVAL;
> +
> +	/*
> +	 * Check if any descendants are colored. If so, we can't recolor them.
> +	 * Don't need to check if descendants are tagged, since we don't allow
> +	 * tagging when already tagged.
> +	 */
> +	if (cpu_core_check_descendants(tg, false /* tag */, true /* color */))
> +		return -EINVAL;
> +
> +	cookie &= ~255;
> +	cookie |= val;
> +	wtag.css = css;
> +	wtag.cookie = cookie;
> +	tg->core_tag_color = val;
> +
> +	stop_machine(__sched_write_tag, (void *) &wtag, NULL);
> +
> +	return 0;
> +}
> +
> +void sched_tsk_free(struct task_struct *tsk)
> +{
> +	if (!tsk->core_task_cookie)
> +		return;
> +	sched_core_put_task_cookie(tsk->core_task_cookie);
> +	sched_core_put();
> +}
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index aebeb91c4a0f..290a3b8be3d3 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -437,6 +437,11 @@ struct task_group {
>  
>  };
>  
> +static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
> +{
> +	return css ? container_of(css, struct task_group, css) : NULL;
> +}
> +
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>  #define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
>  
> @@ -1104,6 +1109,8 @@ static inline int cpu_of(struct rq *rq)
>  #ifdef CONFIG_SCHED_CORE
>  DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
>  
> +#define SCHED_CORE_GROUP_COOKIE_MASK ((1UL << (sizeof(unsigned long) * 4)) - 1)
> +
>  static inline bool sched_core_enabled(struct rq *rq)
>  {
>  	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
> @@ -1148,10 +1155,54 @@ static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
>  	return idle_core || rq->core->core_cookie == p->core_cookie;
>  }
>  
> -extern void queue_core_balance(struct rq *rq);
> +static inline bool sched_core_enqueued(struct task_struct *task)
> +{
> +	return !RB_EMPTY_NODE(&task->core_node);
> +}
> +
> +void queue_core_balance(struct rq *rq);
> +
> +void sched_core_enqueue(struct rq *rq, struct task_struct *p);
> +void sched_core_dequeue(struct rq *rq, struct task_struct *p);
> +void sched_core_get(void);
> +void sched_core_put(void);
> +
> +void sched_core_tag_requeue(struct task_struct *p, unsigned long cookie,
> +			    bool group);
> +
> +int sched_core_share_pid(pid_t pid);
> +int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2);
> +
> +unsigned long cpu_core_get_group_cookie(struct task_group *tg);
> +
> +u64 cpu_core_tag_read_u64(struct cgroup_subsys_state *css,
> +			  struct cftype *cft);
> +
> +u64 cpu_core_tag_color_read_u64(struct cgroup_subsys_state *css,
> +				struct cftype *cft);
> +
> +#ifdef CONFIG_SCHED_DEBUG
> +u64 cpu_core_group_cookie_read_u64(struct cgroup_subsys_state *css,
> +				   struct cftype *cft);
> +#endif
> +
> +int cpu_core_tag_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
> +			   u64 val);
> +
> +int cpu_core_tag_color_write_u64(struct cgroup_subsys_state *css,
> +				 struct cftype *cft, u64 val);
> +
> +#ifndef TIF_UNSAFE_RET
> +#define TIF_UNSAFE_RET (0)
> +#endif
>  
>  #else /* !CONFIG_SCHED_CORE */
>  
> +static inline bool sched_core_enqueued(struct task_struct *task) { return false; }
> +static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
> +static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }
> +static inline int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2) { }
> +
>  static inline bool sched_core_enabled(struct rq *rq)
>  {
>  	return false;
> @@ -2779,7 +2830,4 @@ void swake_up_all_locked(struct swait_queue_head *q);
>  void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
>  
>  #ifdef CONFIG_SCHED_CORE
> -#ifndef TIF_UNSAFE_RET
> -#define TIF_UNSAFE_RET (0)
> -#endif
>  #endif
> 


Thread overview: 98+ messages
2020-10-20  1:43 [PATCH v8 -tip 00/26] Core scheduling Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 01/26] sched: Wrap rq::lock access Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 02/26] sched: Introduce sched_class::pick_task() Joel Fernandes (Google)
2020-10-22  7:59   ` Li, Aubrey
2020-10-22 15:25     ` Joel Fernandes
2020-10-23  5:25       ` Li, Aubrey
2020-10-23 21:47         ` Joel Fernandes
2020-10-24  2:48           ` Li, Aubrey
2020-10-24 11:10             ` Vineeth Pillai
2020-10-24 12:27               ` Vineeth Pillai
2020-10-24 23:48                 ` Li, Aubrey
2020-10-26  9:01                 ` Peter Zijlstra
2020-10-27  3:17                   ` Li, Aubrey
2020-10-27 14:19                   ` Joel Fernandes
2020-10-27 15:23                     ` Joel Fernandes
2020-10-27 14:14                 ` Joel Fernandes
2020-10-20  1:43 ` [PATCH v8 -tip 03/26] sched: Core-wide rq->lock Joel Fernandes (Google)
2020-10-26 11:59   ` Peter Zijlstra
2020-10-27 16:27     ` Joel Fernandes
2020-10-20  1:43 ` [PATCH v8 -tip 04/26] sched/fair: Add a few assertions Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 05/26] sched: Basic tracking of matching tasks Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 06/26] sched: Add core wide task selection and scheduling Joel Fernandes (Google)
2020-10-23 13:51   ` Peter Zijlstra
2020-10-23 13:54     ` Peter Zijlstra
2020-10-23 17:57       ` Joel Fernandes
2020-10-23 19:26         ` Peter Zijlstra
2020-10-23 21:31           ` Joel Fernandes
2020-10-26  8:28             ` Peter Zijlstra
2020-10-27 16:58               ` Joel Fernandes
2020-10-26  9:31             ` Peter Zijlstra
2020-11-05 18:50               ` Joel Fernandes
2020-11-05 22:07                 ` Joel Fernandes
2020-10-23 15:05   ` Peter Zijlstra
2020-10-23 17:59     ` Joel Fernandes
2020-10-20  1:43 ` [PATCH v8 -tip 07/26] sched/fair: Fix forced idle sibling starvation corner case Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 08/26] sched/fair: Snapshot the min_vruntime of CPUs on force idle Joel Fernandes (Google)
2020-10-26 12:47   ` Peter Zijlstra
2020-10-28 15:29     ` Joel Fernandes
2020-10-28 18:39     ` Joel Fernandes
2020-10-29 16:59     ` Joel Fernandes
2020-10-29 18:24     ` Joel Fernandes
2020-10-29 18:59       ` Peter Zijlstra
2020-10-30  2:36         ` Joel Fernandes
2020-10-30  2:42           ` Joel Fernandes
2020-10-30  8:41             ` Peter Zijlstra
2020-10-31 21:41               ` Joel Fernandes
2020-10-20  1:43 ` [PATCH v8 -tip 09/26] sched: Trivial forced-newidle balancer Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 10/26] sched: migration changes for core scheduling Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 11/26] irq_work: Cleanup Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 12/26] arch/x86: Add a new TIF flag for untrusted tasks Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 13/26] kernel/entry: Add support for core-wide protection of kernel-mode Joel Fernandes (Google)
2020-10-20  3:41   ` Randy Dunlap
2020-11-03  0:20     ` Joel Fernandes
2020-10-22  5:48   ` Li, Aubrey
2020-11-03  0:50     ` Joel Fernandes
2020-10-30 10:29   ` Alexandre Chartre
2020-11-03  1:20     ` Joel Fernandes
2020-11-06 16:57       ` Alexandre Chartre
2020-11-06 17:43         ` Joel Fernandes
2020-11-06 18:07           ` Alexandre Chartre
2020-11-10  9:35       ` Alexandre Chartre
2020-11-10 22:42         ` Joel Fernandes
2020-11-16 10:08           ` Alexandre Chartre
2020-11-16 14:50             ` Joel Fernandes
2020-11-16 15:43               ` Joel Fernandes
2020-10-20  1:43 ` [PATCH v8 -tip 14/26] entry/idle: Enter and exit kernel protection during idle entry and exit Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 15/26] entry/kvm: Protect the kernel when entering from guest Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 16/26] sched: cgroup tagging interface for core scheduling Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 17/26] sched: Split the cookie and setup per-task cookie on fork Joel Fernandes (Google)
2020-11-04 22:30   ` chris hyser
2020-11-05 14:49     ` Joel Fernandes
2020-11-09 23:30     ` chris hyser
2020-10-20  1:43 ` [PATCH v8 -tip 18/26] sched: Add a per-thread core scheduling interface Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 19/26] sched: Add a second-level tag for nested CGroup usecase Joel Fernandes (Google)
2020-10-31  0:42   ` Josh Don
2020-11-03  2:54     ` Joel Fernandes
     [not found]   ` <6c07e70d-52f2-69ff-e1fa-690cd2c97f3d@linux.intel.com>
2020-11-05 15:52     ` Joel Fernandes
2020-10-20  1:43 ` [PATCH v8 -tip 20/26] sched: Release references to the per-task cookie on exit Joel Fernandes (Google)
2020-11-04 21:50   ` chris hyser
2020-11-05 15:46     ` Joel Fernandes
2020-10-20  1:43 ` [PATCH v8 -tip 21/26] sched: Handle task addition to CGroup Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 22/26] sched/debug: Add CGroup node for printing group cookie if SCHED_DEBUG Joel Fernandes (Google)
2020-10-20  1:43 ` [PATCH v8 -tip 23/26] kselftest: Add tests for core-sched interface Joel Fernandes (Google)
2020-10-30 22:20   ` [PATCH] sched: Change all 4 space tabs to actual tabs John B. Wyatt IV
2020-10-20  1:43 ` [PATCH v8 -tip 24/26] sched: Move core-scheduler interfacing code to a new file Joel Fernandes (Google)
2020-10-26  1:05   ` Li, Aubrey [this message]
2020-11-03  2:58     ` Joel Fernandes
2020-10-20  1:43 ` [PATCH v8 -tip 25/26] Documentation: Add core scheduling documentation Joel Fernandes (Google)
2020-10-20  3:36   ` Randy Dunlap
2020-11-12 16:11     ` Joel Fernandes
2020-10-20  1:43 ` [PATCH v8 -tip 26/26] sched: Debug bits Joel Fernandes (Google)
2020-10-30 13:26 ` [PATCH v8 -tip 00/26] Core scheduling Ning, Hongyu
2020-11-06  2:58   ` Li, Aubrey
2020-11-06 17:54     ` Joel Fernandes
2020-11-09  6:04       ` Li, Aubrey
2020-11-06 20:55 ` [RFT for v9] (Was Re: [PATCH v8 -tip 00/26] Core scheduling) Joel Fernandes
2020-11-13  9:22   ` Ning, Hongyu
2020-11-13 10:01     ` Ning, Hongyu
