linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/2] sched: task group weight interface
From: Peter Zijlstra @ 2008-12-10 19:40 UTC
  To: mingo, dhaval, vatsa, cfriesen; +Cc: linux-kernel, Peter Zijlstra

Hi,

Dhaval poked me that he (and others) didn't like the weight interface we
currently have for task groups. I concur and dusted off these old patches.


-- 



* [PATCH 1/2] sched: tg->weight
From: Peter Zijlstra @ 2008-12-10 19:40 UTC
  To: mingo, dhaval, vatsa, cfriesen; +Cc: linux-kernel, Peter Zijlstra

[-- Attachment #1: sched-tg_weight.patch --]
[-- Type: text/plain, Size: 7038 bytes --]

While going through the whole group thing again, I realized tg->shares ought
to be called tg->weight: it is the total group weight, not one cpu's share of
that weight.
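
To illustrate the relation behind the rename: each cpu's cfs_rq->shares ends
up being the slice of the total tg->weight proportional to that cpu's
runqueue load. A simplified sketch of that computation (illustrative only;
the real update_group_weight_cpu() also clamps the result to
[MIN_SHARES, MAX_SHARES] and runs under the rq lock):

	/* Simplified sketch, not the kernel's exact code. */
	static unsigned long cpu_share(unsigned long tg_weight,
				       unsigned long cpu_rq_weight,
				       unsigned long total_rq_weight)
	{
		if (!total_rq_weight)
			return 0;
		/* this cpu's slice of the group's total weight */
		return tg_weight * cpu_rq_weight / total_rq_weight;
	}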

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 include/linux/sched.h |    4 ++--
 kernel/sched.c        |   49 +++++++++++++++++++++++++++----------------------
 kernel/sched_fair.c   |    2 +-
 kernel/user.c         |    4 ++--
 4 files changed, 32 insertions(+), 27 deletions(-)

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -275,7 +275,7 @@ struct task_group {
 	struct sched_entity **se;
 	/* runqueue "owned" by this group on each cpu */
 	struct cfs_rq **cfs_rq;
-	unsigned long shares;
+	unsigned long weight;
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -445,7 +445,7 @@ struct cfs_rq {
 	unsigned long h_load;
 
 	/*
-	 * this cpu's part of tg->shares
+	 * this cpu's part of tg->weight
 	 */
 	unsigned long shares;
 
@@ -1465,7 +1465,7 @@ static void __set_se_shares(struct sched
  * Calculate and set the cpu's group shares.
  */
 static void
-update_group_shares_cpu(struct task_group *tg, int cpu,
+update_group_weight_cpu(struct task_group *tg, int cpu,
 			unsigned long sd_shares, unsigned long sd_rq_weight)
 {
 	unsigned long shares;
@@ -1525,14 +1525,14 @@ static int tg_shares_up(struct task_grou
 		shares += tg->cfs_rq[i]->shares;
 	}
 
-	if ((!shares && rq_weight) || shares > tg->shares)
-		shares = tg->shares;
+	if ((!shares && rq_weight) || shares > tg->weight)
+		shares = tg->weight;
 
 	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
-		shares = tg->shares;
+		shares = tg->weight;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight);
+		update_group_weight_cpu(tg, i, shares, rq_weight);
 
 	return 0;
 }
@@ -8112,7 +8112,7 @@ static void init_tg_cfs_entry(struct tas
 		se->cfs_rq = parent->my_q;
 
 	se->my_q = cfs_rq;
-	se->load.weight = tg->shares;
+	se->load.weight = tg->weight;
 	se->load.inv_weight = 0;
 	se->parent = parent;
 }
@@ -8237,7 +8237,7 @@ void __init sched_init(void)
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.shares = init_task_group_load;
+		init_task_group.weight = init_task_group_load;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 #ifdef CONFIG_CGROUP_SCHED
 		/*
@@ -8261,7 +8261,7 @@ void __init sched_init(void)
 		 */
 		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
 #elif defined CONFIG_USER_SCHED
-		root_task_group.shares = NICE_0_LOAD;
+		root_task_group.weight = NICE_0_LOAD;
 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
 		/*
 		 * In case of task-groups formed thr' the user id of tasks,
@@ -8524,7 +8524,7 @@ int alloc_fair_sched_group(struct task_g
 	if (!tg->se)
 		goto err;
 
-	tg->shares = NICE_0_LOAD;
+	tg->weight = NICE_0_LOAD;
 
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
@@ -8807,7 +8807,7 @@ static void set_se_shares(struct sched_e
 
 static DEFINE_MUTEX(shares_mutex);
 
-int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+int sched_group_set_weight(struct task_group *tg, unsigned long shares)
 {
 	int i;
 	unsigned long flags;
@@ -8824,7 +8824,7 @@ int sched_group_set_shares(struct task_g
 		shares = MAX_SHARES;
 
 	mutex_lock(&shares_mutex);
-	if (tg->shares == shares)
+	if (tg->weight == shares)
 		goto done;
 
 	spin_lock_irqsave(&task_group_lock, flags);
@@ -8840,7 +8840,7 @@ int sched_group_set_shares(struct task_g
 	 * Now we are free to modify the group's share on each cpu
 	 * w/o tripping rebalance_share or load_balance_fair.
 	 */
-	tg->shares = shares;
+	tg->weight = shares;
 	for_each_possible_cpu(i) {
 		/*
 		 * force a rebalance
@@ -8863,9 +8863,9 @@ done:
 	return 0;
 }
 
-unsigned long sched_group_shares(struct task_group *tg)
+unsigned long sched_group_weight(struct task_group *tg)
 {
-	return tg->shares;
+	return tg->weight;
 }
 #endif
 
@@ -9183,17 +9183,17 @@ cpu_cgroup_attach(struct cgroup_subsys *
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
+static int cpu_weight_write_u64(struct cgroup *cgrp, struct cftype *cftype,
 				u64 shareval)
 {
-	return sched_group_set_shares(cgroup_tg(cgrp), shareval);
+	return sched_group_set_weight(cgroup_tg(cgrp), shareval);
 }
 
-static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_weight_read_u64(struct cgroup *cgrp, struct cftype *cft)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
 
-	return (u64) tg->shares;
+	return (u64) tg->weight;
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
@@ -9225,8 +9225,13 @@ static struct cftype cpu_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	{
 		.name = "shares",
-		.read_u64 = cpu_shares_read_u64,
-		.write_u64 = cpu_shares_write_u64,
+		.read_u64 = cpu_weight_read_u64,
+		.write_u64 = cpu_weight_write_u64,
+	},
+	{
+		.name = "weight",
+		.read_u64 = cpu_weight_read_u64,
+		.write_u64 = cpu_weight_write_u64,
 	},
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
Index: linux-2.6/kernel/sched_fair.c
===================================================================
--- linux-2.6.orig/kernel/sched_fair.c
+++ linux-2.6/kernel/sched_fair.c
@@ -1117,7 +1117,7 @@ static long effective_load(struct task_g
 		wl += more_w;
 		wg += more_w;
 
-		S = se->my_q->tg->shares;
+		S = se->my_q->tg->weight;
 		s = se->my_q->shares;
 		rw = se->my_q->rq_weight;
 
Index: linux-2.6/include/linux/sched.h
===================================================================
--- linux-2.6.orig/include/linux/sched.h
+++ linux-2.6/include/linux/sched.h
@@ -2282,8 +2282,8 @@ extern struct task_group *sched_create_g
 extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
-extern unsigned long sched_group_shares(struct task_group *tg);
+extern int sched_group_set_weight(struct task_group *tg, unsigned long shares);
+extern unsigned long sched_group_weight(struct task_group *tg);
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 extern int sched_group_set_rt_runtime(struct task_group *tg,
Index: linux-2.6/kernel/user.c
===================================================================
--- linux-2.6.orig/kernel/user.c
+++ linux-2.6/kernel/user.c
@@ -142,7 +142,7 @@ static ssize_t cpu_shares_show(struct ko
 {
 	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
 
-	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
+	return sprintf(buf, "%lu\n", sched_group_weight(up->tg));
 }
 
 static ssize_t cpu_shares_store(struct kobject *kobj,
@@ -155,7 +155,7 @@ static ssize_t cpu_shares_store(struct k
 
 	sscanf(buf, "%lu", &shares);
 
-	rc = sched_group_set_shares(up->tg, shares);
+	rc = sched_group_set_weight(up->tg, shares);
 
 	return (rc ? rc : size);
 }
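
Note that the cpu_files[] hunk above keeps the existing "shares" file and adds
"weight" as an alias; both names read and write the same tg->weight. A
hypothetical userspace check (the /cgroup mount point and group "foo" are
assumptions and depend on where the cpu controller is mounted):

	#include <stdio.h>

	/* Read back both aliases; they should report the same value. */
	static unsigned long read_ulong(const char *path)
	{
		FILE *f = fopen(path, "r");
		unsigned long v = 0;

		if (f) {
			if (fscanf(f, "%lu", &v) != 1)
				v = 0;
			fclose(f);
		}
		return v;
	}

	int main(void)
	{
		printf("cpu.shares = %lu\n", read_ulong("/cgroup/foo/cpu.shares"));
		printf("cpu.weight = %lu\n", read_ulong("/cgroup/foo/cpu.weight"));
		return 0;
	}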

-- 



* [PATCH 2/2] sched: group nice
From: Peter Zijlstra @ 2008-12-10 19:40 UTC
  To: mingo, dhaval, vatsa, cfriesen; +Cc: linux-kernel, Peter Zijlstra

[-- Attachment #1: sched-group-nice.patch --]
[-- Type: text/plain, Size: 1769 bytes --]

Allow the use of nice values to set group weights.
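
With this, a group's weight can be set in familiar nice units through the new
cpu.nice cgroup file. A hypothetical usage sketch (the /cgroup mount point and
group "foo" are assumptions):

	#include <stdio.h>

	/* Give group "foo" the weight corresponding to nice -5. */
	int main(void)
	{
		FILE *f = fopen("/cgroup/foo/cpu.nice", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "-5\n");
		fclose(f);
		return 0;
	}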

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 kernel/sched.c |   39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -8558,6 +8558,17 @@ static inline void unregister_fair_sched
 {
 	list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
 }
+
+static int cpu_nice_write(struct cgroup *cgrp, struct cftype *cftype,
+			  s64 niceval)
+{
+	return sched_group_set_nice(cgroup_tg(cgrp), niceval);
+}
+
+static s64 cpu_nice_read(struct cgroup *cgrp, struct cftype *cftype)
+{
+	return sched_group_nice(cgroup_tg(cgrp));
+}
 #else /* !CONFIG_FAIR_GROUP_SCHED */
 static inline void free_fair_sched_group(struct task_group *tg)
 {
@@ -8867,6 +8878,29 @@ unsigned long sched_group_weight(struct 
 {
 	return tg->weight;
 }
+
+static int sched_group_set_nice(struct task_group *tg, int nice)
+{
+	unsigned int prio = nice + 20;
+
+	if (prio >= ARRAY_SIZE(prio_to_weight))
+		return -EINVAL;
+
+	sched_group_set_weight(tg, prio_to_weight[prio]);
+	return 0;
+}
+
+static int sched_group_nice(struct task_group *tg)
+{
+	unsigned long weight = sched_group_weight(tg);
+	int prio;
+
+	for (prio = 0; prio < ARRAY_SIZE(prio_to_weight); prio++)
+		if (prio_to_weight[prio] <= weight)
+			break;
+
+	return prio - 20;
+}
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -9233,6 +9267,11 @@ static struct cftype cpu_files[] = {
 		.read_u64 = cpu_weight_read_u64,
 		.write_u64 = cpu_weight_write_u64,
 	},
+	{
+		.name = "nice",
+		.read_s64 = cpu_nice_read,
+		.write_s64 = cpu_nice_write,
+	},
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 	{

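The conversion relies on the scheduler's prio_to_weight[] table: 40 entries
covering nice -20..19, where nice 0 maps to weight 1024 and each nice step
scales the weight by roughly 1.25 (the table runs from 88761 at nice -20 down
to 15 at nice 19). A standalone sketch that approximates those values (the
kernel uses the precomputed integer table, not floating point):

	#include <stdio.h>

	/* Approximate prio_to_weight[]: nice 0 -> 1024, ~1.25x per step. */
	static unsigned long approx_weight(int nice)
	{
		double w = 1024.0;

		while (nice > 0) { w /= 1.25; nice--; }
		while (nice < 0) { w *= 1.25; nice++; }
		return (unsigned long)(w + 0.5);
	}

	int main(void)
	{
		int nice;

		for (nice = -20; nice <= 19; nice++)
			printf("nice %3d -> weight ~%lu\n",
			       nice, approx_weight(nice));
		return 0;
	}
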
-- 



* Re: [PATCH 0/2] sched: task group weight interface
From: Ingo Molnar @ 2008-12-12  9:31 UTC
  To: Peter Zijlstra; +Cc: dhaval, vatsa, cfriesen, linux-kernel


* Peter Zijlstra <a.p.zijlstra@chello.nl> wrote:

> Hi,
> 
> Dhaval poked me that he (and others) didn't like the weight interface 
> we currently have for task groups. I concur and dusted off these old
> patches.

applied to tip/sched/core, thanks Peter!

	Ingo


* [RESEND][PATCH]: sched: Fix compile errors introduced by new group scheduler interface
From: Dhaval Giani @ 2008-12-14  4:58 UTC
  To: Ingo Molnar; +Cc: Peter Zijlstra, vatsa, cfriesen, linux-kernel

 
> applied to tip/sched/core, thanks Peter!
> 

Also needs this to build:


sched: Fix compile errors introduced by new group scheduler interface

commit e642daa433a5ea1599965576dcea70dcc0a36608 introduces:

kernel/sched.c: In function ‘cpu_nice_write’:
kernel/sched.c:8505: error: implicit declaration of function ‘sched_group_set_nice’
kernel/sched.c:8505: error: implicit declaration of function ‘cgroup_tg’
kernel/sched.c: In function ‘cpu_nice_read’:
kernel/sched.c:8510: error: implicit declaration of function ‘sched_group_nice’
kernel/sched.c: At top level:
kernel/sched.c:8822: error: static declaration of ‘sched_group_set_nice’ follows non-static declaration
kernel/sched.c:8505: error: previous implicit declaration of ‘sched_group_set_nice’ was here
kernel/sched.c:8833: error: static declaration of ‘sched_group_nice’ follows non-static declaration
kernel/sched.c:8510: error: previous implicit declaration of ‘sched_group_nice’ was here
kernel/sched.c:9103: error: conflicting types for ‘cgroup_tg’
kernel/sched.c:8505: error: previous implicit declaration of ‘cgroup_tg’ was here
make[1]: *** [kernel/sched.o] Error 1
make: *** [kernel/sched.o] Error 2

Fix those.
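
The root cause is a plain C ordering problem: cpu_nice_write() and
cpu_nice_read() were added above the point where sched_group_set_nice(),
sched_group_nice() and cgroup_tg() are defined, so the compiler falls back to
implicit declarations, which then conflict with the later static definitions.
A minimal illustration of the error class (not kernel code); the patch below
takes the reordering route rather than adding forward declarations:

	/*
	 * Without the forward declaration below, caller() would trigger
	 * "implicit declaration of function 'helper'", and the later
	 * static definition would then conflict with that implicit one.
	 * Reordering so the definition comes first also fixes it.
	 */
	static int helper(void);

	static int caller(void)
	{
		return helper();
	}

	static int helper(void)
	{
		return 42;
	}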

Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>

---
 kernel/sched.c |   20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c	2008-12-12 16:18:54.000000000 +0530
+++ linux-2.6/kernel/sched.c	2008-12-12 16:21:36.000000000 +0530
@@ -8499,16 +8499,7 @@ static inline void unregister_fair_sched
 	list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
 }
 
-static int cpu_nice_write(struct cgroup *cgrp, struct cftype *cftype,
-			  s64 niceval)
-{
-	return sched_group_set_nice(cgroup_tg(cgrp), niceval);
-}
 
-static s64 cpu_nice_read(struct cgroup *cgrp, struct cftype *cftype)
-{
-	return sched_group_nice(cgroup_tg(cgrp));
-}
 #else /* !CONFIG_FAIR_GROUP_SCHED */
 static inline void free_fair_sched_group(struct task_group *tg)
 {
@@ -9169,6 +9160,17 @@ static u64 cpu_weight_read_u64(struct cg
 
 	return (u64) tg->weight;
 }
+
+static int cpu_nice_write(struct cgroup *cgrp, struct cftype *cftype,
+			  s64 niceval)
+{
+	return sched_group_set_nice(cgroup_tg(cgrp), niceval);
+}
+
+static s64 cpu_nice_read(struct cgroup *cgrp, struct cftype *cftype)
+{
+	return sched_group_nice(cgroup_tg(cgrp));
+}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED


-- 
regards,
Dhaval


* Re: [PATCH 0/2] sched: task group weight interface
From: Dhaval Giani @ 2009-01-12 15:14 UTC
  To: Peter Zijlstra; +Cc: mingo, vatsa, cfriesen, linux-kernel, Balbir Singh

On Wed, Dec 10, 2008 at 08:40:48PM +0100, Peter Zijlstra wrote:
> Hi,
> 
> Dhaval poked me that he (and others) didn't like the weight interface we
> currently have for task groups. I concur and dusted off these old patches.
> 

/me wonders what happened to these patches?

> 
> -- 

-- 
regards,
Dhaval

