linux-kernel.vger.kernel.org archive mirror
* [PATCH v2] sched: add a tuning knob to allow changing RR timeslice
@ 2012-11-09  1:51 Clark Williams
  2013-01-24 16:59 ` Ingo Molnar
  0 siblings, 1 reply; 9+ messages in thread
From: Clark Williams @ 2012-11-09  1:51 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Thomas Gleixner, Ingo Molnar, LKML, Steven Rostedt



This version stores the user-input value in a separate location from
the jiffies values used by the scheduler, to prevent a race condition.

Subject: [PATCH v2] sched: add a tuning knob to allow changing RR
timeslice

A user wanted a facility similar to the ability on Solaris to adjust
the SCHED_RR timeslice value. Add a /proc/sys/kernel scheduler knob
named sched_rr_timeslice_ms which allows global changing of the SCHED_RR
timeslice value. The user-visible value is in milliseconds but is stored
as jiffies.  Setting it to 0 (zero) resets it to the default (currently 100ms).
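
For reference, here is a rough sketch of how the knob could be exercised
from userspace once the patch is applied (illustrative only, not part of
the patch; error handling trimmed):

#include <stdio.h>

int main(void)
{
	const char *knob = "/proc/sys/kernel/sched_rr_timeslice_ms";
	FILE *f;
	int val;

	f = fopen(knob, "r");			/* read the current setting */
	if (f && fscanf(f, "%d", &val) == 1)
		printf("sched_rr_timeslice_ms: %d\n", val);
	if (f)
		fclose(f);

	f = fopen(knob, "w");			/* writing requires root */
	if (f) {
		fprintf(f, "%d\n", 50);		/* ask for a 50ms SCHED_RR timeslice */
		fclose(f);			/* writing 0 instead restores the 100ms default */
	}
	return 0;
}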

Patch against tip/master, currently 3.7-rc3.

Signed-off-by: Clark Williams <williams@redhat.com>
---
 include/linux/sched.h |  7 ++++++-
 kernel/sched/core.c   | 30 ++++++++++++++++++++++++++++++
 kernel/sched/rt.c     |  4 ++--
 kernel/sysctl.c       |  7 +++++++
 4 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 03be150..1e2f38a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2071,11 +2071,16 @@ static inline unsigned int
get_sysctl_timer_migration(void) #endif /* CONFIG_SCHED_DEBUG */
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
-
+extern int sysctl_sched_rr_timeslice_ms;
+extern int sched_rr_timeslice;
 int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
+int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 extern unsigned int sysctl_sched_autogroup_enabled;
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c2e077c..318f617 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -288,6 +288,17 @@ __read_mostly int scheduler_running;
 int sysctl_sched_rt_runtime = 950000;
 
 
+/*
+ * SCHED_RR timeslice in jiffies
+ *
+ */
+int sched_rr_timeslice = RR_TIMESLICE;
+int sysctl_sched_rr_timeslice_ms;
+
+static inline void init_rr_timeslice()
+{
+	sysctl_sched_rr_timeslice_ms = msecs_to_jiffies(RR_TIMESLICE);
+}
 
 /*
  * __task_rq_lock - lock the rq @p resides on.
@@ -6863,6 +6874,8 @@ void __init sched_init(void)
 	init_rt_bandwidth(&def_rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
 
+	init_rr_timeslice();
+
 #ifdef CONFIG_RT_GROUP_SCHED
 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
@@ -7543,6 +7556,23 @@ int sched_rt_handler(struct ctl_table *table,
int write, return ret;
 }
 
+int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	/* make sure we maintain jiffies internally */
+	if (!ret && write)
+		sched_rr_timeslice = (sysctl_sched_rr_timeslice_ms <=
0) ?
+			RR_TIMESLICE :
msecs_to_jiffies(sysctl_sched_rr_timeslice_ms);
+	mutex_unlock(&mutex);
+	return ret;
+}
+
 #ifdef CONFIG_CGROUP_SCHED
 
 /* return corresponding task_group object of a cgroup */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 418feb0..71aa6d0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2010,7 +2010,7 @@ static void task_tick_rt(struct rq *rq, struct
task_struct *p, int queued) if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = RR_TIMESLICE;
+	p->rt.time_slice = sched_rr_timeslice;
 
 	/*
 	 * Requeue to the end of queue if we (and all of our
ancestors) are the @@ -2041,7 +2041,7 @@ static unsigned int
get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return RR_TIMESLICE;
+		return sched_rr_timeslice;
 	else
 		return 0;
 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b769d25..9fa0885 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -403,6 +403,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= sched_rt_handler,
 	},
+	{
+		.procname	= "sched_rr_timeslice_ms",
+		.data		= &sysctl_sched_rr_timeslice_ms,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= sched_rr_handler,
+	},
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname	= "sched_autogroup_enabled",
-- 
1.7.11.7




* Re: [PATCH v2] sched: add a tuning knob to allow changing RR timeslice
  2012-11-09  1:51 [PATCH v2] sched: add a tuning knob to allow changing RR timeslice Clark Williams
@ 2013-01-24 16:59 ` Ingo Molnar
  2013-01-24 19:54   ` Clark Williams
  0 siblings, 1 reply; 9+ messages in thread
From: Ingo Molnar @ 2013-01-24 16:59 UTC (permalink / raw)
  To: Clark Williams
  Cc: Peter Zijlstra, Thomas Gleixner, Ingo Molnar, LKML, Steven Rostedt


* Clark Williams <williams@redhat.com> wrote:

> This version stores the user-input value in a separate 
> location from the jiffies values used by the scheduler, to 
> prevent a race condition.
> 
> Subject: [PATCH v2] sched: add a tuning knob to allow changing 
> RR timeslice

looks useful.

> @@ -2010,7 +2010,7 @@ static void task_tick_rt(struct rq *rq, struct
> task_struct *p, int queued) if (--p->rt.time_slice)
>  		return;
>  
> -	p->rt.time_slice = RR_TIMESLICE;
> +	p->rt.time_slice = sched_rr_timeslice;
>  
>  	/*
>  	 * Requeue to the end of queue if we (and all of our
> ancestors) are the @@ -2041,7 +2041,7 @@ static unsigned int
> get_rr_interval_rt(struct rq *rq, struct task_struct *task)
>  	 * Time slice is 0 for SCHED_FIFO tasks

Patch won't apply due to patch corruption, alas.

Thanks,

	Ingo


* Re: [PATCH v2] sched: add a tuning knob to allow changing RR timeslice
  2013-01-24 16:59 ` Ingo Molnar
@ 2013-01-24 19:54   ` Clark Williams
  2013-01-25  8:19     ` Ingo Molnar
  0 siblings, 1 reply; 9+ messages in thread
From: Clark Williams @ 2013-01-24 19:54 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Peter Zijlstra, Thomas Gleixner, Ingo Molnar, LKML, Steven Rostedt


On Thu, 24 Jan 2013 17:59:55 +0100
Ingo Molnar <mingo@kernel.org> wrote:

> 
> * Clark Williams <williams@redhat.com> wrote:
> 
> > This version stores the user-input value in a separate 
> > location from the jiffies values used by the scheduler, to 
> > prevent a race condition.
> > 
> > Subject: [PATCH v2] sched: add a tuning knob to allow changing 
> > RR timeslice
> 
> looks useful.
> 
> > @@ -2010,7 +2010,7 @@ static void task_tick_rt(struct rq *rq, struct
> > task_struct *p, int queued) if (--p->rt.time_slice)
> >  		return;
> >  
> > -	p->rt.time_slice = RR_TIMESLICE;
> > +	p->rt.time_slice = sched_rr_timeslice;
> >  
> >  	/*
> >  	 * Requeue to the end of queue if we (and all of our
> > ancestors) are the @@ -2041,7 +2041,7 @@ static unsigned int
> > get_rr_interval_rt(struct rq *rq, struct task_struct *task)
> >  	 * Time slice is 0 for SCHED_FIFO tasks
> 
> Patch won't apply due to patch corruption, alas.
> 
> 

Easily fixed. Modified for 3.8-rc4:

commit 0e2d40c5c84d06670f85cc212591f27f69f59c62
Author: Clark Williams <williams@redhat.com>
Date:   Thu Jan 24 13:51:01 2013 -0600

    [kernel] sched: add a tuning knob to allow changing RR timeslice
    
    Add a /proc/sys/kernel scheduler knob named sched_rr_timeslice_ms
    that allows global changing of the SCHED_RR timeslice value. User
    visable value is in milliseconds but is stored as jiffies.  Setting
    to 0 (zero) resets to the default (currently 100ms).
    
    Signed-off-by: Clark Williams <williams@redhat.com>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6fc8f45..d803690 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2082,6 +2082,11 @@ int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
+extern int sched_rr_timeslice;
+extern int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 extern unsigned int sysctl_sched_autogroup_enabled;
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 257002c..5675074 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7507,6 +7507,24 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	/* make sure that internally we keep jiffies */
+	/* also, writing zero resets timeslice to default */
+	if (!ret && write) 
+		sched_rr_timeslice = sched_rr_timeslice <= 0 ? 
+			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+	mutex_unlock(&mutex);
+	return ret;
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 418feb0..6c54e83 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -11,6 +11,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
 struct rt_bandwidth def_rt_bandwidth;
 
+int sched_rr_timeslice = RR_TIMESLICE;
+
 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
 {
 	struct rt_bandwidth *rt_b =
@@ -2010,7 +2012,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = RR_TIMESLICE;
+	p->rt.time_slice = sched_rr_timeslice;
 
 	/*
 	 * Requeue to the end of queue if we (and all of our ancestors) are the
@@ -2041,7 +2043,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return RR_TIMESLICE;
+		return sched_rr_timeslice;
 	else
 		return 0;
 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c88878d..1eabf86 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -403,6 +403,14 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= sched_rt_handler,
 	},
+	{
+		.procname	= "sched_rr_timeslice_ms",
+		.data		= &sched_rr_timeslice,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= sched_rr_handler,
+	},
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname	= "sched_autogroup_enabled",




* Re: [PATCH v2] sched: add a tuning knob to allow changing RR timeslice
  2013-01-24 19:54   ` Clark Williams
@ 2013-01-25  8:19     ` Ingo Molnar
  2013-01-25 18:02       ` Clark Williams
  0 siblings, 1 reply; 9+ messages in thread
From: Ingo Molnar @ 2013-01-25  8:19 UTC (permalink / raw)
  To: Clark Williams
  Cc: Peter Zijlstra, Thomas Gleixner, Ingo Molnar, LKML, Steven Rostedt


* Clark Williams <williams@redhat.com> wrote:

> On Thu, 24 Jan 2013 17:59:55 +0100
> Ingo Molnar <mingo@kernel.org> wrote:
> 
> > 
> > * Clark Williams <williams@redhat.com> wrote:
> > 
> > > This version stores the user-input value in a separate 
> > > location from the jiffies values used by the scheduler, to 
> > > prevent a race condition.
> > > 
> > > Subject: [PATCH v2] sched: add a tuning knob to allow changing 
> > > RR timeslice
> > 
> > looks useful.
> > 
> > > @@ -2010,7 +2010,7 @@ static void task_tick_rt(struct rq *rq, struct
> > > task_struct *p, int queued) if (--p->rt.time_slice)
> > >  		return;
> > >  
> > > -	p->rt.time_slice = RR_TIMESLICE;
> > > +	p->rt.time_slice = sched_rr_timeslice;
> > >  
> > >  	/*
> > >  	 * Requeue to the end of queue if we (and all of our
> > > ancestors) are the @@ -2041,7 +2041,7 @@ static unsigned int
> > > get_rr_interval_rt(struct rq *rq, struct task_struct *task)
> > >  	 * Time slice is 0 for SCHED_FIFO tasks
> > 
> > Patch won't apply due to patch corruption, alas.
> > 
> > 
> 
> Easily fixed. Modified for 3.8-rc4:

Thanks. Some more substantial review feedback this time around:

> 
> commit 0e2d40c5c84d06670f85cc212591f27f69f59c62
> Author: Clark Williams <williams@redhat.com>
> Date:   Thu Jan 24 13:51:01 2013 -0600
> 
>     [kernel] sched: add a tuning knob to allow changing RR timeslice
>     
>     Add a /proc/sys/kernel scheduler knob named sched_rr_timeslice_ms

s/Add a /proc/sys/kernel/sched_rr_timeslice_ms scheduler knob

>     that allows global changing of the SCHED_RR timeslice value. User
>     visable value is in milliseconds but is stored as jiffies.  Setting

s/visible

>     to 0 (zero) resets to the default (currently 100ms).
>     
>     Signed-off-by: Clark Williams <williams@redhat.com>
> 
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 6fc8f45..d803690 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -2082,6 +2082,11 @@ int sched_rt_handler(struct ctl_table *table, int write,
>  		void __user *buffer, size_t *lenp,
>  		loff_t *ppos);
>  
> +extern int sched_rr_timeslice;
> +extern int sched_rr_handler(struct ctl_table *table, int write,
> +		void __user *buffer, size_t *lenp,
> +		loff_t *ppos);
> +

Shouldn't this be in kernel/sched/sched.h instead of the 
(already too large) linux/sched.h?

>  #ifdef CONFIG_SCHED_AUTOGROUP
>  extern unsigned int sysctl_sched_autogroup_enabled;
>  
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 257002c..5675074 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -7507,6 +7507,24 @@ static int sched_rt_global_constraints(void)
>  }
>  #endif /* CONFIG_RT_GROUP_SCHED */
>  
> +int sched_rr_handler(struct ctl_table *table, int write,
> +		void __user *buffer, size_t *lenp,
> +		loff_t *ppos)
> +{
> +	int ret;
> +	static DEFINE_MUTEX(mutex);

This mutex should be outside the function (not in local scope), 
and named like this, with an explanation:

+/*
+ * Since it's an int the CPU will always read a full word
+ * of the RR timeslice interval - no need for locking.
+ *
+ * But in the RR handler we read the value multiple times
+ * before setting it, which should be protected - hence
+ * this mutex:
+ */
+static DEFINE_MUTEX(rr_timeslice_mutex);
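
Spelled out with the names above (purely illustrative), the asymmetry is:
the scheduler fast path only ever does a single aligned int load, while
the handler does a read-modify-write that the mutex has to serialize
against other writers:

	/* reader side, e.g. task_tick_rt(): one plain int load, no lock */
	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Writer side, sched_rr_handler(): proc_dointvec() stores the raw
	 * ms value, which is then read back and rewritten as jiffies.
	 * Two writers must not interleave here - hence the mutex:
	 */
	mutex_lock(&rr_timeslice_mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write)
		sched_rr_timeslice = msecs_to_jiffies(sched_rr_timeslice);
	mutex_unlock(&rr_timeslice_mutex);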


> +
> +	mutex_lock(&mutex);
> +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
> +	/* make sure that internally we keep jiffies */
> +	/* also, writing zero resets timeslice to default */
> +	if (!ret && write) 
> +		sched_rr_timeslice = sched_rr_timeslice <= 0 ? 
> +			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
> +	mutex_unlock(&mutex);
> +	return ret;

A couple of stray spaces at end of lines. Also, please put curly 
braces around multi-line statements.

Multi-line comments should be like this:

  /*
   * Comment .....
   * ...... goes here.
   */
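
Applied to the hunk above (and with the trailing whitespace dropped), that
would come out roughly as:

	/*
	 * Make sure that internally we keep jiffies.
	 * Also, writing zero resets the timeslice to the default.
	 */
	if (!ret && write) {
		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
	}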


> +}
> +
>  int sched_rt_handler(struct ctl_table *table, int write,
>  		void __user *buffer, size_t *lenp,
>  		loff_t *ppos)
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 418feb0..6c54e83 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -11,6 +11,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
>  
>  struct rt_bandwidth def_rt_bandwidth;
>  
> +int sched_rr_timeslice = RR_TIMESLICE;
> +

I think this could go into core.c as well, together with the 
mutex. That way it's easier to see why the mutex is needed as 
well.

It should also be __read_mostly.
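
I.e., something like this next to the handler in kernel/sched/core.c
(sketch):

int sched_rr_timeslice __read_mostly = RR_TIMESLICE;
static DEFINE_MUTEX(rr_timeslice_mutex);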

>  static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
>  {
>  	struct rt_bandwidth *rt_b =
> @@ -2010,7 +2012,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
>  	if (--p->rt.time_slice)
>  		return;
>  
> -	p->rt.time_slice = RR_TIMESLICE;
> +	p->rt.time_slice = sched_rr_timeslice;
>  
>  	/*
>  	 * Requeue to the end of queue if we (and all of our ancestors) are the
> @@ -2041,7 +2043,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
>  	 * Time slice is 0 for SCHED_FIFO tasks
>  	 */
>  	if (task->policy == SCHED_RR)
> -		return RR_TIMESLICE;
> +		return sched_rr_timeslice;
>  	else
>  		return 0;
>  }
> diff --git a/kernel/sysctl.c b/kernel/sysctl.c
> index c88878d..1eabf86 100644
> --- a/kernel/sysctl.c
> +++ b/kernel/sysctl.c
> @@ -403,6 +403,14 @@ static struct ctl_table kern_table[] = {
>  		.mode		= 0644,
>  		.proc_handler	= sched_rt_handler,
>  	},
> +	{
> +		.procname	= "sched_rr_timeslice_ms",
> +		.data		= &sched_rr_timeslice,
> +		.maxlen		= sizeof(int),
> +		.mode		= 0644,
> +		.proc_handler	= sched_rr_handler,

Does this allow negative values? Wouldn't it be better to make 
it unsigned int all around?

Thanks,

	Ingo


* Re: [PATCH v2] sched: add a tuning knob to allow changing RR timeslice
  2013-01-25  8:19     ` Ingo Molnar
@ 2013-01-25 18:02       ` Clark Williams
  2013-01-25 18:36         ` Ingo Molnar
  0 siblings, 1 reply; 9+ messages in thread
From: Clark Williams @ 2013-01-25 18:02 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Peter Zijlstra, Thomas Gleixner, Ingo Molnar, LKML, Steven Rostedt


On Fri, 25 Jan 2013 09:19:16 +0100
Ingo Molnar <mingo@kernel.org> wrote:

> 
> * Clark Williams <williams@redhat.com> wrote:
> 
> > On Thu, 24 Jan 2013 17:59:55 +0100
> > Ingo Molnar <mingo@kernel.org> wrote:
> > 
> > > 
> > > * Clark Williams <williams@redhat.com> wrote:
> > > 
> > > > This version stores the user-input value in a separate 
> > > > location from the jiffies values used by the scheduler, to 
> > > > prevent a race condition.
> > > > 
> > > > Subject: [PATCH v2] sched: add a tuning knob to allow changing 
> > > > RR timeslice
> > > 
> > > looks useful.
> > > 
> > > > @@ -2010,7 +2010,7 @@ static void task_tick_rt(struct rq *rq, struct
> > > > task_struct *p, int queued) if (--p->rt.time_slice)
> > > >  		return;
> > > >  
> > > > -	p->rt.time_slice = RR_TIMESLICE;
> > > > +	p->rt.time_slice = sched_rr_timeslice;
> > > >  
> > > >  	/*
> > > >  	 * Requeue to the end of queue if we (and all of our
> > > > ancestors) are the @@ -2041,7 +2041,7 @@ static unsigned int
> > > > get_rr_interval_rt(struct rq *rq, struct task_struct *task)
> > > >  	 * Time slice is 0 for SCHED_FIFO tasks
> > > 
> > > Patch won't apply due to patch corruption, alas.
> > > 
> > > 
> > 
> > Easily fixed. Modified for 3.8-rc4:
> 
> Thanks. Some more substantial review feedback this time around:
> 
> > 
> > commit 0e2d40c5c84d06670f85cc212591f27f69f59c62
> > Author: Clark Williams <williams@redhat.com>
> > Date:   Thu Jan 24 13:51:01 2013 -0600
> > 
> >     [kernel] sched: add a tuning knob to allow changing RR timeslice
> >     
> >     Add a /proc/sys/kernel scheduler knob named sched_rr_timeslice_ms
> 
> s/Add a /proc/sys/kernel/sched_rr_timeslice_ms scheduler knob
> 
> >     that allows global changing of the SCHED_RR timeslice value. User
> >     visable value is in milliseconds but is stored as jiffies.  Setting
> 
> s/visible
> 
> >     to 0 (zero) resets to the default (currently 100ms).
> >     
> >     Signed-off-by: Clark Williams <williams@redhat.com>
> > 
> > diff --git a/include/linux/sched.h b/include/linux/sched.h
> > index 6fc8f45..d803690 100644
> > --- a/include/linux/sched.h
> > +++ b/include/linux/sched.h
> > @@ -2082,6 +2082,11 @@ int sched_rt_handler(struct ctl_table *table, int write,
> >  		void __user *buffer, size_t *lenp,
> >  		loff_t *ppos);
> >  
> > +extern int sched_rr_timeslice;
> > +extern int sched_rr_handler(struct ctl_table *table, int write,
> > +		void __user *buffer, size_t *lenp,
> > +		loff_t *ppos);
> > +
> 
> Shouldn't this be in kernel/sched/sched.h instead of the 
> (already too large) linux/sched.h?
> 

I don't think that will work because kernel/sysctl.c needs the
externs and I doubt we want to include kernel/sched/sched.h from there. 

> >  #ifdef CONFIG_SCHED_AUTOGROUP
> >  extern unsigned int sysctl_sched_autogroup_enabled;
> >  
> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index 257002c..5675074 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -7507,6 +7507,24 @@ static int sched_rt_global_constraints(void)
> >  }
> >  #endif /* CONFIG_RT_GROUP_SCHED */
> >  
> > +int sched_rr_handler(struct ctl_table *table, int write,
> > +		void __user *buffer, size_t *lenp,
> > +		loff_t *ppos)
> > +{
> > +	int ret;
> > +	static DEFINE_MUTEX(mutex);
> 
> This mutex should be outside the function (not in local scope), 
> and named like this, with an explanation:
> 
> +/*
> + * Since it's an int the CPU will always read a full word
> + * of the RR timeslice interval - no need for locking.
> + *
> + * But in the RR handler we read the value multiple times
> + * before setting it, which should be protected - hence
> + * this mutex:
> + */
> +static DEFINE_MUTEX(rr_timeslice_mutex);
> 

Done.

> 
> > +
> > +	mutex_lock(&mutex);
> > +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
> > +	/* make sure that internally we keep jiffies */
> > +	/* also, writing zero resets timeslice to default */
> > +	if (!ret && write) 
> > +		sched_rr_timeslice = sched_rr_timeslice <= 0 ? 
> > +			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
> > +	mutex_unlock(&mutex);
> > +	return ret;
> 
> A couple of stray spaces at end of lines. Also, please put curly 
> braces around multi-line statements.
> 

I didn't put curly braces there because "technically" that's a single
statement. But since it does span multiple lines, I've added them :).

> Multi-line comments should be like this:
> 
>   /*
>    * Comment .....
>    * ...... goes here.
>    */
> 

Done.

> 
> > +}
> > +
> >  int sched_rt_handler(struct ctl_table *table, int write,
> >  		void __user *buffer, size_t *lenp,
> >  		loff_t *ppos)
> > diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> > index 418feb0..6c54e83 100644
> > --- a/kernel/sched/rt.c
> > +++ b/kernel/sched/rt.c
> > @@ -11,6 +11,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
> >  
> >  struct rt_bandwidth def_rt_bandwidth;
> >  
> > +int sched_rr_timeslice = RR_TIMESLICE;
> > +
> 
> I think this could go into core.c as well, together with the 
> mutex. That way it's easier to see why the mutex is needed as 
> well.
> 
> It should also be __read_mostly.

Done and done. 

> 
> >  static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
> >  {
> >  	struct rt_bandwidth *rt_b =
> > @@ -2010,7 +2012,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
> >  	if (--p->rt.time_slice)
> >  		return;
> >  
> > -	p->rt.time_slice = RR_TIMESLICE;
> > +	p->rt.time_slice = sched_rr_timeslice;
> >  
> >  	/*
> >  	 * Requeue to the end of queue if we (and all of our ancestors) are the
> > @@ -2041,7 +2043,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
> >  	 * Time slice is 0 for SCHED_FIFO tasks
> >  	 */
> >  	if (task->policy == SCHED_RR)
> > -		return RR_TIMESLICE;
> > +		return sched_rr_timeslice;
> >  	else
> >  		return 0;
> >  }
> > diff --git a/kernel/sysctl.c b/kernel/sysctl.c
> > index c88878d..1eabf86 100644
> > --- a/kernel/sysctl.c
> > +++ b/kernel/sysctl.c
> > @@ -403,6 +403,14 @@ static struct ctl_table kern_table[] = {
> >  		.mode		= 0644,
> >  		.proc_handler	= sched_rt_handler,
> >  	},
> > +	{
> > +		.procname	= "sched_rr_timeslice_ms",
> > +		.data		= &sched_rr_timeslice,
> > +		.maxlen		= sizeof(int),
> > +		.mode		= 0644,
> > +		.proc_handler	= sched_rr_handler,
> 
> Does this allow negative values? Wouldn't it be better to make 
> it unsigned int all around?

Good point. Changed to unsigned int. 

Updated patch:

commit 93dfbf6326cc4ba85c917f9440203f9fc19e9bcc
Author: Clark Williams <williams@redhat.com>
Date:   Thu Jan 24 13:51:01 2013 -0600

    [kernel] sched: add a tuning knob to allow changing RR timeslice
    
    Add a /proc/sys/kernel/sched_rr_timeslice_ms tuning knob
    that allows global changing of the SCHED_RR timeslice value. User
    visible value is in milliseconds but is stored as jiffies.  Setting
    to 0 (zero) resets to the default (currently 100ms).
    
    Signed-off-by: Clark Williams <williams@redhat.com>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6fc8f45..5d0a7cf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1226,6 +1226,11 @@ struct sched_rt_entity {
  */
 #define RR_TIMESLICE		(100 * HZ / 1000)
 
+extern __read_mostly unsigned int sched_rr_timeslice;
+
+extern int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
 struct rcu_node;
 
 enum perf_event_task_context {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 257002c..0f7e6a2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7507,6 +7507,43 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+/*
+ * Since it's an int the CPU will always read a full word
+ * of the RR timeslice interval - no need for locking.
+ *
+ * But in the RR handler we read the value multiple times
+ * before setting it, which should be protected - hence
+ * this mutex:
+ */
+static DEFINE_MUTEX(rr_timeslice_mutex);
+
+unsigned int __read_mostly sched_rr_timeslice = RR_TIMESLICE;
+
+/*
+ * manage /proc/sys/kernel/sched_rr_timeslice_ms entry
+ * for changing SCHED_RR quantum interval
+ */
+
+int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&rr_timeslice_mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	/*
+	 * make sure that internally we keep jiffies
+	 * also, writing zero resets timeslice to default
+	 */
+	if (!ret && write)  {
+		sched_rr_timeslice = sched_rr_timeslice == 0 ?
+			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+	}
+	mutex_unlock(&rr_timeslice_mutex);
+	return ret;
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 418feb0..71aa6d0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2010,7 +2010,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = RR_TIMESLICE;
+	p->rt.time_slice = sched_rr_timeslice;
 
 	/*
 	 * Requeue to the end of queue if we (and all of our ancestors) are the
@@ -2041,7 +2041,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return RR_TIMESLICE;
+		return sched_rr_timeslice;
 	else
 		return 0;
 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c88878d..6770fc8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -403,6 +403,14 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= sched_rt_handler,
 	},
+	{
+		.procname	= "sched_rr_timeslice_ms",
+		.data		= &sched_rr_timeslice,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_rr_handler,
+	},
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname	= "sched_autogroup_enabled",






* Re: [PATCH v2] sched: add a tuning knob to allow changing RR timeslice
  2013-01-25 18:02       ` Clark Williams
@ 2013-01-25 18:36         ` Ingo Molnar
  2013-01-25 21:43           ` Clark Williams
  0 siblings, 1 reply; 9+ messages in thread
From: Ingo Molnar @ 2013-01-25 18:36 UTC (permalink / raw)
  To: Clark Williams
  Cc: Peter Zijlstra, Thomas Gleixner, Ingo Molnar, LKML, Steven Rostedt


* Clark Williams <williams@redhat.com> wrote:

> > Shouldn't this be in kernel/sched/sched.h instead of the 
> > (already too large) linux/sched.h?
> 
> I don't think that will work because kernel/sysctl.c needs 
> the externs and I doubt we want to include 
> kernel/sched/sched.h from there.

Then at least introduce a new include/linux/sched_sysctl.h file 
please, and include it in sysctl.c and move this new knob there?

Other scheduler related sysctl knobs can move there too later 
on, reducing the burden on sched.h.
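
A minimal starting point for that header might look roughly like this
(sketch, with only the new knob moved for now):

/* include/linux/sched_sysctl.h (sketch) */
#ifndef _SCHED_SYSCTL_H
#define _SCHED_SYSCTL_H

struct ctl_table;

extern unsigned int sched_rr_timeslice;

extern int sched_rr_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

#endif /* _SCHED_SYSCTL_H */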

Thanks,

	Ingo


* Re: [PATCH v2] sched: add a tuning knob to allow changing RR timeslice
  2013-01-25 18:36         ` Ingo Molnar
@ 2013-01-25 21:43           ` Clark Williams
  2013-01-26 12:16             ` Ingo Molnar
  2013-01-26 12:18             ` Ingo Molnar
  0 siblings, 2 replies; 9+ messages in thread
From: Clark Williams @ 2013-01-25 21:43 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Peter Zijlstra, Thomas Gleixner, Ingo Molnar, LKML, Steven Rostedt


On Fri, 25 Jan 2013 19:36:10 +0100
Ingo Molnar <mingo@kernel.org> wrote:

> 
> * Clark Williams <williams@redhat.com> wrote:
> 
> > > Shouldn't this be in kernel/sched/sched.h instead of the 
> > > (already too large) linux/sched.h?
> > 
> > I don't think that will work because kernel/sysctl.c needs 
> > the externs and I doubt we want to include 
> > kernel/sched/sched.h from there.
> 
> Then at least introduce a new include/linux/sched_sysctl.h file 
> please, and include it in sysctl.c and move this new knob there?
> 
> Other scheduler related sysctl knobs can move there too later 
> on, reducing the burden on sched.h.
> 
> Thanks,
> 
> 	Ingo


Like this?

commit 907d3684c01fcc5c6a985276f750d816f309651c
Author: Clark Williams <williams@redhat.com>
Date:   Fri Jan 25 14:43:05 2013 -0600

    [kernel] create include file for scheduler tuning knobs
    
    Create include/linux/sched_sysctl.h for scheduler tuning knob-related
    definitions, then move sysctl-related bits into it from
    include/linux/sched.h.
    
    Signed-off-by: Clark Williams <williams@redhat.com>

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5d0a7cf..c39e995 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -51,6 +51,7 @@ struct sched_param {
 #include <linux/cred.h>
 #include <linux/llist.h>
 #include <linux/uidgid.h>
+#include <linux/sched_sysctl.h>
 
 #include <asm/processor.h>
 
@@ -304,19 +305,6 @@ static inline void lockup_detector_init(void)
 }
 #endif
 
-#ifdef CONFIG_DETECT_HUNG_TASK
-extern unsigned int  sysctl_hung_task_panic;
-extern unsigned long sysctl_hung_task_check_count;
-extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_warnings;
-extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-					 void __user *buffer,
-					 size_t *lenp, loff_t *ppos);
-#else
-/* Avoid need for ifdefs elsewhere in the code */
-enum { sysctl_hung_task_timeout_secs = 0 };
-#endif
-
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched		__attribute__((__section__(".sched.text")))
 
@@ -338,23 +326,6 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 struct nsproxy;
 struct user_namespace;
 
-/*
- * Default maximum number of active map areas, this limits the number of vmas
- * per mm struct. Users can overwrite this number by sysctl but there is a
- * problem.
- *
- * When a program's coredump is generated as ELF format, a section is created
- * per a vma. In ELF, the number of sections is represented in unsigned short.
- * This means the number of sections should be smaller than 65535 at coredump.
- * Because the kernel adds some informative sections to a image of program at
- * generating coredump, we need some margin. The number of extra sections is
- * 1-3 now and depends on arch. We use "5" as safe margin, here.
- */
-#define MAPCOUNT_ELF_CORE_MARGIN	(5)
-#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-
-extern int sysctl_max_map_count;
-
 #include <linux/aio.h>
 
 #ifdef CONFIG_MMU
@@ -1220,17 +1191,6 @@ struct sched_rt_entity {
 #endif
 };
 
-/*
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire.
- */
-#define RR_TIMESLICE		(100 * HZ / 1000)
-
-extern __read_mostly unsigned int sched_rr_timeslice;
-
-extern int sched_rr_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos);
 struct rcu_node;
 
 enum perf_event_task_context {
@@ -2038,76 +1998,6 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
-
-enum sched_tunable_scaling {
-	SCHED_TUNABLESCALING_NONE,
-	SCHED_TUNABLESCALING_LOG,
-	SCHED_TUNABLESCALING_LINEAR,
-	SCHED_TUNABLESCALING_END,
-};
-extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_period_reset;
-extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_migration_cost;
-extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
-extern unsigned int sysctl_sched_shares_window;
-
-int sched_proc_update_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length,
-		loff_t *ppos);
-#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-	return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-	return 1;
-}
-#endif
-extern unsigned int sysctl_sched_rt_period;
-extern int sysctl_sched_rt_runtime;
-
-int sched_rt_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos);
-
-#ifdef CONFIG_SCHED_AUTOGROUP
-extern unsigned int sysctl_sched_autogroup_enabled;
-
-extern void sched_autogroup_create_attach(struct task_struct *p);
-extern void sched_autogroup_detach(struct task_struct *p);
-extern void sched_autogroup_fork(struct signal_struct *sig);
-extern void sched_autogroup_exit(struct signal_struct *sig);
-#ifdef CONFIG_PROC_FS
-extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
-extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
-#endif
-#else
-static inline void sched_autogroup_create_attach(struct task_struct *p) { }
-static inline void sched_autogroup_detach(struct task_struct *p) { }
-static inline void sched_autogroup_fork(struct signal_struct *sig) { }
-static inline void sched_autogroup_exit(struct signal_struct *sig) { }
-#endif
-
-#ifdef CONFIG_CFS_BANDWIDTH
-extern unsigned int sysctl_sched_cfs_bandwidth_slice;
-#endif
 
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
diff --git a/include/linux/sched_sysctl.h b/include/linux/sched_sysctl.h
new file mode 100644
index 0000000..66e5faa
--- /dev/null
+++ b/include/linux/sched_sysctl.h
@@ -0,0 +1,133 @@
+#ifndef _SCHED_SYSCTL_H
+#define _SCHED_SYSCTL_H
+
+
+/* provide a home for sysctl scheduler tuning knobs */
+
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE		(100 * HZ / 1000)
+
+/*
+ * allow user modification of global SCHED_RR timeslice
+ * through:
+ *
+ *    /proc/sys/kernel/sched_rr_timeslice_ms
+ */
+extern __read_mostly unsigned int sched_rr_timeslice;
+
+extern int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
+/*
+ *  control realtime throttling:
+ *
+ *  /proc/sys/kernel/sched_rt_period_us
+ *  /proc/sys/kernel/sched_rt_runtime_us
+ */
+extern unsigned int sysctl_sched_rt_period;
+extern int sysctl_sched_rt_runtime;
+
+extern int sched_rt_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
+extern unsigned int sysctl_sched_latency;
+extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+	SCHED_TUNABLESCALING_NONE,
+	SCHED_TUNABLESCALING_LOG,
+	SCHED_TUNABLESCALING_LINEAR,
+	SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
+extern unsigned int sysctl_numa_balancing_scan_delay;
+extern unsigned int sysctl_numa_balancing_scan_period_min;
+extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_period_reset;
+extern unsigned int sysctl_numa_balancing_scan_size;
+extern unsigned int sysctl_numa_balancing_settle_count;
+
+#ifdef CONFIG_SCHED_DEBUG
+extern unsigned int sysctl_sched_migration_cost;
+extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_time_avg;
+extern unsigned int sysctl_timer_migration;
+extern unsigned int sysctl_sched_shares_window;
+
+int sched_proc_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *length,
+		loff_t *ppos);
+#endif
+#ifdef CONFIG_SCHED_DEBUG
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+	return sysctl_timer_migration;
+}
+#else
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+	return 1;
+}
+#endif
+
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+#endif
+
+#ifdef CONFIG_DETECT_HUNG_TASK
+extern unsigned int  sysctl_hung_task_panic;
+extern unsigned long sysctl_hung_task_check_count;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_warnings;
+extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+					 void __user *buffer,
+					 size_t *lenp, loff_t *ppos);
+#else
+/* Avoid need for ifdefs elsewhere in the code */
+enum { sysctl_hung_task_timeout_secs = 0 };
+#endif
+
+/*
+ * Default maximum number of active map areas, this limits the number of vmas
+ * per mm struct. Users can overwrite this number by sysctl but there is a
+ * problem.
+ *
+ * When a program's coredump is generated as ELF format, a section is created
+ * per a vma. In ELF, the number of sections is represented in unsigned short.
+ * This means the number of sections should be smaller than 65535 at coredump.
+ * Because the kernel adds some informative sections to a image of program at
+ * generating coredump, we need some margin. The number of extra sections is
+ * 1-3 now and depends on arch. We use "5" as safe margin, here.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN	(5)
+#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+extern int sysctl_max_map_count;
+
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+
+extern void sched_autogroup_create_attach(struct task_struct *p);
+extern void sched_autogroup_detach(struct task_struct *p);
+extern void sched_autogroup_fork(struct signal_struct *sig);
+extern void sched_autogroup_exit(struct signal_struct *sig);
+#ifdef CONFIG_PROC_FS
+extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
+#endif
+#else
+static inline void sched_autogroup_create_attach(struct task_struct *p) { }
+static inline void sched_autogroup_detach(struct task_struct *p) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+#endif
+
+#endif /* _SCHED_SYSCTL_H */



* Re: [PATCH v2] sched: add a tuning knob to allow changing RR timeslice
  2013-01-25 21:43           ` Clark Williams
@ 2013-01-26 12:16             ` Ingo Molnar
  2013-01-26 12:18             ` Ingo Molnar
  1 sibling, 0 replies; 9+ messages in thread
From: Ingo Molnar @ 2013-01-26 12:16 UTC (permalink / raw)
  To: Clark Williams
  Cc: Peter Zijlstra, Thomas Gleixner, Ingo Molnar, LKML, Steven Rostedt


* Clark Williams <williams@redhat.com> wrote:

> On Fri, 25 Jan 2013 19:36:10 +0100
> Ingo Molnar <mingo@kernel.org> wrote:
> 
> > 
> > * Clark Williams <williams@redhat.com> wrote:
> > 
> > > > Shouldn't this be in kernel/sched/sched.h instead of the 
> > > > (already too large) linux/sched.h?
> > > 
> > > I don't think that will work because kernel/sysctl.c needs 
> > > the externs and I doubt we want to include 
> > > kernel/sched/sched.h from there.
> > 
> > Then at least introduce a new include/linux/sched_sysctl.h file 
> > please, and include it in sysctl.c and move this new knob there?
> > 
> > Other scheduler related sysctl knobs can move there too later 
> > on, reducing the burden on sched.h.
> > 
> > Thanks,
> > 
> > 	Ingo
> 
> 
> Like this?

I only suggested creating it for this new sysctl, but you doing 
the whole job for all scheduler sysctls is a perfect solution 
of course :-)

Thanks,

	Ingo


* Re: [PATCH v2] sched: add a tuning knob to allow changing RR timeslice
  2013-01-25 21:43           ` Clark Williams
  2013-01-26 12:16             ` Ingo Molnar
@ 2013-01-26 12:18             ` Ingo Molnar
  1 sibling, 0 replies; 9+ messages in thread
From: Ingo Molnar @ 2013-01-26 12:18 UTC (permalink / raw)
  To: Clark Williams
  Cc: Peter Zijlstra, Thomas Gleixner, Ingo Molnar, LKML, Steven Rostedt


Would be nice to have a patch against -tip, as we already have 
an interacting change:

 57d2aa00dcec sched/rt: Avoid updating RT entry timeout twice within one tick period

Which creates a conflict.

Thanks,

	Ingo

