* [jlelli:fixes/rt-double_enqueue 1/1] kernel/sched/core.c:7107:34: warning: Uninitialized variable: new_effective_prio [uninitvar]
@ 2021-06-14 14:00 kernel test robot
From: kernel test robot @ 2021-06-14 14:00 UTC (permalink / raw)
  To: kbuild


CC: kbuild-all@lists.01.org
TO: Juri Lelli <juri.lelli@redhat.com>

tree:   https://github.com/jlelli/linux.git fixes/rt-double_enqueue
head:   f06324e1cbda29c75934fd8596ec7048deb9d810
commit: f06324e1cbda29c75934fd8596ec7048deb9d810 [1/1] sched/rt: Fix double enqueue caused by rt_effective_prio
:::::: branch date: 4 hours ago
:::::: commit date: 4 hours ago
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>


cppcheck possible warnings (new ones are prefixed by >>; they may not be real problems):

>> kernel/sched/core.c:7107:34: warning: Uninitialized variable: new_effective_prio [uninitvar]
    __setscheduler(rq, p, attr, pi, new_effective_prio);
                                    ^
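
In the annotated listing below, new_effective_prio is declared at line 6884 without an initializer, assigned only inside the if (pi) branch at line 7093, and passed to __setscheduler() unconditionally by the call added at line 7107; on the !pi path cppcheck therefore reports a possible read of an uninitialized value. A minimal standalone reproduction of the pattern is sketched after the listing.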

vim +7107 kernel/sched/core.c

c69e8d9c01db2a kernel/sched.c      David Howells             2008-11-14  6876  
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  6877  static int __sched_setscheduler(struct task_struct *p,
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  6878  				const struct sched_attr *attr,
dbc7f069b93a24 kernel/sched/core.c Peter Zijlstra            2015-06-11  6879  				bool user, bool pi)
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6880  {
383afd0971538b kernel/sched/core.c Steven Rostedt            2014-03-11  6881  	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
383afd0971538b kernel/sched/core.c Steven Rostedt            2014-03-11  6882  		      MAX_RT_PRIO - 1 - attr->sched_priority;
da0c1e65b51a28 kernel/sched/core.c Kirill Tkhai              2014-08-20  6883  	int retval, oldprio, oldpolicy = -1, queued, running;
0782e63bc6fe7e kernel/sched/core.c Thomas Gleixner           2015-05-05  6884  	int new_effective_prio, policy = attr->sched_policy;
83ab0aa0d5623d kernel/sched.c      Thomas Gleixner           2010-02-17  6885  	const struct sched_class *prev_class;
565790d28b1e33 kernel/sched/core.c Peter Zijlstra            2020-05-11  6886  	struct callback_head *head;
eb58075149b7f0 kernel/sched/core.c Peter Zijlstra            2015-07-31  6887  	struct rq_flags rf;
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6888  	int reset_on_fork;
7a57f32a4d5c80 kernel/sched/core.c Peter Zijlstra            2017-02-21  6889  	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
eb58075149b7f0 kernel/sched/core.c Peter Zijlstra            2015-07-31  6890  	struct rq *rq;
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6891  
896bbb2522587e kernel/sched/core.c Steven Rostedt (VMware    2017-03-09  6892) 	/* The pi code expects interrupts enabled */
896bbb2522587e kernel/sched/core.c Steven Rostedt (VMware    2017-03-09  6893) 	BUG_ON(pi && in_interrupt());
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6894  recheck:
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  6895  	/* Double check policy once rq lock held: */
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6896  	if (policy < 0) {
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6897  		reset_on_fork = p->sched_reset_on_fork;
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6898  		policy = oldpolicy = p->policy;
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6899  	} else {
7479f3c9cf67ed kernel/sched/core.c Peter Zijlstra            2014-01-15  6900  		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6901  
20f9cd2acb1d74 kernel/sched/core.c Henrik Austad             2015-09-09  6902  		if (!valid_policy(policy))
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6903  			return -EINVAL;
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6904  	}
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6905  
794a56ebd9a57d kernel/sched/core.c Juri Lelli                2017-12-04  6906  	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
7479f3c9cf67ed kernel/sched/core.c Peter Zijlstra            2014-01-15  6907  		return -EINVAL;
7479f3c9cf67ed kernel/sched/core.c Peter Zijlstra            2014-01-15  6908  
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6909  	/*
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6910  	 * Valid priorities for SCHED_FIFO and SCHED_RR are
ae18ad281e8259 kernel/sched/core.c Dietmar Eggemann          2021-01-28  6911  	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
dd41f596cda0d7 kernel/sched.c      Ingo Molnar               2007-07-09  6912  	 * SCHED_BATCH and SCHED_IDLE is 0.
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6913  	 */
ae18ad281e8259 kernel/sched/core.c Dietmar Eggemann          2021-01-28  6914  	if (attr->sched_priority > MAX_RT_PRIO-1)
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6915  		return -EINVAL;
aab03e05e8f7e2 kernel/sched/core.c Dario Faggioli            2013-11-28  6916  	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
aab03e05e8f7e2 kernel/sched/core.c Dario Faggioli            2013-11-28  6917  	    (rt_policy(policy) != (attr->sched_priority != 0)))
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6918  		return -EINVAL;
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6919  
37e4ab3f0cba13 kernel/sched.c      Olivier Croquette         2005-06-25  6920  	/*
37e4ab3f0cba13 kernel/sched.c      Olivier Croquette         2005-06-25  6921  	 * Allow unprivileged RT tasks to decrease priority:
37e4ab3f0cba13 kernel/sched.c      Olivier Croquette         2005-06-25  6922  	 */
961ccddd59d627 kernel/sched.c      Rusty Russell             2008-06-23  6923  	if (user && !capable(CAP_SYS_NICE)) {
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  6924  		if (fair_policy(policy)) {
d0ea026808ad81 kernel/sched/core.c Dongsheng Yang            2014-01-27  6925  			if (attr->sched_nice < task_nice(p) &&
eaad45132c564c kernel/sched/core.c Peter Zijlstra            2014-01-16  6926  			    !can_nice(p, attr->sched_nice))
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  6927  				return -EPERM;
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  6928  		}
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  6929  
e05606d3301525 kernel/sched.c      Ingo Molnar               2007-07-09  6930  		if (rt_policy(policy)) {
a44702e8858a07 kernel/sched.c      Oleg Nesterov             2010-06-11  6931  			unsigned long rlim_rtprio =
a44702e8858a07 kernel/sched.c      Oleg Nesterov             2010-06-11  6932  					task_rlimit(p, RLIMIT_RTPRIO);
5fe1d75f349740 kernel/sched.c      Oleg Nesterov             2006-09-29  6933  
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  6934  			/* Can't set/change the rt policy: */
8dc3e9099e01df kernel/sched.c      Oleg Nesterov             2006-09-29  6935  			if (policy != p->policy && !rlim_rtprio)
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6936  				return -EPERM;
8dc3e9099e01df kernel/sched.c      Oleg Nesterov             2006-09-29  6937  
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  6938  			/* Can't increase priority: */
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  6939  			if (attr->sched_priority > p->rt_priority &&
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  6940  			    attr->sched_priority > rlim_rtprio)
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6941  				return -EPERM;
8dc3e9099e01df kernel/sched.c      Oleg Nesterov             2006-09-29  6942  		}
c02aa73b1d18e4 kernel/sched.c      Darren Hart               2011-02-17  6943  
d44753b843e093 kernel/sched/core.c Juri Lelli                2014-03-03  6944  		 /*
d44753b843e093 kernel/sched/core.c Juri Lelli                2014-03-03  6945  		  * Can't set/change SCHED_DEADLINE policy at all for now
d44753b843e093 kernel/sched/core.c Juri Lelli                2014-03-03  6946  		  * (safest behavior); in the future we would like to allow
d44753b843e093 kernel/sched/core.c Juri Lelli                2014-03-03  6947  		  * unprivileged DL tasks to increase their relative deadline
d44753b843e093 kernel/sched/core.c Juri Lelli                2014-03-03  6948  		  * or reduce their runtime (both ways reducing utilization)
d44753b843e093 kernel/sched/core.c Juri Lelli                2014-03-03  6949  		  */
d44753b843e093 kernel/sched/core.c Juri Lelli                2014-03-03  6950  		if (dl_policy(policy))
d44753b843e093 kernel/sched/core.c Juri Lelli                2014-03-03  6951  			return -EPERM;
d44753b843e093 kernel/sched/core.c Juri Lelli                2014-03-03  6952  
dd41f596cda0d7 kernel/sched.c      Ingo Molnar               2007-07-09  6953  		/*
c02aa73b1d18e4 kernel/sched.c      Darren Hart               2011-02-17  6954  		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
c02aa73b1d18e4 kernel/sched.c      Darren Hart               2011-02-17  6955  		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596cda0d7 kernel/sched.c      Ingo Molnar               2007-07-09  6956  		 */
1da1843f9f0334 kernel/sched/core.c Viresh Kumar              2018-11-05  6957  		if (task_has_idle_policy(p) && !idle_policy(policy)) {
d0ea026808ad81 kernel/sched/core.c Dongsheng Yang            2014-01-27  6958  			if (!can_nice(p, task_nice(p)))
dd41f596cda0d7 kernel/sched.c      Ingo Molnar               2007-07-09  6959  				return -EPERM;
c02aa73b1d18e4 kernel/sched.c      Darren Hart               2011-02-17  6960  		}
8dc3e9099e01df kernel/sched.c      Oleg Nesterov             2006-09-29  6961  
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  6962  		/* Can't change other user's priorities: */
c69e8d9c01db2a kernel/sched.c      David Howells             2008-11-14  6963  		if (!check_same_owner(p))
37e4ab3f0cba13 kernel/sched.c      Olivier Croquette         2005-06-25  6964  			return -EPERM;
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6965  
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  6966  		/* Normal users shall not reset the sched_reset_on_fork flag: */
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6967  		if (p->sched_reset_on_fork && !reset_on_fork)
ca94c442535a44 kernel/sched.c      Lennart Poettering        2009-06-15  6968  			return -EPERM;
37e4ab3f0cba13 kernel/sched.c      Olivier Croquette         2005-06-25  6969  	}
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6970  
725aad24c3ba96 kernel/sched.c      Jeremy Fitzhardinge       2008-08-03  6971  	if (user) {
794a56ebd9a57d kernel/sched/core.c Juri Lelli                2017-12-04  6972  		if (attr->sched_flags & SCHED_FLAG_SUGOV)
794a56ebd9a57d kernel/sched/core.c Juri Lelli                2017-12-04  6973  			return -EINVAL;
794a56ebd9a57d kernel/sched/core.c Juri Lelli                2017-12-04  6974  
b0ae1981137503 kernel/sched.c      KOSAKI Motohiro           2010-10-15  6975  		retval = security_task_setscheduler(p);
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6976  		if (retval)
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6977  			return retval;
725aad24c3ba96 kernel/sched.c      Jeremy Fitzhardinge       2008-08-03  6978  	}
725aad24c3ba96 kernel/sched.c      Jeremy Fitzhardinge       2008-08-03  6979  
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  6980  	/* Update task specific "requested" clamps */
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  6981  	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  6982  		retval = uclamp_validate(p, attr);
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  6983  		if (retval)
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  6984  			return retval;
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  6985  	}
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  6986  
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  6987  	if (pi)
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  6988  		cpuset_read_lock();
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  6989  
b29739f902ee76 kernel/sched.c      Ingo Molnar               2006-06-27  6990  	/*
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  6991  	 * Make sure no PI-waiters arrive (or leave) while we are
b29739f902ee76 kernel/sched.c      Ingo Molnar               2006-06-27  6992  	 * changing the priority of the task:
0122ec5b02f766 kernel/sched.c      Peter Zijlstra            2011-04-05  6993  	 *
25985edcedea63 kernel/sched.c      Lucas De Marchi           2011-03-30  6994  	 * To be able to change p->policy safely, the appropriate
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6995  	 * runqueue lock must be held.
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  6996  	 */
eb58075149b7f0 kernel/sched/core.c Peter Zijlstra            2015-07-31  6997  	rq = task_rq_lock(p, &rf);
80f5c1b84baa81 kernel/sched/core.c Peter Zijlstra            2016-10-03  6998  	update_rq_clock(rq);
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  6999  
34f971f6f7988b kernel/sched.c      Peter Zijlstra            2010-09-22  7000  	/*
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  7001  	 * Changing the policy of the stop threads its a very bad idea:
34f971f6f7988b kernel/sched.c      Peter Zijlstra            2010-09-22  7002  	 */
34f971f6f7988b kernel/sched.c      Peter Zijlstra            2010-09-22  7003  	if (p == rq->stop) {
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7004  		retval = -EINVAL;
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7005  		goto unlock;
34f971f6f7988b kernel/sched.c      Peter Zijlstra            2010-09-22  7006  	}
34f971f6f7988b kernel/sched.c      Peter Zijlstra            2010-09-22  7007  
a51e91981870d0 kernel/sched.c      Dario Faggioli            2011-03-24  7008  	/*
d6b1e9119787fd kernel/sched/core.c Thomas Gleixner           2014-02-07  7009  	 * If not changing anything there's no need to proceed further,
d6b1e9119787fd kernel/sched/core.c Thomas Gleixner           2014-02-07  7010  	 * but store a possible modification of reset_on_fork.
a51e91981870d0 kernel/sched.c      Dario Faggioli            2011-03-24  7011  	 */
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  7012  	if (unlikely(policy == p->policy)) {
d0ea026808ad81 kernel/sched/core.c Dongsheng Yang            2014-01-27  7013  		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  7014  			goto change;
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  7015  		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  7016  			goto change;
75381608e8410a kernel/sched/core.c Wanpeng Li                2014-11-26  7017  		if (dl_policy(policy) && dl_param_changed(p, attr))
aab03e05e8f7e2 kernel/sched/core.c Dario Faggioli            2013-11-28  7018  			goto change;
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  7019  		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  7020  			goto change;
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  7021  
d6b1e9119787fd kernel/sched/core.c Thomas Gleixner           2014-02-07  7022  		p->sched_reset_on_fork = reset_on_fork;
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7023  		retval = 0;
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7024  		goto unlock;
a51e91981870d0 kernel/sched.c      Dario Faggioli            2011-03-24  7025  	}
d50dde5a10f305 kernel/sched/core.c Dario Faggioli            2013-11-07  7026  change:
a51e91981870d0 kernel/sched.c      Dario Faggioli            2011-03-24  7027  
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7028  	if (user) {
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7029  #ifdef CONFIG_RT_GROUP_SCHED
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7030  		/*
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7031  		 * Do not allow realtime tasks into groups that have no runtime
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7032  		 * assigned.
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7033  		 */
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7034  		if (rt_bandwidth_enabled() && rt_policy(policy) &&
f44937718ce3b8 kernel/sched.c      Mike Galbraith            2011-01-13  7035  				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
f44937718ce3b8 kernel/sched.c      Mike Galbraith            2011-01-13  7036  				!task_group_is_autogroup(task_group(p))) {
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7037  			retval = -EPERM;
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7038  			goto unlock;
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7039  		}
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7040  #endif
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7041  #ifdef CONFIG_SMP
794a56ebd9a57d kernel/sched/core.c Juri Lelli                2017-12-04  7042  		if (dl_bandwidth_enabled() && dl_policy(policy) &&
794a56ebd9a57d kernel/sched/core.c Juri Lelli                2017-12-04  7043  				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7044  			cpumask_t *span = rq->rd->span;
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7045  
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7046  			/*
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7047  			 * Don't allow tasks with an affinity mask smaller than
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7048  			 * the entire root_domain to become SCHED_DEADLINE. We
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7049  			 * will also fail if there's no bandwidth available.
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7050  			 */
3bd3706251ee8a kernel/sched/core.c Sebastian Andrzej Siewior 2019-04-23  7051  			if (!cpumask_subset(span, p->cpus_ptr) ||
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7052  			    rq->rd->dl_bw.bw == 0) {
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7053  				retval = -EPERM;
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7054  				goto unlock;
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7055  			}
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7056  		}
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7057  #endif
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7058  	}
dc61b1d65e353d kernel/sched.c      Peter Zijlstra            2010-06-08  7059  
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  7060  	/* Re-check policy now with rq lock held: */
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  7061  	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  7062  		policy = oldpolicy = -1;
eb58075149b7f0 kernel/sched/core.c Peter Zijlstra            2015-07-31  7063  		task_rq_unlock(rq, p, &rf);
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  7064  		if (pi)
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  7065  			cpuset_read_unlock();
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  7066  		goto recheck;
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  7067  	}
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7068  
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7069  	/*
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7070  	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7071  	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7072  	 * is available.
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7073  	 */
06a76fe08d4daa kernel/sched/core.c Nicolas Pitre             2017-06-21  7074  	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7075  		retval = -EBUSY;
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7076  		goto unlock;
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7077  	}
332ac17ef5bfcf kernel/sched/core.c Dario Faggioli            2013-11-07  7078  
c365c292d05908 kernel/sched/core.c Thomas Gleixner           2014-02-07  7079  	p->sched_reset_on_fork = reset_on_fork;
c365c292d05908 kernel/sched/core.c Thomas Gleixner           2014-02-07  7080  	oldprio = p->prio;
c365c292d05908 kernel/sched/core.c Thomas Gleixner           2014-02-07  7081  
dbc7f069b93a24 kernel/sched/core.c Peter Zijlstra            2015-06-11  7082  	if (pi) {
f06324e1cbda29 kernel/sched/core.c Juri Lelli                2021-04-23  7083  		newprio = fair_policy(attr->sched_policy) ?
f06324e1cbda29 kernel/sched/core.c Juri Lelli                2021-04-23  7084  			NICE_TO_PRIO(attr->sched_nice) : newprio;
f06324e1cbda29 kernel/sched/core.c Juri Lelli                2021-04-23  7085  
c365c292d05908 kernel/sched/core.c Thomas Gleixner           2014-02-07  7086  		/*
0782e63bc6fe7e kernel/sched/core.c Thomas Gleixner           2015-05-05  7087  		 * Take priority boosted tasks into account. If the new
0782e63bc6fe7e kernel/sched/core.c Thomas Gleixner           2015-05-05  7088  		 * effective priority is unchanged, we just store the new
c365c292d05908 kernel/sched/core.c Thomas Gleixner           2014-02-07  7089  		 * normal parameters and do not touch the scheduler class and
c365c292d05908 kernel/sched/core.c Thomas Gleixner           2014-02-07  7090  		 * the runqueue. This will be done when the task deboost
c365c292d05908 kernel/sched/core.c Thomas Gleixner           2014-02-07  7091  		 * itself.
c365c292d05908 kernel/sched/core.c Thomas Gleixner           2014-02-07  7092  		 */
acd58620e415ae kernel/sched/core.c Peter Zijlstra            2017-03-23  7093  		new_effective_prio = rt_effective_prio(p, newprio);
ff77e468535987 kernel/sched/core.c Peter Zijlstra            2016-01-18  7094  		if (new_effective_prio == oldprio)
ff77e468535987 kernel/sched/core.c Peter Zijlstra            2016-01-18  7095  			queue_flags &= ~DEQUEUE_MOVE;
dbc7f069b93a24 kernel/sched/core.c Peter Zijlstra            2015-06-11  7096  	}
c365c292d05908 kernel/sched/core.c Thomas Gleixner           2014-02-07  7097  
da0c1e65b51a28 kernel/sched/core.c Kirill Tkhai              2014-08-20  7098  	queued = task_on_rq_queued(p);
051a1d1afa4720 kernel/sched.c      Dmitry Adamushko          2007-12-18  7099  	running = task_current(rq, p);
da0c1e65b51a28 kernel/sched/core.c Kirill Tkhai              2014-08-20  7100  	if (queued)
ff77e468535987 kernel/sched/core.c Peter Zijlstra            2016-01-18  7101  		dequeue_task(rq, p, queue_flags);
83b699ed20f521 kernel/sched.c      Srivatsa Vaddagiri        2007-10-15  7102  	if (running)
f3cd1c4ec059c9 kernel/sched/core.c Kirill Tkhai              2014-09-12  7103  		put_prev_task(rq, p);
f6b53205e17c8c kernel/sched.c      Dmitry Adamushko          2007-10-15  7104  
83ab0aa0d5623d kernel/sched.c      Thomas Gleixner           2010-02-17  7105  	prev_class = p->sched_class;
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  7106  
f06324e1cbda29 kernel/sched/core.c Juri Lelli                2021-04-23 @7107  	__setscheduler(rq, p, attr, pi, new_effective_prio);
a509a7cd797470 kernel/sched/core.c Patrick Bellasi           2019-06-21  7108  	__setscheduler_uclamp(p, attr);
f6b53205e17c8c kernel/sched.c      Dmitry Adamushko          2007-10-15  7109  
da0c1e65b51a28 kernel/sched/core.c Kirill Tkhai              2014-08-20  7110  	if (queued) {
81a44c5441d7f7 kernel/sched/core.c Thomas Gleixner           2014-02-07  7111  		/*
81a44c5441d7f7 kernel/sched/core.c Thomas Gleixner           2014-02-07  7112  		 * We enqueue to tail when the priority of a task is
81a44c5441d7f7 kernel/sched/core.c Thomas Gleixner           2014-02-07  7113  		 * increased (user space view).
81a44c5441d7f7 kernel/sched/core.c Thomas Gleixner           2014-02-07  7114  		 */
ff77e468535987 kernel/sched/core.c Peter Zijlstra            2016-01-18  7115  		if (oldprio < p->prio)
ff77e468535987 kernel/sched/core.c Peter Zijlstra            2016-01-18  7116  			queue_flags |= ENQUEUE_HEAD;
1de64443d755f8 kernel/sched/core.c Peter Zijlstra            2015-09-30  7117  
ff77e468535987 kernel/sched/core.c Peter Zijlstra            2016-01-18  7118  		enqueue_task(rq, p, queue_flags);
81a44c5441d7f7 kernel/sched/core.c Thomas Gleixner           2014-02-07  7119  	}
a399d233078edb kernel/sched/core.c Vincent Guittot           2016-09-12  7120  	if (running)
03b7fad167efca kernel/sched/core.c Peter Zijlstra            2019-05-29  7121  		set_next_task(rq, p);
cb46984504048d kernel/sched.c      Steven Rostedt            2008-01-25  7122  
da7a735e51f962 kernel/sched.c      Peter Zijlstra            2011-01-17  7123  	check_class_changed(rq, p, prev_class, oldprio);
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  7124  
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  7125  	/* Avoid rq from going away on us: */
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  7126  	preempt_disable();
565790d28b1e33 kernel/sched/core.c Peter Zijlstra            2020-05-11  7127  	head = splice_balance_callbacks(rq);
eb58075149b7f0 kernel/sched/core.c Peter Zijlstra            2015-07-31  7128  	task_rq_unlock(rq, p, &rf);
b29739f902ee76 kernel/sched.c      Ingo Molnar               2006-06-27  7129  
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  7130  	if (pi) {
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  7131  		cpuset_read_unlock();
95e02ca9bb5324 kernel/sched.c      Thomas Gleixner           2006-06-27  7132  		rt_mutex_adjust_pi(p);
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  7133  	}
95e02ca9bb5324 kernel/sched.c      Thomas Gleixner           2006-06-27  7134  
d1ccc66df8bfe3 kernel/sched/core.c Ingo Molnar               2017-02-01  7135  	/* Run balance callbacks after we've adjusted the PI chain: */
565790d28b1e33 kernel/sched/core.c Peter Zijlstra            2020-05-11  7136  	balance_callbacks(rq, head);
4c9a4bc89a9cca kernel/sched/core.c Peter Zijlstra            2015-06-11  7137  	preempt_enable();
4c9a4bc89a9cca kernel/sched/core.c Peter Zijlstra            2015-06-11  7138  
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  7139  	return 0;
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7140  
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7141  unlock:
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7142  	task_rq_unlock(rq, p, &rf);
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  7143  	if (pi)
710da3c8ea7dfb kernel/sched/core.c Juri Lelli                2019-07-19  7144  		cpuset_read_unlock();
4b211f2b129dd1 kernel/sched/core.c Mathieu Poirier           2019-07-19  7145  	return retval;
^1da177e4c3f41 kernel/sched.c      Linus Torvalds            2005-04-16  7146  }
961ccddd59d627 kernel/sched.c      Rusty Russell             2008-06-23  7147  
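
For readers unfamiliar with this class of [uninitvar] report, the sketch below (plain userspace C with made-up names, not kernel code) reproduces the shape cppcheck is flagging above: a local that is assigned only under an if (pi) condition and then read unconditionally.

/*
 * sketch.c - minimal reproduction of the pattern flagged above.
 * All names here are illustrative stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <stdbool.h>

static int effective_prio(int prio)
{
	return prio;			/* stand-in for rt_effective_prio() */
}

static void setscheduler(bool pi, int newprio)
{
	int new_effective_prio;		/* no initializer, like core.c:6884 */

	if (pi)
		new_effective_prio = effective_prio(newprio);

	/* read of new_effective_prio is unconditional, like core.c:7107;
	 * when pi is false this uses an uninitialized value */
	printf("pi=%d new_effective_prio=%d\n", pi, new_effective_prio);
}

int main(void)
{
	setscheduler(false, 90);	/* the path the warning is about */
	return 0;
}

Running cppcheck on code of this shape should elicit a similar "Uninitialized variable" diagnostic. Such reports are commonly quieted by giving the variable an initializer at its declaration or by computing a value on both paths; whether either is appropriate here depends on how this branch's __setscheduler() consumes the argument when pi is false, which is not visible in the excerpt.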

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
