linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/5] Use wake_up_locked() in eventpoll
@ 2007-10-24 12:24 Matthew Wilcox
  2007-10-24 12:24 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
                   ` (3 more replies)
  0 siblings, 4 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-10-24 12:24 UTC (permalink / raw)
  To: torvalds, akpm, linux-kernel; +Cc: Matthew Wilcox, Matthew Wilcox

Replace the uses of __wake_up_locked() with wake_up_locked().
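
For background, wake_up_locked() keeps wake_up()'s semantics but skips
taking the waitqueue's internal lock, so it is only safe where the
waitqueue is already serialized by a lock the caller holds (eventpoll
holds ep->lock around each of these sites).  A minimal sketch of the
generic pattern -- illustration only, with wq assumed to be an
initialized wait_queue_head_t:

        unsigned long flags;

        spin_lock_irqsave(&wq.lock, flags);     /* the waitqueue's own lock */
        if (waitqueue_active(&wq))
                wake_up_locked(&wq);            /* does not take wq.lock itself */
        spin_unlock_irqrestore(&wq.lock, flags);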

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 fs/eventpoll.c |   11 ++++-------
 1 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 34f68f3..81c04ab 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -656,8 +656,7 @@ is_linked:
 	 * wait list.
 	 */
 	if (waitqueue_active(&ep->wq))
-		__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-				 TASK_INTERRUPTIBLE);
+		wake_up_locked(&ep->wq);
 	if (waitqueue_active(&ep->poll_wait))
 		pwake++;
 
@@ -780,7 +779,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 		/* Notify waiting tasks that events are available */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
@@ -854,8 +853,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 
 			/* Notify waiting tasks that events are available */
 			if (waitqueue_active(&ep->wq))
-				__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-						 TASK_INTERRUPTIBLE);
+				wake_up_locked(&ep->wq);
 			if (waitqueue_active(&ep->poll_wait))
 				pwake++;
 		}
@@ -978,8 +976,7 @@ errxit:
 		 * wait list (delayed after we release the lock).
 		 */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-					 TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
-- 
1.4.4.2



* [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-24 12:24 [PATCH 1/5] Use wake_up_locked() in eventpoll Matthew Wilcox
@ 2007-10-24 12:24 ` Matthew Wilcox
  2007-10-25  3:31   ` Andrew Morton
                     ` (2 more replies)
  2007-10-24 12:24 ` [PATCH 3/5] Add TASK_WAKEKILL Matthew Wilcox
                   ` (2 subsequent siblings)
  3 siblings, 3 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-10-24 12:24 UTC (permalink / raw)
  To: torvalds, akpm, linux-kernel; +Cc: Matthew Wilcox, Matthew Wilcox

Abstracting away direct uses of TASK_ flags allows us to change the
definitions of the task flags more easily.

Also restructure do_wait() a little.
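
To make the effect concrete, here is the shape of a typical conversion
(this one is taken from the perfmon hunk below):

        /* before: each caller spells out the flag combination */
        if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED))
                return -EBUSY;

        /* after: one central definition to change if the flags move */
        if (!is_task_stopped_or_traced(task))
                return -EBUSY;

A later patch in this series redefines TASK_STOPPED and TASK_TRACED to
include an extra bit; sites converted here then need no further edits.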

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 arch/ia64/kernel/perfmon.c |    4 +-
 fs/proc/array.c            |    7 +---
 fs/proc/base.c             |    2 +-
 include/linux/sched.h      |   15 +++++++
 include/linux/wait.h       |   11 +++--
 kernel/exit.c              |   90 +++++++++++++++++++------------------------
 kernel/power/process.c     |    6 +-
 kernel/ptrace.c            |    8 ++--
 kernel/sched.c             |   15 +++----
 kernel/signal.c            |    6 +-
 kernel/wait.c              |    2 +-
 11 files changed, 82 insertions(+), 84 deletions(-)

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 59169bf..c2ca724 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2631,7 +2631,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 */
 	if (task == current) return 0;
 
-	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+	if (!is_task_stopped_or_traced(task)) {
 		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
 		return -EBUSY;
 	}
@@ -4792,7 +4792,7 @@ recheck:
 	 * the task must be stopped.
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
-		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+		if (!is_task_stopped_or_traced(task)) {
 			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
 			return -EBUSY;
 		}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 63c95af..0bf1fa0 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -141,12 +141,7 @@ static const char *task_state_array[] = {
 
 static inline const char *get_task_state(struct task_struct *tsk)
 {
-	unsigned int state = (tsk->state & (TASK_RUNNING |
-					    TASK_INTERRUPTIBLE |
-					    TASK_UNINTERRUPTIBLE |
-					    TASK_STOPPED |
-					    TASK_TRACED)) |
-					   tsk->exit_state;
+	unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
 	const char **p = &task_state_array[0];
 
 	while (state) {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index aeaf0d0..049a80c 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -199,7 +199,7 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
 	(task == current || \
 	(task->parent == current && \
 	(task->ptrace & PT_PTRACED) && \
-	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+	 (is_task_stopped_or_traced(task)) && \
 	 security_ptrace(current,task) == 0))
 
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 13df99f..fc28637 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,6 +178,21 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 /* in tsk->state again */
 #define TASK_DEAD		64
 
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL		(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
+				 TASK_UNINTERRUPTIBLE | TASK_STOPPED | \
+				 TASK_TRACED)
+
+#define is_task_traced(task)	((task->state & TASK_TRACED) != 0)
+#define is_task_stopped(task)	((task->state & TASK_STOPPED) != 0)
+#define is_task_stopped_or_traced(task)	\
+			((task->state & (TASK_STOPPED | TASK_TRACED)) != 0)
+#define is_task_loadavg(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0)
+
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value)		\
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0e68628..0a410a4 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 
-#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
 #define __wait_event(wq, condition) 					\
 do {									\
diff --git a/kernel/exit.c b/kernel/exit.c
index f1aec27..868ea3e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -249,7 +249,7 @@ static int has_stopped_jobs(struct pid *pgrp)
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p->state != TASK_STOPPED)
+		if (!is_task_stopped(p))
 			continue;
 		retval = 1;
 		break;
@@ -614,7 +614,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 		p->parent = p->real_parent;
 		add_parent(p);
 
-		if (p->state == TASK_TRACED) {
+		if (is_task_traced(p)) {
 			/*
 			 * If it was at a trace stop, turn it into
 			 * a normal stop since it's no longer being
@@ -1387,7 +1387,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 
 		exit_code = p->exit_code;
 		if (unlikely(!exit_code) ||
-		    unlikely(p->state & TASK_TRACED))
+		    unlikely(is_task_traced(p)))
 			goto bail_ref;
 		return wait_noreap_copyout(p, pid, uid,
 					   why, (exit_code << 8) | 0x7f,
@@ -1565,60 +1565,51 @@ repeat:
 			}
 			allowed = 1;
 
-			switch (p->state) {
-			case TASK_TRACED:
-				/*
-				 * When we hit the race with PTRACE_ATTACH,
-				 * we will not report this child.  But the
-				 * race means it has not yet been moved to
-				 * our ptrace_children list, so we need to
-				 * set the flag here to avoid a spurious ECHILD
-				 * when the race happens with the only child.
-				 */
-				flag = 1;
-				if (!my_ptrace_child(p))
-					continue;
-				/*FALLTHROUGH*/
-			case TASK_STOPPED:
+			if (is_task_stopped_or_traced(p)) {
 				/*
 				 * It's stopped now, so it might later
 				 * continue, exit, or stop again.
+				 *
+				 * When we hit the race with PTRACE_ATTACH, we
+				 * will not report this child.  But the race
+				 * means it has not yet been moved to our
+				 * ptrace_children list, so we need to set the
+				 * flag here to avoid a spurious ECHILD when
+				 * the race happens with the only child.
 				 */
 				flag = 1;
-				if (!(options & WUNTRACED) &&
-				    !my_ptrace_child(p))
-					continue;
+
+				if (!my_ptrace_child(p)) {
+					if (is_task_traced(p))
+						continue;
+					if (!(options & WUNTRACED))
+						continue;
+				}
+
 				retval = wait_task_stopped(p, ret == 2,
-							   (options & WNOWAIT),
-							   infop,
-							   stat_addr, ru);
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval == -EAGAIN)
 					goto repeat;
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
-			default:
-			// case EXIT_DEAD:
-				if (p->exit_state == EXIT_DEAD)
+			} else if (p->exit_state == EXIT_DEAD) {
+				continue;
+			} else if (p->exit_state == EXIT_ZOMBIE) {
+				/*
+				 * Eligible but we cannot release it yet:
+				 */
+				if (ret == 2)
+					goto check_continued;
+				if (!likely(options & WEXITED))
 					continue;
-			// case EXIT_ZOMBIE:
-				if (p->exit_state == EXIT_ZOMBIE) {
-					/*
-					 * Eligible but we cannot release
-					 * it yet:
-					 */
-					if (ret == 2)
-						goto check_continued;
-					if (!likely(options & WEXITED))
-						continue;
-					retval = wait_task_zombie(
-						p, (options & WNOWAIT),
-						infop, stat_addr, ru);
-					/* He released the lock.  */
-					if (retval != 0)
-						goto end;
-					break;
-				}
+				retval = wait_task_zombie(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
+				/* He released the lock.  */
+				if (retval != 0)
+					goto end;
+			} else {
 check_continued:
 				/*
 				 * It's running now, so it might later
@@ -1627,12 +1618,11 @@ check_continued:
 				flag = 1;
 				if (!unlikely(options & WCONTINUED))
 					continue;
-				retval = wait_task_continued(
-					p, (options & WNOWAIT),
-					infop, stat_addr, ru);
+				retval = wait_task_continued(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
 			}
 		}
 		if (!flag) {
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 6533923..7710c05 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -86,9 +86,9 @@ static void fake_signal_wake_up(struct task_struct *p, int resume)
 
 static void send_fake_signal(struct task_struct *p)
 {
-	if (p->state == TASK_STOPPED)
+	if (is_task_stopped(p))
 		force_sig_specific(SIGSTOP, p);
-	fake_signal_wake_up(p, p->state == TASK_STOPPED);
+	fake_signal_wake_up(p, is_task_stopped(p));
 }
 
 static int has_mm(struct task_struct *p)
@@ -182,7 +182,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
 			if (frozen(p) || !freezeable(p))
 				continue;
 
-			if (p->state == TASK_TRACED && frozen(p->parent)) {
+			if (is_task_traced(p) && frozen(p->parent)) {
 				cancel_freezing(p);
 				continue;
 			}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 7c76f2f..9e46a9b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -51,7 +51,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
+	if (is_task_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
 			child->state = TASK_STOPPED;
 		} else {
@@ -79,7 +79,7 @@ void __ptrace_unlink(struct task_struct *child)
 		add_parent(child);
 	}
 
-	if (child->state == TASK_TRACED)
+	if (is_task_traced(child))
 		ptrace_untrace(child);
 }
 
@@ -103,9 +103,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
+		if (is_task_stopped(child)) {
 			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
+		} else if (!is_task_traced(child) && !kill) {
 			ret = -ESRCH;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 2810e56..28dde29 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -975,7 +975,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -987,7 +987,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1623,8 +1623,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -3826,8 +3825,7 @@ void fastcall complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -3838,8 +3836,7 @@ void fastcall complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -5237,7 +5234,7 @@ static void activate_idle_task(struct task_struct *p, struct rq *rq)
 {
 	update_rq_clock(rq);
 
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, 0);
diff --git a/kernel/signal.c b/kernel/signal.c
index 1200630..1b08700 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -838,7 +838,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (is_task_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -1441,7 +1441,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	BUG_ON(sig == -1);
 
  	/* do_notify_parent_cldstop should have been called instead.  */
- 	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+ 	BUG_ON(is_task_stopped_or_traced(tsk));
 
 	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1729,7 +1729,7 @@ static int do_signal_stop(int signr)
 			 * so this check has no races.
 			 */
 			if (!t->exit_state &&
-			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+			    !is_task_stopped_or_traced(t)) {
 				stop_count++;
 				signal_wake_up(t, 0);
 			}
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbf..f987688 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+		__wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);
 
-- 
1.4.4.2



* [PATCH 3/5] Add TASK_WAKEKILL
  2007-10-24 12:24 [PATCH 1/5] Use wake_up_locked() in eventpoll Matthew Wilcox
  2007-10-24 12:24 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
@ 2007-10-24 12:24 ` Matthew Wilcox
  2007-10-24 12:24 ` [PATCH 4/5] Add lock_page_killable Matthew Wilcox
  2007-10-24 12:24 ` [PATCH 5/5] Make wait_on_retry_sync_kiocb killable Matthew Wilcox
  3 siblings, 0 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-10-24 12:24 UTC (permalink / raw)
  To: torvalds, akpm, linux-kernel; +Cc: Matthew Wilcox, Matthew Wilcox

Set TASK_WAKEKILL in TASK_STOPPED and TASK_TRACED, add TASK_KILLABLE, and
use TASK_WAKEKILL in signal_wake_up().
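
The intended sleeping pattern, sketched below.  Note that
fatal_signal_pending() is only added by a later patch in this series,
and 'condition' stands in for whatever the real code waits on:

        set_current_state(TASK_KILLABLE);  /* TASK_WAKEKILL | TASK_UNINTERRUPTIBLE */
        if (!condition)
                schedule();                /* SIGKILL wakes us via TASK_WAKEKILL */
        __set_current_state(TASK_RUNNING);
        if (fatal_signal_pending(current))
                return -EINTR;             /* woken early by a fatal signal */

Ordinary signals still cannot interrupt the sleep: for those,
signal_wake_up() wakes with a mask of plain TASK_INTERRUPTIBLE, which
does not intersect TASK_KILLABLE.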

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 include/linux/sched.h |   22 ++++++++++++++--------
 kernel/signal.c       |    8 ++++----
 2 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index fc28637..d1e32ec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -170,27 +170,33 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_RUNNING		0
 #define TASK_INTERRUPTIBLE	1
 #define TASK_UNINTERRUPTIBLE	2
-#define TASK_STOPPED		4
-#define TASK_TRACED		8
+#define __TASK_STOPPED		4
+#define __TASK_TRACED		8
 /* in tsk->exit_state */
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
 /* in tsk->state again */
 #define TASK_DEAD		64
+#define TASK_WAKEKILL		128
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
 
 /* Convenience macros for the sake of wake_up */
 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
-#define TASK_ALL		(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)
+#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
 
 /* get_task_state() */
 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
-				 TASK_UNINTERRUPTIBLE | TASK_STOPPED | \
-				 TASK_TRACED)
+				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+				 __TASK_TRACED)
 
-#define is_task_traced(task)	((task->state & TASK_TRACED) != 0)
-#define is_task_stopped(task)	((task->state & TASK_STOPPED) != 0)
+#define is_task_traced(task)	((task->state & __TASK_TRACED) != 0)
+#define is_task_stopped(task)	((task->state & __TASK_STOPPED) != 0)
 #define is_task_stopped_or_traced(task)	\
-			((task->state & (TASK_STOPPED | TASK_TRACED)) != 0)
+			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define is_task_loadavg(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0)
 
 #define __set_task_state(tsk, state_value)		\
diff --git a/kernel/signal.c b/kernel/signal.c
index 1b08700..0a805b7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -456,15 +456,15 @@ void signal_wake_up(struct task_struct *t, int resume)
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced case.
-	 * We don't check t->state here because there is a race with it
+	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * case. We don't check t->state here because there is a race with it
 	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
 	mask = TASK_INTERRUPTIBLE;
 	if (resume)
-		mask |= TASK_STOPPED | TASK_TRACED;
+		mask |= TASK_WAKEKILL;
 	if (!wake_up_state(t, mask))
 		kick_process(t);
 }
@@ -620,7 +620,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			 * Wake up the stopped thread _after_ setting
 			 * TIF_SIGPENDING
 			 */
-			state = TASK_STOPPED;
+			state = __TASK_STOPPED;
 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
 				set_tsk_thread_flag(t, TIF_SIGPENDING);
 				state |= TASK_INTERRUPTIBLE;
-- 
1.4.4.2



* [PATCH 4/5] Add lock_page_killable
  2007-10-24 12:24 [PATCH 1/5] Use wake_up_locked() in eventpoll Matthew Wilcox
  2007-10-24 12:24 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
  2007-10-24 12:24 ` [PATCH 3/5] Add TASK_WAKEKILL Matthew Wilcox
@ 2007-10-24 12:24 ` Matthew Wilcox
  2007-10-25  4:11   ` Andrew Morton
  2007-10-24 12:24 ` [PATCH 5/5] Make wait_on_retry_sync_kiocb killable Matthew Wilcox
  3 siblings, 1 reply; 22+ messages in thread
From: Matthew Wilcox @ 2007-10-24 12:24 UTC (permalink / raw)
  To: torvalds, akpm, linux-kernel; +Cc: Matthew Wilcox, Matthew Wilcox

and associated infrastructure such as sync_page_killable and
fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
to allow us to kill `cat' of a file on an NFS-mounted filesystem.
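
The calling convention, sketched; how the failure is reported is up to
the caller (do_generic_mapping_read() below maps it to -EIO):

        if (lock_page_killable(page))
                return -EINTR;  /* fatal signal; the page was never locked */
        /* ... the page is locked here ... */
        unlock_page(page);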

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 include/linux/pagemap.h |   14 ++++++++++++++
 include/linux/sched.h   |    9 ++++++++-
 kernel/signal.c         |    5 +++++
 mm/filemap.c            |   25 +++++++++++++++++++++----
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410..4b62a10 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -171,6 +172,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		return __lock_page_killable(page);
+	return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
  */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d1e32ec..7ccf92a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1856,7 +1856,14 @@ static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
-  
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
diff --git a/kernel/signal.c b/kernel/signal.c
index 0a805b7..50aa183 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -994,6 +994,11 @@ void zap_other_threads(struct task_struct *p)
 	}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
diff --git a/mm/filemap.c b/mm/filemap.c
index 5209e47..6cccb4b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -172,6 +172,12 @@ static int sync_page(void *word)
 	return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+	sync_page(word);
+	return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:	address space structure to write
@@ -576,6 +582,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+	return __wait_on_bit_lock(page_waitqueue(page), &wait,
+					sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
@@ -967,7 +981,8 @@ page_ok:
 
 page_not_up_to_date:
 		/* Get exclusive access to the page ... */
-		lock_page(page);
+		if (lock_page_killable(page))
+			goto readpage_eio;
 
 		/* Did it get truncated before we got the lock? */
 		if (!page->mapping) {
@@ -995,7 +1010,8 @@ readpage:
 		}
 
 		if (!PageUptodate(page)) {
-			lock_page(page);
+			if (lock_page_killable(page))
+				goto readpage_eio;
 			if (!PageUptodate(page)) {
 				if (page->mapping == NULL) {
 					/*
@@ -1006,15 +1022,16 @@ readpage:
 					goto find_page;
 				}
 				unlock_page(page);
-				error = -EIO;
 				shrink_readahead_size_eio(filp, ra);
-				goto readpage_error;
+				goto readpage_eio;
 			}
 			unlock_page(page);
 		}
 
 		goto page_ok;
 
+readpage_eio:
+		error = -EIO;
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
 		desc->error = error;
-- 
1.4.4.2



* [PATCH 5/5] Make wait_on_retry_sync_kiocb killable
  2007-10-24 12:24 [PATCH 1/5] Use wake_up_locked() in eventpoll Matthew Wilcox
                   ` (2 preceding siblings ...)
  2007-10-24 12:24 ` [PATCH 4/5] Add lock_page_killable Matthew Wilcox
@ 2007-10-24 12:24 ` Matthew Wilcox
  2007-10-25  3:43   ` Andrew Morton
  2007-10-25 18:31   ` Zach Brown
  3 siblings, 2 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-10-24 12:24 UTC (permalink / raw)
  To: torvalds, akpm, linux-kernel; +Cc: Matthew Wilcox, Matthew Wilcox

Use TASK_KILLABLE to allow wait_on_retry_sync_kiocb to return -EINTR.
All callers then check the return value and break out of their loops.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 fs/read_write.c |   17 ++++++++++++-----
 1 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/fs/read_write.c b/fs/read_write.c
index 124693e..3196a3b 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -218,14 +218,15 @@ Einval:
 	return -EINVAL;
 }
 
-static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
+static int wait_on_retry_sync_kiocb(struct kiocb *iocb)
 {
-	set_current_state(TASK_UNINTERRUPTIBLE);
+	set_current_state(TASK_KILLABLE);
 	if (!kiocbIsKicked(iocb))
 		schedule();
 	else
 		kiocbClearKicked(iocb);
 	__set_current_state(TASK_RUNNING);
+	return fatal_signal_pending(current) ? -EINTR : 0;
 }
 
 ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
@@ -242,7 +243,9 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp
 		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 		if (ret != -EIOCBRETRY)
 			break;
-		wait_on_retry_sync_kiocb(&kiocb);
+		ret = wait_on_retry_sync_kiocb(&kiocb);
+		if (ret)
+			break;
 	}
 
 	if (-EIOCBQUEUED == ret)
@@ -300,7 +303,9 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
 		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
 		if (ret != -EIOCBRETRY)
 			break;
-		wait_on_retry_sync_kiocb(&kiocb);
+		ret = wait_on_retry_sync_kiocb(&kiocb);
+		if (ret)
+			break;
 	}
 
 	if (-EIOCBQUEUED == ret)
@@ -466,7 +471,9 @@ ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
 		ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
 		if (ret != -EIOCBRETRY)
 			break;
-		wait_on_retry_sync_kiocb(&kiocb);
+		ret = wait_on_retry_sync_kiocb(&kiocb);
+		if (ret)
+			break;
 	}
 
 	if (ret == -EIOCBQUEUED)
-- 
1.4.4.2



* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-24 12:24 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
@ 2007-10-25  3:31   ` Andrew Morton
  2007-10-26 18:45   ` Andrew Morton
  2007-12-05 12:56   ` Ingo Molnar
  2 siblings, 0 replies; 22+ messages in thread
From: Andrew Morton @ 2007-10-25  3:31 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: torvalds, linux-kernel, Matthew Wilcox

On Wed, 24 Oct 2007 08:24:55 -0400 Matthew Wilcox <matthew@wil.cx> wrote:

> Abstracting away direct uses of TASK_ flags allows us to change the
> definitions of the task flags more easily.
> 
> Also restructure do_wait() a little

umm, spose so.

There's an excellent chance that a millionth of our monkeys will come
along and wreck this diff during the next two months.  In which case
I might end up dropping bits of it, we'll see.

It would be much better if this patch was a series of patches: one to add
the new infrastructure and one-per-subsystem to introduce users of the
infrastructure.  That way I wouldn't have to go mucking around in the
internals of the patch and remembering to tell you when I tossed bits of it
out, and which bits those were.

This shouldn't come as news to an old-timer :(


* Re: [PATCH 5/5] Make wait_on_retry_sync_kiocb killable
  2007-10-24 12:24 ` [PATCH 5/5] Make wait_on_retry_sync_kiocb killable Matthew Wilcox
@ 2007-10-25  3:43   ` Andrew Morton
  2007-10-25 18:31   ` Zach Brown
  1 sibling, 0 replies; 22+ messages in thread
From: Andrew Morton @ 2007-10-25  3:43 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: torvalds, linux-kernel, Matthew Wilcox, linux-aio

On Wed, 24 Oct 2007 08:24:58 -0400 Matthew Wilcox <matthew@wil.cx> wrote:

> Use TASK_KILLABLE to allow wait_on_retry_sync_kiocb to return -EINTR.
> All callers then check the return value and break out of their loops.
> 

Let's cc the AIO list on AIO patches, please.

> ---
>  fs/read_write.c |   17 ++++++++++++-----
>  1 files changed, 12 insertions(+), 5 deletions(-)
> 
> diff --git a/fs/read_write.c b/fs/read_write.c
> index 124693e..3196a3b 100644
> --- a/fs/read_write.c
> +++ b/fs/read_write.c
> @@ -218,14 +218,15 @@ Einval:
>  	return -EINVAL;
>  }
>  
> -static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
> +static int wait_on_retry_sync_kiocb(struct kiocb *iocb)
>  {
> -	set_current_state(TASK_UNINTERRUPTIBLE);
> +	set_current_state(TASK_KILLABLE);
>  	if (!kiocbIsKicked(iocb))
>  		schedule();
>  	else
>  		kiocbClearKicked(iocb);
>  	__set_current_state(TASK_RUNNING);
> +	return fatal_signal_pending(current) ? -EINTR : 0;
>  }
>  
>  ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
> @@ -242,7 +243,9 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp
>  		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
>  		if (ret != -EIOCBRETRY)
>  			break;
> -		wait_on_retry_sync_kiocb(&kiocb);
> +		ret = wait_on_retry_sync_kiocb(&kiocb);
> +		if (ret)
> +			break;
>  	}
>  
>  	if (-EIOCBQUEUED == ret)
> @@ -300,7 +303,9 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
>  		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
>  		if (ret != -EIOCBRETRY)
>  			break;
> -		wait_on_retry_sync_kiocb(&kiocb);
> +		ret = wait_on_retry_sync_kiocb(&kiocb);
> +		if (ret)
> +			break;
>  	}
>  
>  	if (-EIOCBQUEUED == ret)
> @@ -466,7 +471,9 @@ ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
>  		ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
>  		if (ret != -EIOCBRETRY)
>  			break;
> -		wait_on_retry_sync_kiocb(&kiocb);
> +		ret = wait_on_retry_sync_kiocb(&kiocb);
> +		if (ret)
> +			break;
>  	}
>  
>  	if (ret == -EIOCBQUEUED)
> -- 
> 1.4.4.2


* Re: [PATCH 4/5] Add lock_page_killable
  2007-10-24 12:24 ` [PATCH 4/5] Add lock_page_killable Matthew Wilcox
@ 2007-10-25  4:11   ` Andrew Morton
  2007-10-25  4:13     ` Nick Piggin
  0 siblings, 1 reply; 22+ messages in thread
From: Andrew Morton @ 2007-10-25  4:11 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: torvalds, linux-kernel, Matthew Wilcox

On Wed, 24 Oct 2007 08:24:57 -0400 Matthew Wilcox <matthew@wil.cx> wrote:

> and associated infrastructure such as sync_page_killable and
> fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
> to allow us to kill `cat' of a file on an NFS-mounted filesystem.

whoa, big change.

What exactly are the semantics here?  If the process has actually been
killed (ie: we know that userspace won't be running again) then we break
out of a lock_page() and allow the process to exit?  ie: it's basically
invisible to userspace?

If so, it sounds OK.  I guess.  We're still screwed if the process is doing
a synchronous write and lots of other scenarios.

How well has this been tested?

Have the NFS guys had a think about it?

Why does it return -EIO from read() and not -EINTR?


* Re: [PATCH 4/5] Add lock_page_killable
  2007-10-25  4:11   ` Andrew Morton
@ 2007-10-25  4:13     ` Nick Piggin
  0 siblings, 0 replies; 22+ messages in thread
From: Nick Piggin @ 2007-10-25  4:13 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox, torvalds, linux-kernel, Matthew Wilcox

On Thursday 25 October 2007 14:11, Andrew Morton wrote:
> On Wed, 24 Oct 2007 08:24:57 -0400 Matthew Wilcox <matthew@wil.cx> wrote:
> > and associated infrastructure such as sync_page_killable and
> > fatal_signal_pending.  Use lock_page_killable in
> > do_generic_mapping_read() to allow us to kill `cat' of a file on an
> > NFS-mounted filesystem.
>
> whoa, big change.
>
> What exactly are the semantics here?  If the process has actually been
> killed (ie: we know that userspace won't be running again) then we break
> out of a lock_page() and allow the process to exit?  ie: it's basically
> invisible to userspace?

The actual conversions should also be relatively useful groundwork
if we ever want to make more things generally interruptible.


> If so, it sounds OK.  I guess.  We're still screwed if the process is doing
> a synchronous write and lots of other scenarios.

I don't think it will matter in too many situations. If the process is
doing a synchronous write, nothing is guaranteed until the syscall
returns success...


* Re: [PATCH 5/5] Make wait_on_retry_sync_kiocb killable
  2007-10-24 12:24 ` [PATCH 5/5] Make wait_on_retry_sync_kiocb killable Matthew Wilcox
  2007-10-25  3:43   ` Andrew Morton
@ 2007-10-25 18:31   ` Zach Brown
  1 sibling, 0 replies; 22+ messages in thread
From: Zach Brown @ 2007-10-25 18:31 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: torvalds, akpm, linux-kernel, Matthew Wilcox, linux-aio


Matthew Wilcox wrote:
> Use TASK_KILLABLE to allow wait_on_retry_sync_kiocb to return -EINTR.
> All callers then check the return value and break out of their loops.

This won't work because "sync" kiocbs are a nasty hack that don't follow
the (also nasty) refcounting patterns of the aio core.

-EIOCBRETRY means that aio_{read,write}() has taken on the "IO" kiocb
reference and has ensured that kick_iocb() will be called in the
future.

Usually kick_iocb() would queue the kiocb to have its ki_retry method
called by the kernel aio threads while holding that reference.  But
"sync" kiocbs are on-stack and aren't reference counted.  kick_iocb() magic:

        /* sync iocbs are easy: they can only ever be executing from a
         * single context. */
        if (is_sync_kiocb(iocb)) {
                kiocbSetKicked(iocb);
                wake_up_process(iocb->ki_obj.tsk);
                return;
        }

So, with this patch, if we catch a signal and return from
wait_on_retry_sync_kiocb() and return from do_sync_{read,write}() then
that on-stack sync kiocb is going to be long gone when kick_iocb() goes
to work with it.
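
To spell the lifetime problem out, a sketch of the interleaving (not
real code; names as in the patch):

        /*
         *   do_sync_read(), with this patch     aio core, later
         *   --------------------------------    -------------------------
         *   struct kiocb kiocb;  (on stack)
         *   ->aio_read() == -EIOCBRETRY         retry path holds a pending kick
         *   wait_on_retry_sync_kiocb()
         *     fatal signal -> returns -EINTR
         *   do_sync_read() returns              kick_iocb(&kiocb)
         *     (stack frame, and kiocb, gone)      -> kiocbSetKicked() and
         *                                            wake_up_process() on freed stack
         */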

So the first step would be to make sync kiocbs real refcounted
structures so that kick_iocb() could find that the sync submitter has
disappeared.

But then we have to worry about leaving retrying operations in flight
after the sync submitter has returned from their system call.  They
might be VERY SURPRISED to find that a read() implemented with
do_sync_read() is still writing into their userspace pointer after the
syscall was interrupted by a signal.

This leads us to the possibility of working with the ki_cancel method to
stop a pending operation if a signal is caught from a sync submitter.
In practice, nothing sets ki_cancel.

And finally, this code will not be run in a solely mainline kernel.  The
only thing in mainline that returns -EIOCBRETRY is the goofy usb gadget.
 It has both ->{read,write} and ->aio_{read,write} file op methods so
vfs_{read,write}() will never call do_sync_{read,write}().  Sure,
out-of-tree aio providers (SDP?) might get caught up in this.

(Ha ha!  Welcome to fs/aio.c!)

So I'm not sure where to go with this.  It's a mess, but it doesn't seem
like anything is using it.  A significant clean up of the retry and
cancelation support in fs/aio.c is in flight.  Maybe we can revisit this
once that settles down.

- z


* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-24 12:24 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
  2007-10-25  3:31   ` Andrew Morton
@ 2007-10-26 18:45   ` Andrew Morton
  2007-10-26 20:39     ` Alexey Dobriyan
  2007-12-05 12:56   ` Ingo Molnar
  2 siblings, 1 reply; 22+ messages in thread
From: Andrew Morton @ 2007-10-26 18:45 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: torvalds, linux-kernel, matthew, willy, Stephane Eranian

On Wed, 24 Oct 2007 08:24:55 -0400
Matthew Wilcox <matthew@wil.cx> wrote:

> Abstracting away direct uses of TASK_ flags allows us to change the
> definitions of the task flags more easily.
> 
> Also restructure do_wait() a little
>
> ...
>
> diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
> index 59169bf..c2ca724 100644
> --- a/arch/ia64/kernel/perfmon.c
> +++ b/arch/ia64/kernel/perfmon.c
> @@ -2631,7 +2631,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
>  	 */
>  	if (task == current) return 0;
>  
> -	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
> +	if (!is_task_stopped_or_traced(task)) {
>  		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
>  		return -EBUSY;
>  	}
> @@ -4792,7 +4792,7 @@ recheck:
>  	 * the task must be stopped.
>  	 */
>  	if (PFM_CMD_STOPPED(cmd)) {
> -		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
> +		if (!is_task_stopped_or_traced(task)) {
>  			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
>  			return -EBUSY;
>  		}

I have dropped this hunk because the file which it is patching is removed
by the (newly-added-to-mm) git-perfmon.patch.  I can't immediately find any
corresponding code which was readded in a different place by git-perfmon so
it looks like this code was simply zapped.

Of course, if git-perfmon doesn't merge in 2.6.25 then I'll end up merging
your patch but accidentally leaving 2.6.25's arch/ia64/kernel/perfmon.c
unpatched.  It looks like that'll be non-fatal.

This isn't going to go very well and I might end up having to drop this
whole patch series and ask for a refactored one.  We'll see.



* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-26 18:45   ` Andrew Morton
@ 2007-10-26 20:39     ` Alexey Dobriyan
  2007-10-27  0:33       ` Matthew Wilcox
  0 siblings, 1 reply; 22+ messages in thread
From: Alexey Dobriyan @ 2007-10-26 20:39 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Matthew Wilcox, torvalds, linux-kernel, willy, Stephane Eranian

On Fri, Oct 26, 2007 at 11:45:15AM -0700, Andrew Morton wrote:
> On Wed, 24 Oct 2007 08:24:55 -0400
> Matthew Wilcox <matthew@wil.cx> wrote:
> 
> > Abstracting away direct uses of TASK_ flags allows us to change the
> > definitions of the task flags more easily.

> > --- a/arch/ia64/kernel/perfmon.c
> > +++ b/arch/ia64/kernel/perfmon.c
> > @@ -2631,7 +2631,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
> >  	 */
> >  	if (task == current) return 0;
> >  
> > -	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
> > +	if (!is_task_stopped_or_traced(task)) {
> >  		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
> >  		return -EBUSY;
> >  	}
> > @@ -4792,7 +4792,7 @@ recheck:
> >  	 * the task must be stopped.
> >  	 */
> >  	if (PFM_CMD_STOPPED(cmd)) {
> > -		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
> > +		if (!is_task_stopped_or_traced(task)) {
                     ^^^^^^^^^^^^^^^^^^^^^^^^^^

I think this is horrible. Are you going to add a full-blown static inline
function for every combination of state tests?

> I have dropped this hunk because the file which it is patching is removed
> by the (newly-added-to-mm) git-perfmon.patch.  I can't immediately find any
> corresponding code which was readded in a different place by git-perfmon so
> it looks like this code was simply zapped.
> 
> Of course, if git-perfmon doesn't merge in 2.6.25 then I'll end up merging
> your patch but accidentally leaving 2.6.25's arch/ia64/kernel/perfmon.c
> unpatched.  It looks like that'll be non-fatal.
> 
> This isn't going to go very well and I might end up having to drop this
> whole patch series and ask for a refactored one.  We'll see.


* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-26 20:39     ` Alexey Dobriyan
@ 2007-10-27  0:33       ` Matthew Wilcox
  0 siblings, 0 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-10-27  0:33 UTC (permalink / raw)
  To: Alexey Dobriyan
  Cc: Andrew Morton, torvalds, linux-kernel, willy, Stephane Eranian

On Sat, Oct 27, 2007 at 12:39:41AM +0400, Alexey Dobriyan wrote:
> > > -		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
> > > +		if (!is_task_stopped_or_traced(task)) {
>                      ^^^^^^^^^^^^^^^^^^^^^^^^^^
> 
> I think this is horrible. Are you going to add full blown static inline
> function for every combination of state tests?

No, because we don't need them.  Just stopped, traced, stopped_or_traced.

-- 
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours.  We can't possibly take such
a retrograde step."


* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-24 12:24 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
  2007-10-25  3:31   ` Andrew Morton
  2007-10-26 18:45   ` Andrew Morton
@ 2007-12-05 12:56   ` Ingo Molnar
  2007-12-06 14:42     ` Matthew Wilcox
  2 siblings, 1 reply; 22+ messages in thread
From: Ingo Molnar @ 2007-12-05 12:56 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: torvalds, akpm, linux-kernel, Matthew Wilcox


* Matthew Wilcox <matthew@wil.cx> wrote:

> +#define is_task_stopped(task)	((task->state & TASK_STOPPED) != 0)
> +#define is_task_stopped_or_traced(task)	\
> +			((task->state & (TASK_STOPPED | TASK_TRACED)) != 0)
> +#define is_task_loadavg(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0)

1) please change 'is' and 'task' around so that it reads nicer:

   if (task_is_stopped(t))

instead of the tongue-twister:

   if (is_task_stopped(t))

2) please change task_is_loadavg() to something more sensible - i didn't 
know what it meant when i first saw it in -mm's sched.c. 
task_is_uninterruptible() would be the logical choice ...

	Ingo



* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-12-05 12:56   ` Ingo Molnar
@ 2007-12-06 14:42     ` Matthew Wilcox
  0 siblings, 0 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-12-06 14:42 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: torvalds, akpm, linux-kernel, Matthew Wilcox

On Wed, Dec 05, 2007 at 01:56:22PM +0100, Ingo Molnar wrote:
> 1) please change 'is' and 'task' around so that it reads nicer:
> 
>    if (task_is_stopped(t))
> 
> instead of the tongue-twister:
> 
>    if (is_task_stopped(t))

Sure, no problem.  I vacillated on this order myself.

> 2) please change task_is_loadavg() to something more sensible - i didnt 
> know what it meant when i first saw it in -mm's sched.c. 
> task_is_uninterruptible() would be the logical choice ...

It's not obvious to me that "can't be interrupted by a signal" is the
same thing as "contributes to loadavg".  Indeed, I think we would all
like to see a day where there's no such thing as an uninterruptible sleep,
but we'll still want at least some of those tasks contributing to loadavg.

You're right that task_is_loadavg() doesn't make much sense.
task_contributes_to_load() is a better name, but only marginally.

I've considered task_performing_io(), and name the corresponding bit
__TASK_PERFORMING_IO (or even just __TASK_DOING_IO or __TASK_IO), but
it's not just doing IO that makes a task contribute to loadavg.

I have no good suggestions here ;-(

-- 
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours.  We can't possibly take such
a retrograde step."


* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-18 22:25 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
@ 2007-10-25  3:50   ` Nick Piggin
  0 siblings, 0 replies; 22+ messages in thread
From: Nick Piggin @ 2007-10-25  3:50 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-kernel, Matthew Wilcox

On Friday 19 October 2007 08:25, Matthew Wilcox wrote:
> Abstracting away direct uses of TASK_ flags allows us to change the
> definitions of the task flags more easily.
>
> Also restructure do_wait() a little
>
> Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
> ---
>  arch/ia64/kernel/perfmon.c |    4 +-
>  fs/proc/array.c            |    9 +---
>  fs/proc/base.c             |    2 +-
>  include/linux/sched.h      |   15 +++++++
>  include/linux/wait.h       |   11 +++--
>  kernel/exit.c              |   90 +++++++++++++++++++------------------------
>  kernel/power/process.c     |    7 +--
>  kernel/ptrace.c            |    8 ++--
>  kernel/sched.c             |   15 +++----
>  kernel/signal.c            |    6 +-
>  kernel/wait.c              |    2 +-
>  11 files changed, 83 insertions(+), 86 deletions(-)
>
> diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
> index f55fa07..6b0a6cf 100644
> --- a/arch/ia64/kernel/perfmon.c
> +++ b/arch/ia64/kernel/perfmon.c
> @@ -2630,7 +2630,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
>  	 */
>  	if (task == current) return 0;
>
> -	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
> +	if (!is_task_stopped_or_traced(task)) {
>  		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
>  		return -EBUSY;
>  	}
> @@ -4790,7 +4790,7 @@ recheck:
>  	 * the task must be stopped.
>  	 */
>  	if (PFM_CMD_STOPPED(cmd)) {
> -		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
> +		if (!is_task_stopped_or_traced(task)) {
>  			DPRINT(("[%d] task not in stopped state\n", task->pid));
>  			return -EBUSY;
>  		}
> diff --git a/fs/proc/array.c b/fs/proc/array.c
> index 27b59f5..8939bf0 100644
> --- a/fs/proc/array.c
> +++ b/fs/proc/array.c
> @@ -140,13 +140,8 @@ static const char *task_state_array[] = {
>
>  static inline const char *get_task_state(struct task_struct *tsk)
>  {
> -	unsigned int state = (tsk->state & (TASK_RUNNING |
> -					    TASK_INTERRUPTIBLE |
> -					    TASK_UNINTERRUPTIBLE |
> -					    TASK_STOPPED |
> -					    TASK_TRACED)) |
> -			(tsk->exit_state & (EXIT_ZOMBIE |
> -					    EXIT_DEAD));
> +	unsigned int state = (tsk->state & TASK_REPORT) |
> +			(tsk->exit_state & (EXIT_ZOMBIE | EXIT_DEAD));
>  	const char **p = &task_state_array[0];
>
>  	while (state) {
> diff --git a/fs/proc/base.c b/fs/proc/base.c
> index 4fe74d1..e7e1815 100644
> --- a/fs/proc/base.c
> +++ b/fs/proc/base.c
> @@ -196,7 +196,7 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
>  	(task == current || \
>  	(task->parent == current && \
>  	(task->ptrace & PT_PTRACED) && \
> -	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
> +	 (is_task_stopped_or_traced(task)) && \
>  	 security_ptrace(current,task) == 0))
>
>  static int proc_pid_cmdline(struct task_struct *task, char * buffer)
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index c204ab0..5ef5253 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -177,6 +177,21 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
>  /* in tsk->state again */
>  #define TASK_DEAD		64
>
> +/* Convenience macros for the sake of wake_up */
> +#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
> +#define TASK_ALL		(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)
> +
> +/* get_task_state() */
> +#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
> +				 TASK_UNINTERRUPTIBLE | TASK_STOPPED | \
> +				 TASK_TRACED)

I think it would be nicer if you made it explicit in the name that
these are not individual flags. Maybe it doesn't matter though...

Also, TASK_NORMAL / TASK_ALL aren't very good names. TASK_SLEEP_NORMAL /
TASK_SLEEP_ALL might be a bit more helpful?


* [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-18 22:25 [PATCH 0/5] TASK_KILLABLE Matthew Wilcox
@ 2007-10-18 22:25 ` Matthew Wilcox
  2007-10-25  3:50   ` Nick Piggin
  0 siblings, 1 reply; 22+ messages in thread
From: Matthew Wilcox @ 2007-10-18 22:25 UTC (permalink / raw)
  To: linux-kernel; +Cc: Matthew Wilcox, Matthew Wilcox

Abstracting away direct uses of TASK_ flags allows us to change the
definitions of the task flags more easily.

Also restructure do_wait() a little.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 arch/ia64/kernel/perfmon.c |    4 +-
 fs/proc/array.c            |    9 +---
 fs/proc/base.c             |    2 +-
 include/linux/sched.h      |   15 +++++++
 include/linux/wait.h       |   11 +++--
 kernel/exit.c              |   90 +++++++++++++++++++------------------------
 kernel/power/process.c     |    7 +--
 kernel/ptrace.c            |    8 ++--
 kernel/sched.c             |   15 +++----
 kernel/signal.c            |    6 +-
 kernel/wait.c              |    2 +-
 11 files changed, 83 insertions(+), 86 deletions(-)

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f55fa07..6b0a6cf 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2630,7 +2630,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 */
 	if (task == current) return 0;
 
-	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+	if (!is_task_stopped_or_traced(task)) {
 		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
 		return -EBUSY;
 	}
@@ -4790,7 +4790,7 @@ recheck:
 	 * the task must be stopped.
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
-		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+		if (!is_task_stopped_or_traced(task)) {
 			DPRINT(("[%d] task not in stopped state\n", task->pid));
 			return -EBUSY;
 		}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 27b59f5..8939bf0 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -140,13 +140,8 @@ static const char *task_state_array[] = {
 
 static inline const char *get_task_state(struct task_struct *tsk)
 {
-	unsigned int state = (tsk->state & (TASK_RUNNING |
-					    TASK_INTERRUPTIBLE |
-					    TASK_UNINTERRUPTIBLE |
-					    TASK_STOPPED |
-					    TASK_TRACED)) |
-			(tsk->exit_state & (EXIT_ZOMBIE |
-					    EXIT_DEAD));
+	unsigned int state = (tsk->state & TASK_REPORT) |
+			(tsk->exit_state & (EXIT_ZOMBIE | EXIT_DEAD));
 	const char **p = &task_state_array[0];
 
 	while (state) {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 4fe74d1..e7e1815 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -196,7 +196,7 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
 	(task == current || \
 	(task->parent == current && \
 	(task->ptrace & PT_PTRACED) && \
-	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+	 (is_task_stopped_or_traced(task)) && \
 	 security_ptrace(current,task) == 0))
 
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c204ab0..5ef5253 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -177,6 +177,21 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 /* in tsk->state again */
 #define TASK_DEAD		64
 
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL		(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
+				 TASK_UNINTERRUPTIBLE | TASK_STOPPED | \
+				 TASK_TRACED)
+
+#define is_task_traced(task)	((task->state & TASK_TRACED) != 0)
+#define is_task_stopped(task)	((task->state & TASK_STOPPED) != 0)
+#define is_task_stopped_or_traced(task)	\
+			((task->state & (TASK_STOPPED | TASK_TRACED)) != 0)
+#define is_task_loadavg(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0)
+
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value)		\
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0e68628..0a410a4 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 
-#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
 #define __wait_event(wq, condition) 					\
 do {									\
diff --git a/kernel/exit.c b/kernel/exit.c
index 2c704c8..f3c6e8c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -249,7 +249,7 @@ static int has_stopped_jobs(struct pid *pgrp)
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p->state != TASK_STOPPED)
+		if (!is_task_stopped(p))
 			continue;
 		retval = 1;
 		break;
@@ -613,7 +613,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 		p->parent = p->real_parent;
 		add_parent(p);
 
-		if (p->state == TASK_TRACED) {
+		if (is_task_traced(p)) {
 			/*
 			 * If it was at a trace stop, turn it into
 			 * a normal stop since it's no longer being
@@ -1354,7 +1354,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 
 		exit_code = p->exit_code;
 		if (unlikely(!exit_code) ||
-		    unlikely(p->state & TASK_TRACED))
+		    unlikely(is_task_traced(p)))
 			goto bail_ref;
 		return wait_noreap_copyout(p, pid, uid,
 					   why, (exit_code << 8) | 0x7f,
@@ -1533,60 +1533,51 @@ repeat:
 			}
 			allowed = 1;
 
-			switch (p->state) {
-			case TASK_TRACED:
-				/*
-				 * When we hit the race with PTRACE_ATTACH,
-				 * we will not report this child.  But the
-				 * race means it has not yet been moved to
-				 * our ptrace_children list, so we need to
-				 * set the flag here to avoid a spurious ECHILD
-				 * when the race happens with the only child.
-				 */
-				flag = 1;
-				if (!my_ptrace_child(p))
-					continue;
-				/*FALLTHROUGH*/
-			case TASK_STOPPED:
+			if (is_task_stopped_or_traced(p)) {
 				/*
 				 * It's stopped now, so it might later
 				 * continue, exit, or stop again.
+				 *
+				 * When we hit the race with PTRACE_ATTACH, we
+				 * will not report this child.  But the race
+				 * means it has not yet been moved to our
+				 * ptrace_children list, so we need to set the
+				 * flag here to avoid a spurious ECHILD when
+				 * the race happens with the only child.
 				 */
 				flag = 1;
-				if (!(options & WUNTRACED) &&
-				    !my_ptrace_child(p))
-					continue;
+
+				if (!my_ptrace_child(p)) {
+					if (is_task_traced(p))
+						continue;
+					if (!(options & WUNTRACED))
+						continue;
+				}
+
 				retval = wait_task_stopped(p, ret == 2,
-							   (options & WNOWAIT),
-							   infop,
-							   stat_addr, ru);
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval == -EAGAIN)
 					goto repeat;
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
-			default:
-			// case EXIT_DEAD:
-				if (p->exit_state == EXIT_DEAD)
+			} else if (p->exit_state == EXIT_DEAD) {
+				continue;
+			} else if (p->exit_state == EXIT_ZOMBIE) {
+				/*
+				 * Eligible but we cannot release it yet:
+				 */
+				if (ret == 2)
+					goto check_continued;
+				if (!likely(options & WEXITED))
 					continue;
-			// case EXIT_ZOMBIE:
-				if (p->exit_state == EXIT_ZOMBIE) {
-					/*
-					 * Eligible but we cannot release
-					 * it yet:
-					 */
-					if (ret == 2)
-						goto check_continued;
-					if (!likely(options & WEXITED))
-						continue;
-					retval = wait_task_zombie(
-						p, (options & WNOWAIT),
-						infop, stat_addr, ru);
-					/* He released the lock.  */
-					if (retval != 0)
-						goto end;
-					break;
-				}
+				retval = wait_task_zombie(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
+				/* He released the lock.  */
+				if (retval != 0)
+					goto end;
+			} else {
 check_continued:
 				/*
 				 * It's running now, so it might later
@@ -1595,12 +1586,11 @@ check_continued:
 				flag = 1;
 				if (!unlikely(options & WCONTINUED))
 					continue;
-				retval = wait_task_continued(
-					p, (options & WNOWAIT),
-					infop, stat_addr, ru);
+				retval = wait_task_continued(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
 			}
 		}
 		if (!flag) {
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 3434940..ac0c27a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -83,10 +83,10 @@ static void freeze_task(struct task_struct *p)
 		rmb();
 		if (!frozen(p)) {
 			set_freeze_flag(p);
-			if (p->state == TASK_STOPPED)
+			if (is_task_stopped(p))
 				force_sig_specific(SIGSTOP, p);
 			spin_lock_irqsave(&p->sighand->siglock, flags);
-			signal_wake_up(p, p->state == TASK_STOPPED);
+			signal_wake_up(p, is_task_stopped(p));
 			spin_unlock_irqrestore(&p->sighand->siglock, flags);
 		}
 	}
@@ -120,8 +120,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
 				continue;
 
 			if (freeze_user_space) {
-				if (p->state == TASK_TRACED &&
-				    frozen(p->parent)) {
+				if (is_task_traced(p) && frozen(p->parent)) {
 					cancel_freezing(p);
 					continue;
 				}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a73ebd3..7d1ef3b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -50,7 +50,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
+	if (is_task_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
 			child->state = TASK_STOPPED;
 		} else {
@@ -78,7 +78,7 @@ void __ptrace_unlink(struct task_struct *child)
 		add_parent(child);
 	}
 
-	if (child->state == TASK_TRACED)
+	if (is_task_traced(child))
 		ptrace_untrace(child);
 }
 
@@ -102,9 +102,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
+		if (is_task_stopped(child)) {
 			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
+		} else if (!is_task_traced(child) && !kill) {
 			ret = -ESRCH;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 92721d1..04f6141 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -961,7 +961,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -973,7 +973,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1609,8 +1609,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -3786,8 +3785,7 @@ void fastcall complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -3798,8 +3796,7 @@ void fastcall complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -5172,7 +5169,7 @@ static void activate_idle_task(struct task_struct *p, struct rq *rq)
 {
 	update_rq_clock(rq);
 
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, 0);
diff --git a/kernel/signal.c b/kernel/signal.c
index 2124ffa..16c16a3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -839,7 +839,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (is_task_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -1437,7 +1437,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	BUG_ON(sig == -1);
 
  	/* do_notify_parent_cldstop should have been called instead.  */
- 	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+ 	BUG_ON(is_task_stopped_or_traced(tsk));
 
 	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1704,7 +1704,7 @@ static int do_signal_stop(int signr)
 			 * so this check has no races.
 			 */
 			if (!t->exit_state &&
-			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+			    !is_task_stopped_or_traced(t)) {
 				stop_count++;
 				signal_wake_up(t, 0);
 			}
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbf..f987688 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+		__wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);
 
-- 
1.4.4.2


^ permalink raw reply related	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-09-02  2:46 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
  2007-09-02  2:54   ` Matthew Wilcox
  2007-09-02  3:35   ` Daniel Walker
@ 2007-09-03 21:03   ` Matthew Wilcox
  2 siblings, 0 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-09-03 21:03 UTC (permalink / raw)
  To: akpm, torvalds; +Cc: linux-kernel

On Sat, Sep 01, 2007 at 10:46:51PM -0400, Matthew Wilcox wrote:
> Abstracting away direct uses of TASK_ flags allows us to change the
> definitions of the task flags more easily.
> 
> Also restructure do_wait() a little
> 
> Signed-off-by: Matthew Wilcox <matthew@wil.cx>
> ---
>  arch/ia64/kernel/perfmon.c |    4 +-
>  fs/proc/array.c            |    9 +---
>  fs/proc/base.c             |    2 +-
>  include/linux/sched.h      |   15 +++++++
>  include/linux/wait.h       |   11 +++--
>  kernel/exit.c              |   90 +++++++++++++++++++------------------------
>  kernel/power/process.c     |    7 +--
>  kernel/ptrace.c            |    8 ++--
>  kernel/sched.c             |   15 +++----
>  kernel/signal.c            |    6 +-
>  kernel/wait.c              |    2 +-
>  11 files changed, 83 insertions(+), 86 deletions(-)

Here's a replacement patch 2/5, including fixes for the problems spotted
by Daniel and me.  A machine running a kernel with these patches applied
has been up for more than 24 hours.
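
For reference, the two problems were: the perfmon conversion passed
task->state to a macro which already dereferences ->state, and the
has_stopped_jobs() test had lost its negation.  The corrected lines, as
they appear in the patch below, read:

	if (!is_task_stopped_or_traced(task)) {

and

	if (!is_task_stopped(p))
		continue;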

commit 10287ac5d615a2ccca6611426572259ebc69dc92
Author: Matthew Wilcox <matthew@wil.cx>
Date:   Sat Sep 1 09:31:22 2007 -0400

    Use macros instead of TASK_ flags
    
    Abstracting away direct uses of TASK_ flags allows us to change the
    definitions of the task flags more easily.
    
    Also restructure do_wait() a little
    
    Signed-off-by: Matthew Wilcox <matthew@wil.cx>

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 14b8e5a..3690c46 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2637,7 +2637,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 */
 	if (task == current) return 0;
 
-	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+	if (!is_task_stopped_or_traced(task)) {
 		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
 		return -EBUSY;
 	}
@@ -4797,7 +4797,7 @@ recheck:
 	 * the task must be stopped.
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
-		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+		if (!is_task_stopped_or_traced(task)) {
 			DPRINT(("[%d] task not in stopped state\n", task->pid));
 			return -EBUSY;
 		}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ee4814d..6a3c876 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -140,13 +140,8 @@ static const char *task_state_array[] = {
 
 static inline const char *get_task_state(struct task_struct *tsk)
 {
-	unsigned int state = (tsk->state & (TASK_RUNNING |
-					    TASK_INTERRUPTIBLE |
-					    TASK_UNINTERRUPTIBLE |
-					    TASK_STOPPED |
-					    TASK_TRACED)) |
-			(tsk->exit_state & (EXIT_ZOMBIE |
-					    EXIT_DEAD));
+	unsigned int state = (tsk->state & TASK_REPORT) |
+			(tsk->exit_state & (EXIT_ZOMBIE | EXIT_DEAD));
 	const char **p = &task_state_array[0];
 
 	while (state) {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 19489b0..3155ef1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -196,7 +196,7 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
 	(task == current || \
 	(task->parent == current && \
 	(task->ptrace & PT_PTRACED) && \
-	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+	 (is_task_stopped_or_traced(task)) && \
 	 security_ptrace(current,task) == 0))
 
 static int proc_pid_environ(struct task_struct *task, char * buffer)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f4e324e..ea509e9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,6 +176,21 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_NONINTERACTIVE	64
 #define TASK_DEAD		128
 
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL		(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
+				 TASK_UNINTERRUPTIBLE | TASK_STOPPED | \
+				 TASK_TRACED)
+
+#define is_task_traced(task)	((task->state & TASK_TRACED) != 0)
+#define is_task_stopped(task)	((task->state & TASK_STOPPED) != 0)
+#define is_task_stopped_or_traced(task)	\
+			((task->state & (TASK_STOPPED | TASK_TRACED)) != 0)
+#define is_task_loadavg(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0)
+
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value)		\
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0e68628..0a410a4 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 
-#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
 #define __wait_event(wq, condition) 					\
 do {									\
diff --git a/kernel/exit.c b/kernel/exit.c
index 06b24b3..325106a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -259,7 +259,7 @@ static int has_stopped_jobs(struct pid *pgrp)
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p->state != TASK_STOPPED)
+		if (!is_task_stopped(p))
 			continue;
 		retval = 1;
 		break;
@@ -634,7 +634,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 		p->parent = p->real_parent;
 		add_parent(p);
 
-		if (p->state == TASK_TRACED) {
+		if (is_task_traced(p)) {
 			/*
 			 * If it was at a trace stop, turn it into
 			 * a normal stop since it's no longer being
@@ -1372,7 +1372,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 
 		exit_code = p->exit_code;
 		if (unlikely(!exit_code) ||
-		    unlikely(p->state & TASK_TRACED))
+		    unlikely(is_task_traced(p)))
 			goto bail_ref;
 		return wait_noreap_copyout(p, pid, uid,
 					   why, (exit_code << 8) | 0x7f,
@@ -1554,60 +1554,51 @@ repeat:
 			}
 			allowed = 1;
 
-			switch (p->state) {
-			case TASK_TRACED:
-				/*
-				 * When we hit the race with PTRACE_ATTACH,
-				 * we will not report this child.  But the
-				 * race means it has not yet been moved to
-				 * our ptrace_children list, so we need to
-				 * set the flag here to avoid a spurious ECHILD
-				 * when the race happens with the only child.
-				 */
-				flag = 1;
-				if (!my_ptrace_child(p))
-					continue;
-				/*FALLTHROUGH*/
-			case TASK_STOPPED:
+			if (is_task_stopped_or_traced(p)) {
 				/*
 				 * It's stopped now, so it might later
 				 * continue, exit, or stop again.
+				 *
+				 * When we hit the race with PTRACE_ATTACH, we
+				 * will not report this child.  But the race
+				 * means it has not yet been moved to our
+				 * ptrace_children list, so we need to set the
+				 * flag here to avoid a spurious ECHILD when
+				 * the race happens with the only child.
 				 */
 				flag = 1;
-				if (!(options & WUNTRACED) &&
-				    !my_ptrace_child(p))
-					continue;
+
+				if (!my_ptrace_child(p)) {
+					if (is_task_traced(p))
+						continue;
+					if (!(options & WUNTRACED))
+						continue;
+				}
+
 				retval = wait_task_stopped(p, ret == 2,
-							   (options & WNOWAIT),
-							   infop,
-							   stat_addr, ru);
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval == -EAGAIN)
 					goto repeat;
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
-			default:
-			// case EXIT_DEAD:
-				if (p->exit_state == EXIT_DEAD)
+			} else if (p->exit_state == EXIT_DEAD) {
+				continue;
+			} else if (p->exit_state == EXIT_ZOMBIE) {
+				/*
+				 * Eligible but we cannot release it yet:
+				 */
+				if (ret == 2)
+					goto check_continued;
+				if (!likely(options & WEXITED))
 					continue;
-			// case EXIT_ZOMBIE:
-				if (p->exit_state == EXIT_ZOMBIE) {
-					/*
-					 * Eligible but we cannot release
-					 * it yet:
-					 */
-					if (ret == 2)
-						goto check_continued;
-					if (!likely(options & WEXITED))
-						continue;
-					retval = wait_task_zombie(
-						p, (options & WNOWAIT),
-						infop, stat_addr, ru);
-					/* He released the lock.  */
-					if (retval != 0)
-						goto end;
-					break;
-				}
+				retval = wait_task_zombie(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
+				/* He released the lock.  */
+				if (retval != 0)
+					goto end;
+			} else {
 check_continued:
 				/*
 				 * It's running now, so it might later
@@ -1616,12 +1607,11 @@ check_continued:
 				flag = 1;
 				if (!unlikely(options & WCONTINUED))
 					continue;
-				retval = wait_task_continued(
-					p, (options & WNOWAIT),
-					infop, stat_addr, ru);
+				retval = wait_task_continued(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
 			}
 		}
 		if (!flag) {
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 3434940..ac0c27a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -83,10 +83,10 @@ static void freeze_task(struct task_struct *p)
 		rmb();
 		if (!frozen(p)) {
 			set_freeze_flag(p);
-			if (p->state == TASK_STOPPED)
+			if (is_task_stopped(p))
 				force_sig_specific(SIGSTOP, p);
 			spin_lock_irqsave(&p->sighand->siglock, flags);
-			signal_wake_up(p, p->state == TASK_STOPPED);
+			signal_wake_up(p, is_task_stopped(p));
 			spin_unlock_irqrestore(&p->sighand->siglock, flags);
 		}
 	}
@@ -120,8 +120,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
 				continue;
 
 			if (freeze_user_space) {
-				if (p->state == TASK_TRACED &&
-				    frozen(p->parent)) {
+				if (is_task_traced(p) && frozen(p->parent)) {
 					cancel_freezing(p);
 					continue;
 				}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 82a558b..92a2283 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -50,7 +50,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
+	if (is_task_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
 			child->state = TASK_STOPPED;
 		} else {
@@ -78,7 +78,7 @@ void __ptrace_unlink(struct task_struct *child)
 		add_parent(child);
 	}
 
-	if (child->state == TASK_TRACED)
+	if (is_task_traced(child))
 		ptrace_untrace(child);
 }
 
@@ -102,9 +102,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
+		if (is_task_stopped(child)) {
 			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
+		} else if (!is_task_traced(child) && !kill) {
 			ret = -ESRCH;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
diff --git a/kernel/sched.c b/kernel/sched.c
index b533d6d..e3be352 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -944,7 +944,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -958,7 +958,7 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
 {
 	update_rq_clock(rq);
 
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, 0);
@@ -970,7 +970,7 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1566,8 +1566,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -3708,8 +3707,7 @@ void fastcall complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -3720,8 +3718,7 @@ void fastcall complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
diff --git a/kernel/signal.c b/kernel/signal.c
index 3169bed..53cbac4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -841,7 +841,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (is_task_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -1445,7 +1445,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	BUG_ON(sig == -1);
 
  	/* do_notify_parent_cldstop should have been called instead.  */
- 	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+ 	BUG_ON(is_task_stopped_or_traced(tsk));
 
 	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1712,7 +1712,7 @@ static int do_signal_stop(int signr)
 			 * so this check has no races.
 			 */
 			if (!t->exit_state &&
-			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+			    !is_task_stopped_or_traced(t)) {
 				stop_count++;
 				signal_wake_up(t, 0);
 			}
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbf..f987688 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+		__wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);
 

-- 
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours.  We can't possibly take such
a retrograde step."

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-09-02  3:35   ` Daniel Walker
@ 2007-09-02  4:05     ` Matthew Wilcox
  0 siblings, 0 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-09-02  4:05 UTC (permalink / raw)
  To: Daniel Walker; +Cc: akpm, torvalds, linux-kernel

On Sat, Sep 01, 2007 at 08:35:06PM -0700, Daniel Walker wrote:
> Does it take task->state or task ?

task.  Clearly I didn't test on ia64.  (There was an iteration where it
took task->state, and I guess I missed one.)  Thanks for pointing out
this oops; I'll fix it for round three.
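
With the definition in this series:

	#define is_task_stopped_or_traced(task)	\
			((task->state & (TASK_STOPPED | TASK_TRACED)) != 0)

passing task->state expands to task->state->state, which won't compile
(->state is a long, not a pointer) -- hence the breakage on ia64, the
only architecture that builds perfmon.c.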

-- 
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours.  We can't possibly take such
a retrograde step."

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-09-02  2:46 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
  2007-09-02  2:54   ` Matthew Wilcox
@ 2007-09-02  3:35   ` Daniel Walker
  2007-09-02  4:05     ` Matthew Wilcox
  2007-09-03 21:03   ` Matthew Wilcox
  2 siblings, 1 reply; 22+ messages in thread
From: Daniel Walker @ 2007-09-02  3:35 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: akpm, torvalds, linux-kernel

On Sat, 2007-09-01 at 22:46 -0400, Matthew Wilcox wrote:
>         */
>         if (task == current) return 0;
>  
> -	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
> +	if (!is_task_stopped_or_traced(task->state)) {
> 		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
> [...]
> -		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
> +		if (!is_task_stopped_or_traced(task)) {
> 			DPRINT(("[%d] task not in stopped state\n", task->pid));
> 			return -EBUSY;
> 		}

Does it take task->state or task ?

Daniel


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-09-02  2:46 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
@ 2007-09-02  2:54   ` Matthew Wilcox
  2007-09-02  3:35   ` Daniel Walker
  2007-09-03 21:03   ` Matthew Wilcox
  2 siblings, 0 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-09-02  2:54 UTC (permalink / raw)
  To: akpm, torvalds; +Cc: linux-kernel

On Sat, Sep 01, 2007 at 10:46:51PM -0400, Matthew Wilcox wrote:
> Abstracting away direct uses of TASK_ flags allows us to change the
> definitions of the task flags more easily.
> --- a/kernel/exit.c
> +++ b/kernel/exit.c
> @@ -259,7 +259,7 @@ static int has_stopped_jobs(struct pid *pgrp)
>  	struct task_struct *p;
>  
>  	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
> -		if (p->state != TASK_STOPPED)
> +		if (is_task_stopped(p))

Funny how I can't spot these things before I send them.

Fixed locally; I'll see if anyone else spots a problem before I send out
a v3.
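
The corrected hunk keeps the original sense, skipping tasks that are
not stopped:

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (!is_task_stopped(p))
			continue;
		retval = 1;
		break;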

-- 
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours.  We can't possibly take such
a retrograde step."

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 2/5] Use macros instead of TASK_ flags
  2007-09-02  2:43 [PATCH] TASK_KILLABLE version 2 Matthew Wilcox
@ 2007-09-02  2:46 ` Matthew Wilcox
  2007-09-02  2:54   ` Matthew Wilcox
                     ` (2 more replies)
  0 siblings, 3 replies; 22+ messages in thread
From: Matthew Wilcox @ 2007-09-02  2:46 UTC (permalink / raw)
  To: akpm, torvalds; +Cc: linux-kernel, Matthew Wilcox

Abstracting away direct uses of TASK_ flags allows us to change the
definitions of the task flags more easily.

Also restructure do_wait() a little

Signed-off-by: Matthew Wilcox <matthew@wil.cx>
---
 arch/ia64/kernel/perfmon.c |    4 +-
 fs/proc/array.c            |    9 +---
 fs/proc/base.c             |    2 +-
 include/linux/sched.h      |   15 +++++++
 include/linux/wait.h       |   11 +++--
 kernel/exit.c              |   90 +++++++++++++++++++------------------------
 kernel/power/process.c     |    7 +--
 kernel/ptrace.c            |    8 ++--
 kernel/sched.c             |   15 +++----
 kernel/signal.c            |    6 +-
 kernel/wait.c              |    2 +-
 11 files changed, 83 insertions(+), 86 deletions(-)

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 14b8e5a..75a99ee 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2637,7 +2637,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 */
 	if (task == current) return 0;
 
-	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+	if (!is_task_stopped_or_traced(task->state)) {
 		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
 		return -EBUSY;
 	}
@@ -4797,7 +4797,7 @@ recheck:
 	 * the task must be stopped.
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
-		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+		if (!is_task_stopped_or_traced(task)) {
 			DPRINT(("[%d] task not in stopped state\n", task->pid));
 			return -EBUSY;
 		}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ee4814d..6a3c876 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -140,13 +140,8 @@ static const char *task_state_array[] = {
 
 static inline const char *get_task_state(struct task_struct *tsk)
 {
-	unsigned int state = (tsk->state & (TASK_RUNNING |
-					    TASK_INTERRUPTIBLE |
-					    TASK_UNINTERRUPTIBLE |
-					    TASK_STOPPED |
-					    TASK_TRACED)) |
-			(tsk->exit_state & (EXIT_ZOMBIE |
-					    EXIT_DEAD));
+	unsigned int state = (tsk->state & TASK_REPORT) |
+			(tsk->exit_state & (EXIT_ZOMBIE | EXIT_DEAD));
 	const char **p = &task_state_array[0];
 
 	while (state) {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 19489b0..3155ef1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -196,7 +196,7 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
 	(task == current || \
 	(task->parent == current && \
 	(task->ptrace & PT_PTRACED) && \
-	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+	 (is_task_stopped_or_traced(task)) && \
 	 security_ptrace(current,task) == 0))
 
 static int proc_pid_environ(struct task_struct *task, char * buffer)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f4e324e..ea509e9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,6 +176,21 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_NONINTERACTIVE	64
 #define TASK_DEAD		128
 
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL		(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
+				 TASK_UNINTERRUPTIBLE | TASK_STOPPED | \
+				 TASK_TRACED)
+
+#define is_task_traced(task)	((task->state & TASK_TRACED) != 0)
+#define is_task_stopped(task)	((task->state & TASK_STOPPED) != 0)
+#define is_task_stopped_or_traced(task)	\
+			((task->state & (TASK_STOPPED | TASK_TRACED)) != 0)
+#define is_task_loadavg(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0)
+
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value)		\
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0e68628..0a410a4 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 
-#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
 #define __wait_event(wq, condition) 					\
 do {									\
diff --git a/kernel/exit.c b/kernel/exit.c
index 06b24b3..3abc703 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -259,7 +259,7 @@ static int has_stopped_jobs(struct pid *pgrp)
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p->state != TASK_STOPPED)
+		if (is_task_stopped(p))
 			continue;
 		retval = 1;
 		break;
@@ -634,7 +634,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 		p->parent = p->real_parent;
 		add_parent(p);
 
-		if (p->state == TASK_TRACED) {
+		if (is_task_traced(p)) {
 			/*
 			 * If it was at a trace stop, turn it into
 			 * a normal stop since it's no longer being
@@ -1372,7 +1372,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 
 		exit_code = p->exit_code;
 		if (unlikely(!exit_code) ||
-		    unlikely(p->state & TASK_TRACED))
+		    unlikely(is_task_traced(p)))
 			goto bail_ref;
 		return wait_noreap_copyout(p, pid, uid,
 					   why, (exit_code << 8) | 0x7f,
@@ -1554,60 +1554,51 @@ repeat:
 			}
 			allowed = 1;
 
-			switch (p->state) {
-			case TASK_TRACED:
-				/*
-				 * When we hit the race with PTRACE_ATTACH,
-				 * we will not report this child.  But the
-				 * race means it has not yet been moved to
-				 * our ptrace_children list, so we need to
-				 * set the flag here to avoid a spurious ECHILD
-				 * when the race happens with the only child.
-				 */
-				flag = 1;
-				if (!my_ptrace_child(p))
-					continue;
-				/*FALLTHROUGH*/
-			case TASK_STOPPED:
+			if (is_task_stopped_or_traced(p)) {
 				/*
 				 * It's stopped now, so it might later
 				 * continue, exit, or stop again.
+				 *
+				 * When we hit the race with PTRACE_ATTACH, we
+				 * will not report this child.  But the race
+				 * means it has not yet been moved to our
+				 * ptrace_children list, so we need to set the
+				 * flag here to avoid a spurious ECHILD when
+				 * the race happens with the only child.
 				 */
 				flag = 1;
-				if (!(options & WUNTRACED) &&
-				    !my_ptrace_child(p))
-					continue;
+
+				if (!my_ptrace_child(p)) {
+					if (is_task_traced(p))
+						continue;
+					if (!(options & WUNTRACED))
+						continue;
+				}
+
 				retval = wait_task_stopped(p, ret == 2,
-							   (options & WNOWAIT),
-							   infop,
-							   stat_addr, ru);
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval == -EAGAIN)
 					goto repeat;
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
-			default:
-			// case EXIT_DEAD:
-				if (p->exit_state == EXIT_DEAD)
+			} else if (p->exit_state == EXIT_DEAD) {
+				continue;
+			} else if (p->exit_state == EXIT_ZOMBIE) {
+				/*
+				 * Eligible but we cannot release it yet:
+				 */
+				if (ret == 2)
+					goto check_continued;
+				if (!likely(options & WEXITED))
 					continue;
-			// case EXIT_ZOMBIE:
-				if (p->exit_state == EXIT_ZOMBIE) {
-					/*
-					 * Eligible but we cannot release
-					 * it yet:
-					 */
-					if (ret == 2)
-						goto check_continued;
-					if (!likely(options & WEXITED))
-						continue;
-					retval = wait_task_zombie(
-						p, (options & WNOWAIT),
-						infop, stat_addr, ru);
-					/* He released the lock.  */
-					if (retval != 0)
-						goto end;
-					break;
-				}
+				retval = wait_task_zombie(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
+				/* He released the lock.  */
+				if (retval != 0)
+					goto end;
+			} else {
 check_continued:
 				/*
 				 * It's running now, so it might later
@@ -1616,12 +1607,11 @@ check_continued:
 				flag = 1;
 				if (!unlikely(options & WCONTINUED))
 					continue;
-				retval = wait_task_continued(
-					p, (options & WNOWAIT),
-					infop, stat_addr, ru);
+				retval = wait_task_continued(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
 			}
 		}
 		if (!flag) {
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 3434940..ac0c27a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -83,10 +83,10 @@ static void freeze_task(struct task_struct *p)
 		rmb();
 		if (!frozen(p)) {
 			set_freeze_flag(p);
-			if (p->state == TASK_STOPPED)
+			if (is_task_stopped(p))
 				force_sig_specific(SIGSTOP, p);
 			spin_lock_irqsave(&p->sighand->siglock, flags);
-			signal_wake_up(p, p->state == TASK_STOPPED);
+			signal_wake_up(p, is_task_stopped(p));
 			spin_unlock_irqrestore(&p->sighand->siglock, flags);
 		}
 	}
@@ -120,8 +120,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
 				continue;
 
 			if (freeze_user_space) {
-				if (p->state == TASK_TRACED &&
-				    frozen(p->parent)) {
+				if (is_task_traced(p) && frozen(p->parent)) {
 					cancel_freezing(p);
 					continue;
 				}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 82a558b..92a2283 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -50,7 +50,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
+	if (is_task_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
 			child->state = TASK_STOPPED;
 		} else {
@@ -78,7 +78,7 @@ void __ptrace_unlink(struct task_struct *child)
 		add_parent(child);
 	}
 
-	if (child->state == TASK_TRACED)
+	if (is_task_traced(child))
 		ptrace_untrace(child);
 }
 
@@ -102,9 +102,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
+		if (is_task_stopped(child)) {
 			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
+		} else if (!is_task_traced(child) && !kill) {
 			ret = -ESRCH;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
diff --git a/kernel/sched.c b/kernel/sched.c
index b533d6d..e3be352 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -944,7 +944,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -958,7 +958,7 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
 {
 	update_rq_clock(rq);
 
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, 0);
@@ -970,7 +970,7 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1566,8 +1566,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -3708,8 +3707,7 @@ void fastcall complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -3720,8 +3718,7 @@ void fastcall complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
diff --git a/kernel/signal.c b/kernel/signal.c
index 3169bed..53cbac4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -841,7 +841,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (is_task_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -1445,7 +1445,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	BUG_ON(sig == -1);
 
  	/* do_notify_parent_cldstop should have been called instead.  */
- 	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+ 	BUG_ON(is_task_stopped_or_traced(tsk));
 
 	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1712,7 +1712,7 @@ static int do_signal_stop(int signr)
 			 * so this check has no races.
 			 */
 			if (!t->exit_state &&
-			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+			    !is_task_stopped_or_traced(t)) {
 				stop_count++;
 				signal_wake_up(t, 0);
 			}
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbf..f987688 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+		__wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);
 
-- 
1.4.4.2


^ permalink raw reply related	[flat|nested] 22+ messages in thread

Thread overview: 22+ messages
2007-10-24 12:24 [PATCH 1/5] Use wake_up_locked() in eventpoll Matthew Wilcox
2007-10-24 12:24 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
2007-10-25  3:31   ` Andrew Morton
2007-10-26 18:45   ` Andrew Morton
2007-10-26 20:39     ` Alexey Dobriyan
2007-10-27  0:33       ` Matthew Wilcox
2007-12-05 12:56   ` Ingo Molnar
2007-12-06 14:42     ` Matthew Wilcox
2007-10-24 12:24 ` [PATCH 3/5] Add TASK_WAKEKILL Matthew Wilcox
2007-10-24 12:24 ` [PATCH 4/5] Add lock_page_killable Matthew Wilcox
2007-10-25  4:11   ` Andrew Morton
2007-10-25  4:13     ` Nick Piggin
2007-10-24 12:24 ` [PATCH 5/5] Make wait_on_retry_sync_kiocb killable Matthew Wilcox
2007-10-25  3:43   ` Andrew Morton
2007-10-25 18:31   ` Zach Brown
  -- strict thread matches above, loose matches on Subject: below --
2007-10-18 22:25 [PATCH 0/5] TASK_KILLABLE Matthew Wilcox
2007-10-18 22:25 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
2007-10-25  3:50   ` Nick Piggin
2007-09-02  2:43 [PATCH] TASK_KILLABLE version 2 Matthew Wilcox
2007-09-02  2:46 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
2007-09-02  2:54   ` Matthew Wilcox
2007-09-02  3:35   ` Daniel Walker
2007-09-02  4:05     ` Matthew Wilcox
2007-09-03 21:03   ` Matthew Wilcox
