linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/5] TASK_KILLABLE
@ 2007-10-18 22:25 Matthew Wilcox
  2007-10-18 22:25 ` [PATCH 1/5] Use wake_up_locked() in eventpoll Matthew Wilcox
                   ` (5 more replies)
  0 siblings, 6 replies; 13+ messages in thread
From: Matthew Wilcox @ 2007-10-18 22:25 UTC (permalink / raw)
  To: linux-kernel; +Cc: Matthew Wilcox

This series of patches introduces the facility to deliver only fatal
signals to tasks which are otherwise waiting uninterruptibly.
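
As a rough sketch of the pattern this enables (my_dev and my_condition
are made up for illustration; TASK_KILLABLE and fatal_signal_pending()
are what this series actually adds), a killable wait loop looks like:

        static int my_wait_for_event(struct my_dev *dev)
        {
                while (!my_condition(dev)) {
                        set_current_state(TASK_KILLABLE);
                        if (!my_condition(dev))
                                schedule();
                        __set_current_state(TASK_RUNNING);
                        /* only a fatal signal (SIGKILL) wakes us early */
                        if (fatal_signal_pending(current))
                                return -EINTR;
                }
                return 0;
        }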


* [PATCH 1/5] Use wake_up_locked() in eventpoll
  2007-10-18 22:25 [PATCH 0/5] TASK_KILLABLE Matthew Wilcox
@ 2007-10-18 22:25 ` Matthew Wilcox
  2007-10-19  3:56   ` Arjan van de Ven
  2007-10-18 22:25 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 13+ messages in thread
From: Matthew Wilcox @ 2007-10-18 22:25 UTC (permalink / raw)
  To: linux-kernel; +Cc: Matthew Wilcox

Replace the uses of __wake_up_locked() with wake_up_locked().

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 fs/eventpoll.c |   11 ++++-------
 1 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 77b9953..72e4cb4 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -657,8 +657,7 @@ is_linked:
 	 * wait list.
 	 */
 	if (waitqueue_active(&ep->wq))
-		__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-				 TASK_INTERRUPTIBLE);
+		wake_up_locked(&ep->wq);
 	if (waitqueue_active(&ep->poll_wait))
 		pwake++;
 
@@ -781,7 +780,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 		/* Notify waiting tasks that events are available */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
@@ -855,8 +854,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 
 			/* Notify waiting tasks that events are available */
 			if (waitqueue_active(&ep->wq))
-				__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-						 TASK_INTERRUPTIBLE);
+				wake_up_locked(&ep->wq);
 			if (waitqueue_active(&ep->poll_wait))
 				pwake++;
 		}
@@ -979,8 +977,7 @@ errxit:
 		 * wait list (delayed after we release the lock).
 		 */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-					 TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
-- 
1.4.4.2



* [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-18 22:25 [PATCH 0/5] TASK_KILLABLE Matthew Wilcox
  2007-10-18 22:25 ` [PATCH 1/5] Use wake_up_locked() in eventpoll Matthew Wilcox
@ 2007-10-18 22:25 ` Matthew Wilcox
  2007-10-25  3:50   ` Nick Piggin
  2007-10-18 22:26 ` [PATCH 3/5] Add TASK_WAKEKILL Matthew Wilcox
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 13+ messages in thread
From: Matthew Wilcox @ 2007-10-18 22:25 UTC (permalink / raw)
  To: linux-kernel; +Cc: Matthew Wilcox

Abstracting away direct uses of TASK_ flags allows us to change the
definitions of the task flags more easily.

Also restructure do_wait() a little.
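
The point of the macros is that callers keep working when the flag
encoding changes, which patch 3/5 does. A before/after sketch
(do_something() is a placeholder):

        /* before: breaks if TASK_STOPPED ever gains extra bits */
        if (task->state == TASK_STOPPED || task->state == TASK_TRACED)
                do_something();

        /* after: still correct once 3/5 redefines TASK_STOPPED as
         * (TASK_WAKEKILL | __TASK_STOPPED) */
        if (is_task_stopped_or_traced(task))
                do_something();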

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 arch/ia64/kernel/perfmon.c |    4 +-
 fs/proc/array.c            |    9 +---
 fs/proc/base.c             |    2 +-
 include/linux/sched.h      |   15 +++++++
 include/linux/wait.h       |   11 +++--
 kernel/exit.c              |   90 +++++++++++++++++++------------------------
 kernel/power/process.c     |    7 +--
 kernel/ptrace.c            |    8 ++--
 kernel/sched.c             |   15 +++----
 kernel/signal.c            |    6 +-
 kernel/wait.c              |    2 +-
 11 files changed, 83 insertions(+), 86 deletions(-)

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f55fa07..6b0a6cf 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2630,7 +2630,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 */
 	if (task == current) return 0;
 
-	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+	if (!is_task_stopped_or_traced(task)) {
 		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
 		return -EBUSY;
 	}
@@ -4790,7 +4790,7 @@ recheck:
 	 * the task must be stopped.
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
-		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+		if (!is_task_stopped_or_traced(task)) {
 			DPRINT(("[%d] task not in stopped state\n", task->pid));
 			return -EBUSY;
 		}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 27b59f5..8939bf0 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -140,13 +140,8 @@ static const char *task_state_array[] = {
 
 static inline const char *get_task_state(struct task_struct *tsk)
 {
-	unsigned int state = (tsk->state & (TASK_RUNNING |
-					    TASK_INTERRUPTIBLE |
-					    TASK_UNINTERRUPTIBLE |
-					    TASK_STOPPED |
-					    TASK_TRACED)) |
-			(tsk->exit_state & (EXIT_ZOMBIE |
-					    EXIT_DEAD));
+	unsigned int state = (tsk->state & TASK_REPORT) |
+			(tsk->exit_state & (EXIT_ZOMBIE | EXIT_DEAD));
 	const char **p = &task_state_array[0];
 
 	while (state) {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 4fe74d1..e7e1815 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -196,7 +196,7 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
 	(task == current || \
 	(task->parent == current && \
 	(task->ptrace & PT_PTRACED) && \
-	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+	 (is_task_stopped_or_traced(task)) && \
 	 security_ptrace(current,task) == 0))
 
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c204ab0..5ef5253 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -177,6 +177,21 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 /* in tsk->state again */
 #define TASK_DEAD		64
 
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL		(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
+				 TASK_UNINTERRUPTIBLE | TASK_STOPPED | \
+				 TASK_TRACED)
+
+#define is_task_traced(task)	((task->state & TASK_TRACED) != 0)
+#define is_task_stopped(task)	((task->state & TASK_STOPPED) != 0)
+#define is_task_stopped_or_traced(task)	\
+			((task->state & (TASK_STOPPED | TASK_TRACED)) != 0)
+#define is_task_loadavg(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0)
+
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value)		\
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0e68628..0a410a4 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 
-#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
 #define __wait_event(wq, condition) 					\
 do {									\
diff --git a/kernel/exit.c b/kernel/exit.c
index 2c704c8..f3c6e8c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -249,7 +249,7 @@ static int has_stopped_jobs(struct pid *pgrp)
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p->state != TASK_STOPPED)
+		if (!is_task_stopped(p))
 			continue;
 		retval = 1;
 		break;
@@ -613,7 +613,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 		p->parent = p->real_parent;
 		add_parent(p);
 
-		if (p->state == TASK_TRACED) {
+		if (is_task_traced(p)) {
 			/*
 			 * If it was at a trace stop, turn it into
 			 * a normal stop since it's no longer being
@@ -1354,7 +1354,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 
 		exit_code = p->exit_code;
 		if (unlikely(!exit_code) ||
-		    unlikely(p->state & TASK_TRACED))
+		    unlikely(is_task_traced(p)))
 			goto bail_ref;
 		return wait_noreap_copyout(p, pid, uid,
 					   why, (exit_code << 8) | 0x7f,
@@ -1533,60 +1533,51 @@ repeat:
 			}
 			allowed = 1;
 
-			switch (p->state) {
-			case TASK_TRACED:
-				/*
-				 * When we hit the race with PTRACE_ATTACH,
-				 * we will not report this child.  But the
-				 * race means it has not yet been moved to
-				 * our ptrace_children list, so we need to
-				 * set the flag here to avoid a spurious ECHILD
-				 * when the race happens with the only child.
-				 */
-				flag = 1;
-				if (!my_ptrace_child(p))
-					continue;
-				/*FALLTHROUGH*/
-			case TASK_STOPPED:
+			if (is_task_stopped_or_traced(p)) {
 				/*
 				 * It's stopped now, so it might later
 				 * continue, exit, or stop again.
+				 *
+				 * When we hit the race with PTRACE_ATTACH, we
+				 * will not report this child.  But the race
+				 * means it has not yet been moved to our
+				 * ptrace_children list, so we need to set the
+				 * flag here to avoid a spurious ECHILD when
+				 * the race happens with the only child.
 				 */
 				flag = 1;
-				if (!(options & WUNTRACED) &&
-				    !my_ptrace_child(p))
-					continue;
+
+				if (!my_ptrace_child(p)) {
+					if (is_task_traced(p))
+						continue;
+					if (!(options & WUNTRACED))
+						continue;
+				}
+
 				retval = wait_task_stopped(p, ret == 2,
-							   (options & WNOWAIT),
-							   infop,
-							   stat_addr, ru);
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval == -EAGAIN)
 					goto repeat;
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
-			default:
-			// case EXIT_DEAD:
-				if (p->exit_state == EXIT_DEAD)
+			} else if (p->exit_state == EXIT_DEAD) {
+				continue;
+			} else if (p->exit_state == EXIT_ZOMBIE) {
+				/*
+				 * Eligible but we cannot release it yet:
+				 */
+				if (ret == 2)
+					goto check_continued;
+				if (!likely(options & WEXITED))
 					continue;
-			// case EXIT_ZOMBIE:
-				if (p->exit_state == EXIT_ZOMBIE) {
-					/*
-					 * Eligible but we cannot release
-					 * it yet:
-					 */
-					if (ret == 2)
-						goto check_continued;
-					if (!likely(options & WEXITED))
-						continue;
-					retval = wait_task_zombie(
-						p, (options & WNOWAIT),
-						infop, stat_addr, ru);
-					/* He released the lock.  */
-					if (retval != 0)
-						goto end;
-					break;
-				}
+				retval = wait_task_zombie(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
+				/* He released the lock.  */
+				if (retval != 0)
+					goto end;
+			} else {
 check_continued:
 				/*
 				 * It's running now, so it might later
@@ -1595,12 +1586,11 @@ check_continued:
 				flag = 1;
 				if (!unlikely(options & WCONTINUED))
 					continue;
-				retval = wait_task_continued(
-					p, (options & WNOWAIT),
-					infop, stat_addr, ru);
+				retval = wait_task_continued(p,
+						(options & WNOWAIT), infop,
+						stat_addr, ru);
 				if (retval != 0) /* He released the lock.  */
 					goto end;
-				break;
 			}
 		}
 		if (!flag) {
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 3434940..ac0c27a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -83,10 +83,10 @@ static void freeze_task(struct task_struct *p)
 		rmb();
 		if (!frozen(p)) {
 			set_freeze_flag(p);
-			if (p->state == TASK_STOPPED)
+			if (is_task_stopped(p))
 				force_sig_specific(SIGSTOP, p);
 			spin_lock_irqsave(&p->sighand->siglock, flags);
-			signal_wake_up(p, p->state == TASK_STOPPED);
+			signal_wake_up(p, is_task_stopped(p));
 			spin_unlock_irqrestore(&p->sighand->siglock, flags);
 		}
 	}
@@ -120,8 +120,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
 				continue;
 
 			if (freeze_user_space) {
-				if (p->state == TASK_TRACED &&
-				    frozen(p->parent)) {
+				if (is_task_traced(p) && frozen(p->parent)) {
 					cancel_freezing(p);
 					continue;
 				}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a73ebd3..7d1ef3b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -50,7 +50,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
+	if (is_task_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
 			child->state = TASK_STOPPED;
 		} else {
@@ -78,7 +78,7 @@ void __ptrace_unlink(struct task_struct *child)
 		add_parent(child);
 	}
 
-	if (child->state == TASK_TRACED)
+	if (is_task_traced(child))
 		ptrace_untrace(child);
 }
 
@@ -102,9 +102,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
+		if (is_task_stopped(child)) {
 			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
+		} else if (!is_task_traced(child) && !kill) {
 			ret = -ESRCH;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 92721d1..04f6141 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -961,7 +961,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -973,7 +973,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1609,8 +1609,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -3786,8 +3785,7 @@ void fastcall complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -3798,8 +3796,7 @@ void fastcall complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -5172,7 +5169,7 @@ static void activate_idle_task(struct task_struct *p, struct rq *rq)
 {
 	update_rq_clock(rq);
 
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (is_task_loadavg(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, 0);
diff --git a/kernel/signal.c b/kernel/signal.c
index 2124ffa..16c16a3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -839,7 +839,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (is_task_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -1437,7 +1437,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	BUG_ON(sig == -1);
 
  	/* do_notify_parent_cldstop should have been called instead.  */
- 	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+ 	BUG_ON(is_task_stopped_or_traced(tsk));
 
 	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1704,7 +1704,7 @@ static int do_signal_stop(int signr)
 			 * so this check has no races.
 			 */
 			if (!t->exit_state &&
-			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+			    !is_task_stopped_or_traced(t)) {
 				stop_count++;
 				signal_wake_up(t, 0);
 			}
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbf..f987688 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+		__wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);
 
-- 
1.4.4.2



* [PATCH 3/5] Add TASK_WAKEKILL
  2007-10-18 22:25 [PATCH 0/5] TASK_KILLABLE Matthew Wilcox
  2007-10-18 22:25 ` [PATCH 1/5] Use wake_up_locked() in eventpoll Matthew Wilcox
  2007-10-18 22:25 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
@ 2007-10-18 22:26 ` Matthew Wilcox
  2007-10-18 22:26 ` [PATCH 4/5] Add lock_page_killable Matthew Wilcox
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 13+ messages in thread
From: Matthew Wilcox @ 2007-10-18 22:26 UTC (permalink / raw)
  To: linux-kernel; +Cc: Matthew Wilcox

Set TASK_WAKEKILL for TASK_STOPPED and TASK_TRACED, add TASK_KILLABLE,
and use TASK_WAKEKILL in signal_wake_up().
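
The reason this is enough: try_to_wake_up() only wakes a task whose
state intersects the caller's mask. A simplified sketch of that
existing test (not code added by this patch):

        /* in try_to_wake_up(); in the resume case signal_wake_up()
         * now passes mask = TASK_INTERRUPTIBLE | TASK_WAKEKILL */
        if (!(p->state & mask))
                goto out;       /* task stays asleep */

        /* TASK_KILLABLE == TASK_WAKEKILL | TASK_UNINTERRUPTIBLE, so the
         * TASK_WAKEKILL bit matches and a killable sleeper is woken,
         * while a plain TASK_UNINTERRUPTIBLE sleeper is not. */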

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 include/linux/sched.h |   22 ++++++++++++++--------
 kernel/signal.c       |    8 ++++----
 2 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5ef5253..f02ade4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -169,27 +169,33 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_RUNNING		0
 #define TASK_INTERRUPTIBLE	1
 #define TASK_UNINTERRUPTIBLE	2
-#define TASK_STOPPED		4
-#define TASK_TRACED		8
+#define __TASK_STOPPED		4
+#define __TASK_TRACED		8
 /* in tsk->exit_state */
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
 /* in tsk->state again */
 #define TASK_DEAD		64
+#define TASK_WAKEKILL		128
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
 
 /* Convenience macros for the sake of wake_up */
 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
-#define TASK_ALL		(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)
+#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
 
 /* get_task_state() */
 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
-				 TASK_UNINTERRUPTIBLE | TASK_STOPPED | \
-				 TASK_TRACED)
+				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+				 __TASK_TRACED)
 
-#define is_task_traced(task)	((task->state & TASK_TRACED) != 0)
-#define is_task_stopped(task)	((task->state & TASK_STOPPED) != 0)
+#define is_task_traced(task)	((task->state & __TASK_TRACED) != 0)
+#define is_task_stopped(task)	((task->state & __TASK_STOPPED) != 0)
 #define is_task_stopped_or_traced(task)	\
-			((task->state & (TASK_STOPPED | TASK_TRACED)) != 0)
+			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define is_task_loadavg(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0)
 
 #define __set_task_state(tsk, state_value)		\
diff --git a/kernel/signal.c b/kernel/signal.c
index 16c16a3..3f28990 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -457,15 +457,15 @@ void signal_wake_up(struct task_struct *t, int resume)
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced case.
-	 * We don't check t->state here because there is a race with it
+	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * case. We don't check t->state here because there is a race with it
 	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
 	mask = TASK_INTERRUPTIBLE;
 	if (resume)
-		mask |= TASK_STOPPED | TASK_TRACED;
+		mask |= TASK_WAKEKILL;
 	if (!wake_up_state(t, mask))
 		kick_process(t);
 }
@@ -621,7 +621,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			 * Wake up the stopped thread _after_ setting
 			 * TIF_SIGPENDING
 			 */
-			state = TASK_STOPPED;
+			state = __TASK_STOPPED;
 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
 				set_tsk_thread_flag(t, TIF_SIGPENDING);
 				state |= TASK_INTERRUPTIBLE;
-- 
1.4.4.2



* [PATCH 4/5] Add lock_page_killable
  2007-10-18 22:25 [PATCH 0/5] TASK_KILLABLE Matthew Wilcox
                   ` (2 preceding siblings ...)
  2007-10-18 22:26 ` [PATCH 3/5] Add TASK_WAKEKILL Matthew Wilcox
@ 2007-10-18 22:26 ` Matthew Wilcox
  2007-10-18 22:26 ` [PATCH 5/5] Make wait_on_retry_sync_kiocb killable Matthew Wilcox
  2007-10-25  3:55 ` [PATCH 0/5] TASK_KILLABLE Nick Piggin
  5 siblings, 0 replies; 13+ messages in thread
From: Matthew Wilcox @ 2007-10-18 22:26 UTC (permalink / raw)
  To: linux-kernel; +Cc: Matthew Wilcox

and associated infrastructure such as sync_page_killable and
fatal_signal_pending.  Use lock_page_killable in do_generic_mapping_read()
to allow us to kill `cat' of a file on an NFS-mounted filesystem.
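
A sketch of the intended calling pattern (mirroring what
do_generic_mapping_read() does below; whether to report -EIO or
-EINTR is raised again in the 5/5 discussion):

        if (lock_page_killable(page)) {
                /* a fatal signal arrived while waiting for the lock */
                error = -EIO;
                goto readpage_error;
        }
        /* the page is locked here */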

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 include/linux/pagemap.h |   14 ++++++++++++++
 include/linux/sched.h   |    9 ++++++++-
 kernel/signal.c         |    5 +++++
 mm/filemap.c            |   25 +++++++++++++++++++++----
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410..4b62a10 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -171,6 +172,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		return __lock_page_killable(page);
+	return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
  */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f02ade4..077893d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1738,7 +1738,14 @@ static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
-  
+
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
diff --git a/kernel/signal.c b/kernel/signal.c
index 3f28990..f89ab8d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -995,6 +995,11 @@ void zap_other_threads(struct task_struct *p)
 	}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
diff --git a/mm/filemap.c b/mm/filemap.c
index 79f24a9..1498e24 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -171,6 +171,12 @@ static int sync_page(void *word)
 	return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+	sync_page(word);
+	return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:	address space structure to write
@@ -575,6 +581,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+	return __wait_on_bit_lock(page_waitqueue(page), &wait,
+					sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
@@ -966,7 +980,8 @@ page_ok:
 
 page_not_up_to_date:
 		/* Get exclusive access to the page ... */
-		lock_page(page);
+		if (lock_page_killable(page))
+			goto readpage_eio;
 
 		/* Did it get truncated before we got the lock? */
 		if (!page->mapping) {
@@ -994,7 +1009,8 @@ readpage:
 		}
 
 		if (!PageUptodate(page)) {
-			lock_page(page);
+			if (lock_page_killable(page))
+				goto readpage_eio;
 			if (!PageUptodate(page)) {
 				if (page->mapping == NULL) {
 					/*
@@ -1005,15 +1021,16 @@ readpage:
 					goto find_page;
 				}
 				unlock_page(page);
-				error = -EIO;
 				shrink_readahead_size_eio(filp, ra);
-				goto readpage_error;
+				goto readpage_eio;
 			}
 			unlock_page(page);
 		}
 
 		goto page_ok;
 
+readpage_eio:
+		error = -EIO;
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
 		desc->error = error;
-- 
1.4.4.2



* [PATCH 5/5] Make wait_on_retry_sync_kiocb killable
  2007-10-18 22:25 [PATCH 0/5] TASK_KILLABLE Matthew Wilcox
                   ` (3 preceding siblings ...)
  2007-10-18 22:26 ` [PATCH 4/5] Add lock_page_killable Matthew Wilcox
@ 2007-10-18 22:26 ` Matthew Wilcox
  2007-10-25  3:53   ` Nick Piggin
  2007-10-25  3:55 ` [PATCH 0/5] TASK_KILLABLE Nick Piggin
  5 siblings, 1 reply; 13+ messages in thread
From: Matthew Wilcox @ 2007-10-18 22:26 UTC (permalink / raw)
  To: linux-kernel; +Cc: Matthew Wilcox

Use TASK_KILLABLE to allow wait_on_retry_sync_kiocb to return -EINTR.
All callers then check the return value and break out of their loops.

Signed-off-by: Matthew Wilcox <matthew@wil.cx>
---
 fs/read_write.c |   17 ++++++++++++-----
 1 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/fs/read_write.c b/fs/read_write.c
index 124693e..3196a3b 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -218,14 +218,15 @@ Einval:
 	return -EINVAL;
 }
 
-static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
+static int wait_on_retry_sync_kiocb(struct kiocb *iocb)
 {
-	set_current_state(TASK_UNINTERRUPTIBLE);
+	set_current_state(TASK_KILLABLE);
 	if (!kiocbIsKicked(iocb))
 		schedule();
 	else
 		kiocbClearKicked(iocb);
 	__set_current_state(TASK_RUNNING);
+	return fatal_signal_pending(current) ? -EINTR : 0;
 }
 
 ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
@@ -242,7 +243,9 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp
 		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 		if (ret != -EIOCBRETRY)
 			break;
-		wait_on_retry_sync_kiocb(&kiocb);
+		ret = wait_on_retry_sync_kiocb(&kiocb);
+		if (ret)
+			break;
 	}
 
 	if (-EIOCBQUEUED == ret)
@@ -300,7 +303,9 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
 		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
 		if (ret != -EIOCBRETRY)
 			break;
-		wait_on_retry_sync_kiocb(&kiocb);
+		ret = wait_on_retry_sync_kiocb(&kiocb);
+		if (ret)
+			break;
 	}
 
 	if (-EIOCBQUEUED == ret)
@@ -466,7 +471,9 @@ ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
 		ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
 		if (ret != -EIOCBRETRY)
 			break;
-		wait_on_retry_sync_kiocb(&kiocb);
+		ret = wait_on_retry_sync_kiocb(&kiocb);
+		if (ret)
+			break;
 	}
 
 	if (ret == -EIOCBQUEUED)
-- 
1.4.4.2



* Re: [PATCH 1/5] Use wake_up_locked() in eventpoll
  2007-10-18 22:25 ` [PATCH 1/5] Use wake_up_locked() in eventpoll Matthew Wilcox
@ 2007-10-19  3:56   ` Arjan van de Ven
  2007-10-19 16:28     ` Matthew Wilcox
  0 siblings, 1 reply; 13+ messages in thread
From: Arjan van de Ven @ 2007-10-19  3:56 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-kernel, Matthew Wilcox

On Thu, 18 Oct 2007 18:25:58 -0400
Matthew Wilcox <matthew@wil.cx> wrote:

Have you tested this patch with LOCKDEP enabled? eventpoll is... tricky
in what it does with waitqueues and locks.... and some of this stuff is
there, afaik, to deal with that. You're now changing this ... call me
chicken :)


* Re: [PATCH 1/5] Use wake_up_locked() in eventpoll
  2007-10-19  3:56   ` Arjan van de Ven
@ 2007-10-19 16:28     ` Matthew Wilcox
  0 siblings, 0 replies; 13+ messages in thread
From: Matthew Wilcox @ 2007-10-19 16:28 UTC (permalink / raw)
  To: Arjan van de Ven; +Cc: linux-kernel, Matthew Wilcox

On Thu, Oct 18, 2007 at 08:56:23PM -0700, Arjan van de Ven wrote:
> Have you tested this patch with LOCKDEP enabled? eventpoll is... tricky
> in what it does with waitqueues and locks.... and some of this stuff is
> there, afaik, to deal with that. You're now changing this ... call me
> chicken :)

I haven't tested it, but it's a simple textual substitution:

#define wake_up_locked(x)	__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)

so it should be identical in effect.

-- 
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours.  We can't possibly take such
a retrograde step."


* Re: [PATCH 2/5] Use macros instead of TASK_ flags
  2007-10-18 22:25 ` [PATCH 2/5] Use macros instead of TASK_ flags Matthew Wilcox
@ 2007-10-25  3:50   ` Nick Piggin
  0 siblings, 0 replies; 13+ messages in thread
From: Nick Piggin @ 2007-10-25  3:50 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-kernel, Matthew Wilcox

On Friday 19 October 2007 08:25, Matthew Wilcox wrote:
> Abstracting away direct uses of TASK_ flags allows us to change the
> definitions of the task flags more easily.
>
> Also restructure do_wait() a little
>
> Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
>
> [...]
>
> +/* Convenience macros for the sake of wake_up */
> +#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
> +#define TASK_ALL		(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)
> +
> +/* get_task_state() */
> +#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
> +				 TASK_UNINTERRUPTIBLE | TASK_STOPPED | \
> +				 TASK_TRACED)

I think it would be nicer if you made it explicit in the name that
these are not individual flags. Maybe it doesn't matter though...

Also, TASK_NORMAL / TASK_ALL aren't very good names. TASK_SLEEP_NORMAL /
TASK_SLEEP_ALL might be a bit more helpful?


* Re: [PATCH 5/5] Make wait_on_retry_sync_kiocb killable
  2007-10-18 22:26 ` [PATCH 5/5] Make wait_on_retry_sync_kiocb killable Matthew Wilcox
@ 2007-10-25  3:53   ` Nick Piggin
  0 siblings, 0 replies; 13+ messages in thread
From: Nick Piggin @ 2007-10-25  3:53 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-kernel

On Friday 19 October 2007 08:26, Matthew Wilcox wrote:
> Use TASK_KILLABLE to allow wait_on_retry_sync_kiocb to return -EINTR.
> All callers then check the return value and break out of their loops.
>
> Signed-off-by: Matthew Wilcox <matthew@wil.cx>
>
> [...]
>
> -static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
> +static int wait_on_retry_sync_kiocb(struct kiocb *iocb)
>  {
> -	set_current_state(TASK_UNINTERRUPTIBLE);
> +	set_current_state(TASK_KILLABLE);
>  	if (!kiocbIsKicked(iocb))
>  		schedule();
>  	else
>  		kiocbClearKicked(iocb);
>  	__set_current_state(TASK_RUNNING);
> +	return fatal_signal_pending(current) ? -EINTR : 0;

Although the EINTR never gets to userspace anyway, is there a good
reason why the last patch for do_generic_mapping_read doesn't pass
back -EINTR?


* Re: [PATCH 0/5] TASK_KILLABLE
  2007-10-18 22:25 [PATCH 0/5] TASK_KILLABLE Matthew Wilcox
                   ` (4 preceding siblings ...)
  2007-10-18 22:26 ` [PATCH 5/5] Make wait_on_retry_sync_kiocb killable Matthew Wilcox
@ 2007-10-25  3:55 ` Nick Piggin
  5 siblings, 0 replies; 13+ messages in thread
From: Nick Piggin @ 2007-10-25  3:55 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-kernel

On Friday 19 October 2007 08:25, Matthew Wilcox wrote:
> This series of patches introduces the facility to deliver only fatal
> signals to tasks which are otherwise waiting uninterruptibly.


This is pretty nice, I think. It is also a significant piece of
infrastructure required to fix some of the main oom kill deadlocks.


