linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* Re: [rfc/patch] wake_up_info() draft ...
@ 2004-01-02  2:54 Manfred Spraul
  2004-01-02  3:31 ` Davide Libenzi
  0 siblings, 1 reply; 7+ messages in thread
From: Manfred Spraul @ 2004-01-02  2:54 UTC (permalink / raw)
  To: Davide Libenzi, linux-kernel

Hi Davide,

I think the patch adds unnecessary bloat, and mandates one particular 
use of the wait queue info interface.
For example, why does remove_wait_queue_info copy the wakeup info 
around? That's not how I would use it for fasync: I would send the 
necessary signals directly from the wakeup handler, and 
remove_wait_queue_info is called during sys_close handling, info discarded.

I'm thinking about a simpler approach: add a wake_up_info() function, 
and forward the info parameter to the wait_queue_func_t. This means 
changing the prototype of this function - there shouldn't be that many 
instances. NULL is passed if the normal wake_up functions are used. No 
additional fields in the wait queue entry are required. Then I would 
convert kill_fasync to that interface, with the band value from 
kill_fasync as the info parameter. A custom wait queue func does the 
signal sending. fasync_helper would be kmalloc+add_wait_queue.

--
    Manfred


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [rfc/patch] wake_up_info() draft ...
  2004-01-02  2:54 [rfc/patch] wake_up_info() draft Manfred Spraul
@ 2004-01-02  3:31 ` Davide Libenzi
  2004-01-02  9:32   ` Manfred Spraul
  0 siblings, 1 reply; 7+ messages in thread
From: Davide Libenzi @ 2004-01-02  3:31 UTC (permalink / raw)
  To: Manfred Spraul; +Cc: Linux Kernel Mailing List

On Fri, 2 Jan 2004, Manfred Spraul wrote:

> Hi Davide,

Hi Manfred,


> I think the patch adds unnecessary bloat, and mandates one particular 
> use of the wait queue info interface.

why are you saying so?


> For example, why does remove_wait_queue_info copy the wakeup info 
> around? That's not how I would use it for fasync: I would send the 
> necessary signals directly from the wakeup handler, and 
> remove_wait_queue_info is called during sys_close handling, info discarded.

As I wrote inside the email, only info-aware waiters will do a 
remove_wait_queue_info(). Others will simply do remove_wait_queue() w/out 
modifications to existing code. The patch I sent (the second one actually) 
is running in my machine, and the changes I had to make are exactly what 
you see inside the patch. Zero modifications to existing code. The patch, 
as is, perfectly fits what you want to do. The waker sets info.data to the 
signal mask, and the woken task does a remove_wait_queue_info() and gets the 
signal mask. In your case no dtor/dup are necessary.
This is the one that includes John's suggestions ...



- Davide



--- linux-2.6.1-rc1/include/linux/wait.h._orig	2004-01-01 14:38:45.569267672 -0800
+++ linux-2.6.1-rc1/include/linux/wait.h	2004-01-01 14:48:41.206717016 -0800
@@ -16,16 +16,24 @@
 #include <linux/spinlock.h>
 #include <asm/system.h>
 
+typedef struct __wait_info wait_info_t;
 typedef struct __wait_queue wait_queue_t;
 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync);
 extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync);
 
+struct __wait_info {
+	void *data;
+	void *(*dup)(void *);
+	void (*dtor)(void *);
+};
+
 struct __wait_queue {
 	unsigned int flags;
 #define WQ_FLAG_EXCLUSIVE	0x01
 	struct task_struct * task;
 	wait_queue_func_t func;
 	struct list_head task_list;
+	wait_info_t info;
 };
 
 struct __wait_queue_head {
@@ -42,6 +50,7 @@
 #define __WAITQUEUE_INITIALIZER(name, tsk) {				\
 	.task		= tsk,						\
 	.func		= default_wake_function,			\
+	.info		= { NULL, NULL, NULL },				\
 	.task_list	= { NULL, NULL } }
 
 #define DECLARE_WAITQUEUE(name, tsk)					\
@@ -60,11 +69,40 @@
 	INIT_LIST_HEAD(&q->task_list);
 }
 
+static inline void init_wait_info(wait_info_t *i)
+{
+	i->data = NULL;
+	i->dup = NULL;
+	i->dtor = NULL;
+}
+
+static inline void close_wait_info(wait_info_t *i)
+{
+	if (i->dtor)
+		i->dtor(i->data);
+	init_wait_info(i);
+}
+
+static inline void transfer_wait_info(wait_info_t *d, wait_info_t *s)
+{
+	if (d->dtor)
+		d->dtor(d->data);
+	*d = *s;
+}
+
+static inline void dup_wait_info(wait_info_t *d, wait_info_t *s)
+{
+	transfer_wait_info(d, s);
+	if (s->dup)
+		d->data = s->dup(s->data);
+}
+
 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
 {
 	q->flags = 0;
 	q->task = p;
 	q->func = default_wake_function;
+	init_wait_info(&q->info);
 }
 
 static inline void init_waitqueue_func_entry(wait_queue_t *q,
@@ -73,6 +111,7 @@
 	q->flags = 0;
 	q->task = NULL;
 	q->func = func;
+	init_wait_info(&q->info);
 }
 
 static inline int waitqueue_active(wait_queue_head_t *q)
@@ -83,6 +122,10 @@
 extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
 extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
 extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+extern void FASTCALL(remove_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+					    wait_info_t *info));
+extern void FASTCALL(geti_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+					  wait_info_t *info));
 
 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
 {
@@ -102,11 +145,13 @@
 							wait_queue_t *old)
 {
 	list_del(&old->task_list);
+	close_wait_info(&old->info);
 }
 
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr, wait_info_t *info));
 
 #define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
 #define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
@@ -117,6 +162,8 @@
 #define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
 #define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_info(x, i)		__wake_up_info((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, (i))
+#define wake_up_all_info(x, i)		__wake_up_info((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, (i))
 
 #define __wait_event(wq, condition) 					\
 do {									\
--- linux-2.6.1-rc1/kernel/fork.c._orig	2004-01-01 14:38:24.058537800 -0800
+++ linux-2.6.1-rc1/kernel/fork.c	2004-01-01 14:39:32.537127472 -0800
@@ -125,6 +125,32 @@
 
 EXPORT_SYMBOL(remove_wait_queue);
 
+void remove_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+			    wait_info_t *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	transfer_wait_info(info, &wait->info);
+	__remove_wait_queue(q, wait);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(remove_wait_queue_info);
+
+void geti_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+			  wait_info_t *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	transfer_wait_info(info, &wait->info);
+	init_wait_info(&wait->info);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(geti_wait_queue_info);
+
 
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
--- linux-2.6.1-rc1/kernel/sched.c._orig	2004-01-01 14:38:34.353972656 -0800
+++ linux-2.6.1-rc1/kernel/sched.c	2004-01-01 14:44:37.615748472 -0800
@@ -1649,7 +1649,8 @@
  * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			     int nr_exclusive, int sync, wait_info_t *info)
 {
 	struct list_head *tmp, *next;
 
@@ -1658,6 +1659,8 @@
 		unsigned flags;
 		curr = list_entry(tmp, wait_queue_t, task_list);
 		flags = curr->flags;
+		if (info)
+			dup_wait_info(&curr->info, info);
 		if (curr->func(curr, mode, sync) &&
 		    (flags & WQ_FLAG_EXCLUSIVE) &&
 		    !--nr_exclusive)
@@ -1676,7 +1679,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, 0);
+	__wake_up_common(q, mode, nr_exclusive, 0, NULL);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -1687,7 +1690,7 @@
  */
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
-	__wake_up_common(q, mode, 1, 0);
+	__wake_up_common(q, mode, 1, 0, NULL);
 }
 
 /**
@@ -1712,21 +1715,41 @@
 
 	spin_lock_irqsave(&q->lock, flags);
 	if (likely(nr_exclusive))
-		__wake_up_common(q, mode, nr_exclusive, 1);
+		__wake_up_common(q, mode, nr_exclusive, 1, NULL);
 	else
-		__wake_up_common(q, mode, nr_exclusive, 0);
+		__wake_up_common(q, mode, nr_exclusive, 0, NULL);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
+/**
+ * __wake_up_info - wake up threads blocked on a waitqueue by passing an information token.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @info: information token passed to waiters
+ */
+void __wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr_exclusive,
+		    wait_info_t *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_common(q, mode, nr_exclusive, 0, info);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(__wake_up_info);
+
 void complete(struct completion *x)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
@@ -1738,7 +1761,8 @@
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [rfc/patch] wake_up_info() draft ...
  2004-01-02  3:31 ` Davide Libenzi
@ 2004-01-02  9:32   ` Manfred Spraul
  2004-01-02 17:07     ` Davide Libenzi
  0 siblings, 1 reply; 7+ messages in thread
From: Manfred Spraul @ 2004-01-02  9:32 UTC (permalink / raw)
  To: Davide Libenzi; +Cc: Linux Kernel Mailing List

Davide Libenzi wrote:

>On Fri, 2 Jan 2004, Manfred Spraul wrote:
>
>  
>
>>Hi Davide,
>>    
>>
>
>Hi Manfred,
>
>
>  
>
>>I think the patch adds unnecessary bloat, and mandates one particular 
>>use of the wait queue info interface.
>>    
>>
>
>why are you saying so?
>
>  
>
sizeof(waitqueue_t) increases.

>@@ -1658,6 +1659,8 @@
> 		unsigned flags;
> 		curr = list_entry(tmp, wait_queue_t, task_list);
> 		flags = curr->flags;
>+		if (info)
>+			dup_wait_info(&curr->info, info);
> 		if (curr->func(curr, mode, sync) &&
> 		    (flags & WQ_FLAG_EXCLUSIVE) &&
> 		    !--nr_exclusive)
>
IMHO these two lines belong into curr->func, perhaps with a reference 
implementation that uses

struct wait_queue_entry_info {
    wait_queue_t wait;
    struct wait_info info;
};

We have already a callback pointer, so why add special case code into 
the common codepaths? Custom callbacks could handle the special case of 
an info wakeup.

--
    Manfred


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [rfc/patch] wake_up_info() draft ...
  2004-01-02  9:32   ` Manfred Spraul
@ 2004-01-02 17:07     ` Davide Libenzi
  0 siblings, 0 replies; 7+ messages in thread
From: Davide Libenzi @ 2004-01-02 17:07 UTC (permalink / raw)
  To: Manfred Spraul; +Cc: Linux Kernel Mailing List

On Fri, 2 Jan 2004, Manfred Spraul wrote:

> >why are you saying so?
> >
> >  
> >
> sizeof(waitqueue_t) increases.

Come on Manfred, you can do better :-)



> >@@ -1658,6 +1659,8 @@
> > 		unsigned flags;
> > 		curr = list_entry(tmp, wait_queue_t, task_list);
> > 		flags = curr->flags;
> >+		if (info)
> >+			dup_wait_info(&curr->info, info);
> > 		if (curr->func(curr, mode, sync) &&
> > 		    (flags & WQ_FLAG_EXCLUSIVE) &&
> > 		    !--nr_exclusive)
> >
> IMHO these two lines belong into curr->func, perhaps with a reference 
> implementation that uses
> 
> struct wait_queue_entry_info {
>     wait_queue_t wait;
>     struct wait_info info;
> };
> 
> We have already a callback pointer, so why add special case code into 
> the common codepaths? Custom callbacks could handle the special case of 
> an info wakeup.

I forgot you are the structure wrapper guy :-)) See, the idea of having a 
callback-only thing works fine with things like epoll that use it. So I'd 
be ok with it. The reason for having it done the way I did is that even 
standard woken-up tasks can have the ability to fetch info about the wake 
up from the wait queue item, which is the MCD of the whole wait/wake Linux 
mechanism. Also don't be fooled by the names dup/dtor into thinking that you 
have to kmalloc new data in situations where more than sizeof(void *) is 
needed. You can simply use a ref count method doing atomic_inc() and 
atomic_dec_and_test(). With this method you can fetch info either from the 
callback (it is already stored inside the wait queue item when the 
callback is called) or from the wait queue item itself for std woken-up 
tasks.




- Davide



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [rfc/patch] wake_up_info() draft ...
  2004-01-01 21:31 ` John Gardiner Myers
@ 2004-01-01 22:57   ` Davide Libenzi
  0 siblings, 0 replies; 7+ messages in thread
From: Davide Libenzi @ 2004-01-01 22:57 UTC (permalink / raw)
  To: John Gardiner Myers; +Cc: Linux Kernel Mailing List

On Thu, 1 Jan 2004, John Gardiner Myers wrote:

> Minor issues:
> 
> I don't know why dup_wait_info() returns a value--it is always ignored.  
> If duping can fail, the situation is not particularly recoverable.

Agreed, turned to void.



> I don't like that the dup method is responsible for copying the dup and 
> dtor members of struct __wait_info.  It would be simpler for the common 
> code in dup_wait_info() to always copy the dup and dtor function pointers:
> 
> void * (*dup)(void *);
> 
> static inline void dup_wait_info(wait_info_t *s, wait_info_t *d)
> {
> 	close_wait_info(d);
> 	*d = *s;
> 	if (s->dup)
> 		d->data = s->dup(s->data);
> }

Agreed, also the destructor has been converted to "void (*dtor)(void *)".



> I prefer the style where assignment functions, such as dup_wait_info(), 
> place the destination argument to the left of the source, to mimic the 
> assignment operator and functions such as strcpy().

Ok.



> remove_wait_queue_info() could be optimized slightly by transferring 
> ownership of the wait queue info data instead of duping it.

Yes. I also introduced geti_wait_queue_info() to give code that leaves a 
permanent wait queue insertion (epoll), the ability to get-and-initialize 
info from the wait queue item.
Thank you for the feedback.



- Davide





--- linux-2.6.1-rc1/include/linux/wait.h._orig	2004-01-01 14:38:45.569267672 -0800
+++ linux-2.6.1-rc1/include/linux/wait.h	2004-01-01 14:48:41.206717016 -0800
@@ -16,16 +16,24 @@
 #include <linux/spinlock.h>
 #include <asm/system.h>
 
+typedef struct __wait_info wait_info_t;
 typedef struct __wait_queue wait_queue_t;
 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync);
 extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync);
 
+struct __wait_info {
+	void *data;
+	void *(*dup)(void *);
+	void (*dtor)(void *);
+};
+
 struct __wait_queue {
 	unsigned int flags;
 #define WQ_FLAG_EXCLUSIVE	0x01
 	struct task_struct * task;
 	wait_queue_func_t func;
 	struct list_head task_list;
+	wait_info_t info;
 };
 
 struct __wait_queue_head {
@@ -42,6 +50,7 @@
 #define __WAITQUEUE_INITIALIZER(name, tsk) {				\
 	.task		= tsk,						\
 	.func		= default_wake_function,			\
+	.info		= { NULL, NULL, NULL },				\
 	.task_list	= { NULL, NULL } }
 
 #define DECLARE_WAITQUEUE(name, tsk)					\
@@ -60,11 +69,40 @@
 	INIT_LIST_HEAD(&q->task_list);
 }
 
+static inline void init_wait_info(wait_info_t *i)
+{
+	i->data = NULL;
+	i->dup = NULL;
+	i->dtor = NULL;
+}
+
+static inline void close_wait_info(wait_info_t *i)
+{
+	if (i->dtor)
+		i->dtor(i->data);
+	init_wait_info(i);
+}
+
+static inline void transfer_wait_info(wait_info_t *d, wait_info_t *s)
+{
+	if (d->dtor)
+		d->dtor(d->data);
+	*d = *s;
+}
+
+static inline void dup_wait_info(wait_info_t *d, wait_info_t *s)
+{
+	transfer_wait_info(d, s);
+	if (s->dup)
+		d->data = s->dup(s->data);
+}
+
 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
 {
 	q->flags = 0;
 	q->task = p;
 	q->func = default_wake_function;
+	init_wait_info(&q->info);
 }
 
 static inline void init_waitqueue_func_entry(wait_queue_t *q,
@@ -73,6 +111,7 @@
 	q->flags = 0;
 	q->task = NULL;
 	q->func = func;
+	init_wait_info(&q->info);
 }
 
 static inline int waitqueue_active(wait_queue_head_t *q)
@@ -83,6 +122,10 @@
 extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
 extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
 extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+extern void FASTCALL(remove_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+					    wait_info_t *info));
+extern void FASTCALL(geti_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+					  wait_info_t *info));
 
 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
 {
@@ -102,11 +145,13 @@
 							wait_queue_t *old)
 {
 	list_del(&old->task_list);
+	close_wait_info(&old->info);
 }
 
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr, wait_info_t *info));
 
 #define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
 #define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
@@ -117,6 +162,8 @@
 #define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
 #define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_info(x, i)		__wake_up_info((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, (i))
+#define wake_up_all_info(x, i)		__wake_up_info((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, (i))
 
 #define __wait_event(wq, condition) 					\
 do {									\
--- linux-2.6.1-rc1/kernel/fork.c._orig	2004-01-01 14:38:24.058537800 -0800
+++ linux-2.6.1-rc1/kernel/fork.c	2004-01-01 14:39:32.537127472 -0800
@@ -125,6 +125,32 @@
 
 EXPORT_SYMBOL(remove_wait_queue);
 
+void remove_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+			    wait_info_t *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	transfer_wait_info(info, &wait->info);
+	__remove_wait_queue(q, wait);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(remove_wait_queue_info);
+
+void geti_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+			  wait_info_t *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	transfer_wait_info(info, &wait->info);
+	init_wait_info(&wait->info);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(geti_wait_queue_info);
+
 
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
--- linux-2.6.1-rc1/kernel/sched.c._orig	2004-01-01 14:38:34.353972656 -0800
+++ linux-2.6.1-rc1/kernel/sched.c	2004-01-01 14:44:37.615748472 -0800
@@ -1649,7 +1649,8 @@
  * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			     int nr_exclusive, int sync, wait_info_t *info)
 {
 	struct list_head *tmp, *next;
 
@@ -1658,6 +1659,8 @@
 		unsigned flags;
 		curr = list_entry(tmp, wait_queue_t, task_list);
 		flags = curr->flags;
+		if (info)
+			dup_wait_info(&curr->info, info);
 		if (curr->func(curr, mode, sync) &&
 		    (flags & WQ_FLAG_EXCLUSIVE) &&
 		    !--nr_exclusive)
@@ -1676,7 +1679,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, 0);
+	__wake_up_common(q, mode, nr_exclusive, 0, NULL);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -1687,7 +1690,7 @@
  */
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
-	__wake_up_common(q, mode, 1, 0);
+	__wake_up_common(q, mode, 1, 0, NULL);
 }
 
 /**
@@ -1712,21 +1715,41 @@
 
 	spin_lock_irqsave(&q->lock, flags);
 	if (likely(nr_exclusive))
-		__wake_up_common(q, mode, nr_exclusive, 1);
+		__wake_up_common(q, mode, nr_exclusive, 1, NULL);
 	else
-		__wake_up_common(q, mode, nr_exclusive, 0);
+		__wake_up_common(q, mode, nr_exclusive, 0, NULL);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
+/**
+ * __wake_up_info - wake up threads blocked on a waitqueue by passing an information token.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @info: information token passed to waiters
+ */
+void __wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr_exclusive,
+		    wait_info_t *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_common(q, mode, nr_exclusive, 0, info);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(__wake_up_info);
+
 void complete(struct completion *x)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
@@ -1738,7 +1761,8 @@
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [rfc/patch] wake_up_info() draft ...
       [not found] <fa.nd6oiha.q2gq9k@ifi.uio.no>
@ 2004-01-01 21:31 ` John Gardiner Myers
  2004-01-01 22:57   ` Davide Libenzi
  0 siblings, 1 reply; 7+ messages in thread
From: John Gardiner Myers @ 2004-01-01 21:31 UTC (permalink / raw)
  To: linux-kernel

Minor issues:

I don't know why dup_wait_info() returns a value--it is always ignored.  
If duping can fail, the situation is not particularly recoverable.

I don't like that the dup method is responsible for copying the dup and 
dtor members of struct __wait_info.  It would be simpler for the common 
code in dup_wait_info() to always copy the dup and dtor function pointers:

void * (*dup)(void *);

static inline void dup_wait_info(wait_info_t *s, wait_info_t *d)
{
	close_wait_info(d);
	*d = *s;
	if (s->dup)
		d->data = s->dup(s->data);
}

I prefer the style where assignment functions, such as dup_wait_info(), 
place the destination argument to the left of the source, to mimic the 
assignment operator and functions such as strcpy().

remove_wait_queue_info() could be optimized slightly by transferring 
ownership of the wait queue info data instead of duping it.



^ permalink raw reply	[flat|nested] 7+ messages in thread

* [rfc/patch] wake_up_info() draft ...
@ 2004-01-01  3:46 Davide Libenzi
  0 siblings, 0 replies; 7+ messages in thread
From: Davide Libenzi @ 2004-01-01  3:46 UTC (permalink / raw)
  To: Linux Kernel Mailing List; +Cc: Linus Torvalds


We currently have a nice wait/wake infrastructure to have processes to 
drop themselves inside wait queues waiting someone else to wake them up. 
The current bits do work fine but they are IMO missing of a powerful 
feature. That is, transmit to the waiters some information about the cause 
of the wake up. With this missing, waiters would then have to perform 
additional steps to acquire the requested information. And this can 
involve grabbing lock/sems and calling functions like, for example, 
f_op->poll(). Adding such capability to the current infrastructure is both 
trivial and does not screw up the existing code, that can be (when 
necessary) ported gradually. The code will add a structure:

typedef struct wait_info_t;

struct __wait_info {
        void *data;
        int (*dup)(wait_info_t *, wait_info_t *);
        void (*dtor)(wait_info_t *);
};

to the wait queue item:

struct __wait_queue {
        unsigned int flags;
#define WQ_FLAG_EXCLUSIVE       0x01
        struct task_struct * task;
        wait_queue_func_t func;
        struct list_head task_list;
        wait_info_t info;
};

The wait info structure has a member "data" that can be anything from a 
pointer to an integer to a bitmask. Two functions can be provided to help 
managing more complex info data. The "dtor" is the (guess what) destructor 
of the info, while the "dup" is the replicator of the structure itself. When 
the info structure does not require any special treatment, both "dtor" and 
"dup" will be simply NULL. A typical info-aware waiter will do something 
like:

wait_queue_t wait;
wait_info_t info;

init_waitqueue_entry(&wait, current);
add_wait_queue(&wait_list, &wait);

>> Usual Wait Loop

remove_wait_queue_info(&wait_list, &wait, &info);

>> Look up info

close_wait_info(&info);


The waker will instead do, for example, something like (in the most simple 
case):

wait_info_t info;

init_wait_info(&info);
info.data = (void *) POLLIN;

wake_up_info(&wait_list, &info);


The one below is a very first draft of the code. What do you think?



- Davide




--- linux-2.6.1-rc1-mm1/include/linux/wait.h.orig	2003-12-31 16:30:03.872807704 -0800
+++ linux-2.6.1-rc1-mm1/include/linux/wait.h	2003-12-31 19:05:42.196166768 -0800
@@ -16,16 +16,24 @@
 #include <linux/spinlock.h>
 #include <asm/system.h>
 
+typedef struct wait_info_t;
 typedef struct __wait_queue wait_queue_t;
 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync);
 extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync);
 
+struct __wait_info {
+	void *data;
+	int (*dup)(wait_info_t *, wait_info_t *);
+	void (*dtor)(wait_info_t *);
+};
+
 struct __wait_queue {
 	unsigned int flags;
 #define WQ_FLAG_EXCLUSIVE	0x01
 	struct task_struct * task;
 	wait_queue_func_t func;
 	struct list_head task_list;
+	wait_info_t info;
 };
 
 struct __wait_queue_head {
@@ -42,6 +50,7 @@
 #define __WAITQUEUE_INITIALIZER(name, tsk) {				\
 	.task		= tsk,						\
 	.func		= default_wake_function,			\
+	.info		= { NULL, NULL, NULL },				\
 	.task_list	= { NULL, NULL } }
 
 #define DECLARE_WAITQUEUE(name, tsk)					\
@@ -60,11 +69,34 @@
 	INIT_LIST_HEAD(&q->task_list);
 }
 
+static inline void init_wait_info(wait_info_t *i)
+{
+	i->data = NULL;
+	i->dup = NULL;
+	i->dtor = NULL;
+}
+
+static inline void close_wait_info(wait_info_t *i)
+{
+	if (i->dtor)
+		i->dtor(i);
+}
+
+static inline int dup_wait_info(wait_info_t *s, wait_info_t *d)
+{
+	close_wait_info(d);
+	if (s->dup)
+		return s->dup(s, d);
+	*d = *s;
+	return 0;
+}
+
 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
 {
 	q->flags = 0;
 	q->task = p;
 	q->func = default_wake_function;
+	init_wait_info(&q->info);
 }
 
 static inline void init_waitqueue_func_entry(wait_queue_t *q,
@@ -73,6 +105,7 @@
 	q->flags = 0;
 	q->task = NULL;
 	q->func = func;
+	init_wait_info(&q->info);
 }
 
 static inline int waitqueue_active(wait_queue_head_t *q)
@@ -92,6 +125,8 @@
 extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
 extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
 extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+extern void FASTCALL(remove_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+					    wait_info_t *info));
 
 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
 {
@@ -111,11 +146,13 @@
 							wait_queue_t *old)
 {
 	list_del(&old->task_list);
+	close_wait_info(&old->info);
 }
 
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr, wait_info_t *info));
 
 #define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
 #define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
@@ -126,6 +163,7 @@
 #define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
 #define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_info(x, i)		__wake_up_info((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, (i))
 
 #define __wait_event(wq, condition) 					\
 do {									\
--- linux-2.6.1-rc1-mm1/kernel/sched.c.orig	2003-12-31 16:47:52.374370776 -0800
+++ linux-2.6.1-rc1-mm1/kernel/sched.c	2003-12-31 19:04:38.614832600 -0800
@@ -1708,7 +1708,7 @@
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			     int nr_exclusive, int sync)
+			     int nr_exclusive, int sync, wait_info_t *info)
 {
 	struct list_head *tmp, *next;
 
@@ -1717,6 +1717,8 @@
 		unsigned flags;
 		curr = list_entry(tmp, wait_queue_t, task_list);
 		flags = curr->flags;
+		if (info)
+			dup_wait_info(info, &curr->info);
 		if (curr->func(curr, mode, sync) &&
 		    (flags & WQ_FLAG_EXCLUSIVE) &&
 		    !--nr_exclusive)
@@ -1735,7 +1737,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, 0);
+	__wake_up_common(q, mode, nr_exclusive, 0, NULL);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -1746,7 +1748,7 @@
  */
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
-	__wake_up_common(q, mode, 1, 0);
+	__wake_up_common(q, mode, 1, 0, NULL);
 }
 
 /**
@@ -1771,14 +1773,33 @@
 
 	spin_lock_irqsave(&q->lock, flags);
 	if (likely(nr_exclusive))
-		__wake_up_common(q, mode, nr_exclusive, 1);
+		__wake_up_common(q, mode, nr_exclusive, 1, NULL);
 	else
-		__wake_up_common(q, mode, nr_exclusive, 0);
+		__wake_up_common(q, mode, nr_exclusive, 0, NULL);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
+/**
+ * __wake_up_info - wake up threads blocked on a waitqueue by passing an information token.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @info: information token passed to waiters
+ */
+void __wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr_exclusive,
+		    wait_info_t *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_common(q, mode, nr_exclusive, 0, info);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(__wake_up_info);
+
 void complete(struct completion *x)
 {
 	unsigned long flags;
@@ -1786,7 +1807,7 @@
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
 	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0);
+			 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
@@ -1799,7 +1820,7 @@
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
 	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0);
+			 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
--- linux-2.6.1-rc1-mm1/kernel/fork.c.orig	2003-12-31 18:59:38.694427432 -0800
+++ linux-2.6.1-rc1-mm1/kernel/fork.c	2003-12-31 19:03:06.014909928 -0800
@@ -124,6 +124,19 @@
 
 EXPORT_SYMBOL(remove_wait_queue);
 
+void remove_wait_queue_info(wait_queue_head_t *q, wait_queue_t * wait,
+			    wait_info_t *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	dup_wait_info(&wait->info, info);
+	__remove_wait_queue(q, wait);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(remove_wait_queue_info);
+
 
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,



^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2004-01-02 17:07 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2004-01-02  2:54 [rfc/patch] wake_up_info() draft Manfred Spraul
2004-01-02  3:31 ` Davide Libenzi
2004-01-02  9:32   ` Manfred Spraul
2004-01-02 17:07     ` Davide Libenzi
     [not found] <fa.nd6oiha.q2gq9k@ifi.uio.no>
2004-01-01 21:31 ` John Gardiner Myers
2004-01-01 22:57   ` Davide Libenzi
  -- strict thread matches above, loose matches on Subject: below --
2004-01-01  3:46 Davide Libenzi

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).