linux-kernel.vger.kernel.org archive mirror
* Re: [Lse-tech] Re: CPU affinity & IPI latency (FIX)_
       [not found] <OF46AE0C03.7FA13916-ON85256A8C.005E4CEB@pok.ibm.com>
@ 2001-07-17 17:33 ` Hubertus Franke
  2001-07-17 18:00   ` Davide Libenzi
  2001-07-17 18:11   ` Davide Libenzi
  0 siblings, 2 replies; 6+ messages in thread
From: Hubertus Franke @ 2001-07-17 17:33 UTC (permalink / raw)
  To: ak, lse-tech, linux-kernel

[-- Attachment #1: Type: text/plain, Size: 6062 bytes --]

In an attempt to inline the code, somehow the tabs got lost. So here is the
attached correct patch for 2.4.5. Please try it and let me know whether you
see your problems disappear and/or others arise.
The sketchy writeup is still the same.

-- Hubertus Franke  (frankeh@us.ibm.com)


>
>
> Enclosed is a patch fixing the process-bouncing problem, a.k.a.
> "CPU affinity & IPI latency".
>
> The patch is against 2.4.5 (all I had last night), so Andi, could you please
> test it on your stuff and see whether it works for you. It works on stock
> apps.
>
> Basic principle is as follows:
>
> When reschedule_idle(p) decides to send an IPI for <p>, it sets a pointer
> to <p> in schedule_data(target_cpu). We raise the <p->has_cpu> flag
> to indicate that <p> should not be considered for scheduling elsewhere.
>
> In schedule(), after going to <still_running> we check whether this call
> was triggered by such an IPI; if so, we always take the reserved task.
>
> To be functionally correct, we also consider the reservation in
> reschedule_idle(), i.e., we first look for the reservation, then for idle,
> and then at cpu_curr to determine the best task.
> If indeed the decision is to "preempt" the reserving task (actually, it's
> not running yet), we simply lower the current reservation (has_cpu = 0)
> and replace it with <p>.
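
In outline, the reservation handshake is the following -- a minimal sketch
paraphrasing the patch below, with runqueue locking, the idle fast path,
and the goodness() scan elided:

	/* Sender side, in reschedule_idle(p): instead of only sending an
	 * IPI and hoping <p> is still the best choice by the time the
	 * target CPU runs schedule(), pin <p> to that CPU first.
	 */
	p->has_cpu = 1;                /* hide p from other CPUs' selection */
	cpu_resched(best_cpu) = p;     /* reserve the target CPU for p      */
	smp_send_reschedule(best_cpu); /* IPI: force schedule() there       */

	/* Receiver side, in schedule(): honor a pending reservation before
	 * the normal goodness() selection, consuming it exactly once.
	 */
	next = cpu_resched(this_cpu);
	if (next) {
		cpu_resched(this_cpu) = NULL;  /* consume the reservation */
		goto found_next;
	}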
>
> -- Hubertus Franke  (frankeh@us.ibm.com)
>
> diff -uwrbBN linux-2.4.5-van/kernel/sched.c linux-2.4.5-ca/kernel/sched.c
> --- linux-2.4.5-van/kernel/sched.c      Fri Apr 20 21:26:16 2001
> +++ linux-2.4.5-ca/kernel/sched.c       Tue Jul 17 07:31:10 2001
> @@ -97,12 +97,14 @@
>  static union {
>         struct schedule_data {
>                 struct task_struct * curr;
> +               struct task_struct * resched;
>                 cycles_t last_schedule;
>         } schedule_data;
>         char __pad [SMP_CACHE_BYTES];
> -} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
> +} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0,0}}};
>
>  #define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
> +#define cpu_resched(cpu) aligned_data[(cpu)].schedule_data.resched
>  #define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule
>
>  struct kernel_stat kstat;
> @@ -208,7 +210,7 @@
>  {
>  #ifdef CONFIG_SMP
>         int this_cpu = smp_processor_id();
> -       struct task_struct *tsk, *target_tsk;
> +       struct task_struct *tsk, *target_tsk, *rtsk;
>         int cpu, best_cpu, i, max_prio;
>         cycles_t oldest_idle;
>
> @@ -219,7 +221,9 @@
>         best_cpu = p->processor;
>         if (can_schedule(p, best_cpu)) {
>                 tsk = idle_task(best_cpu);
> -               if (cpu_curr(best_cpu) == tsk) {
> +               if ((cpu_curr(best_cpu) == tsk) &&
> +                   (cpu_resched(best_cpu) == NULL))
> +               {
>                         int need_resched;
>  send_now_idle:
>                         /*
> @@ -244,13 +248,24 @@
>          */
>         oldest_idle = (cycles_t) -1;
>         target_tsk = NULL;
> +       best_cpu = 0;
>         max_prio = 1;
>
>         for (i = 0; i < smp_num_cpus; i++) {
>                 cpu = cpu_logical_map(i);
>                 if (!can_schedule(p, cpu))
>                         continue;
> +               /* first check whether there is a resched IPI
> +                * reservation for that cpu. If so, consider the priority
> +                * of the reservation instead of the current task's.
> +                * We do not have to set the need_resched flag again
> +                * for the currently running task; it must have been
> +                * signalled before.
> +                */
> +               tsk = cpu_resched(cpu);
> +               if (tsk == NULL)
>                 tsk = cpu_curr(cpu);
> +
>                 /*
>                  * We use the first available idle CPU. This creates
>                  * a priority list between idle CPUs, but this is not
> @@ -268,19 +283,30 @@
>                                 if (prio > max_prio) {
>                                         max_prio = prio;
>                                         target_tsk = tsk;
> +                                       best_cpu = cpu;
>                                 }
>                         }
>                 }
>         }
>         tsk = target_tsk;
>         if (tsk) {
> +               rtsk = cpu_resched(best_cpu);
> +               if (rtsk) {
> +                       rtsk->has_cpu = 0; /* return rtsk to schedulable */
> +                       tsk->has_cpu  = 1; /* can't schedule this one any more */
> +                       cpu_resched(best_cpu) = tsk;
> +                       return;
> +               }
>                 if (oldest_idle != -1ULL) {
>                         best_cpu = tsk->processor;
>                         goto send_now_idle;
>                 }
>                 tsk->need_resched = 1;
> -               if (tsk->processor != this_cpu)
> -                       smp_send_reschedule(tsk->processor);
> +               if (tsk->processor != this_cpu) {
> +                       tsk->has_cpu  = 1;
> +                       cpu_resched(best_cpu) = tsk;
> +                       smp_send_reschedule(best_cpu);
> +               }
>         }
>         return;
>
> @@ -578,6 +604,15 @@
>          */
>
>  repeat_schedule:
> +       /* we check whether we have a resched_IPI reservation:
> +        * if so, simply select the reserving task as next and
> +        * go switch to it.
> +        */
> +       next = cpu_resched(this_cpu);
> +       if (next) {
> +               next = p;
> +               goto found_next;
> +       }
>         /*
>          * Default process to select..
>          */
> @@ -604,6 +639,7 @@
>          * switching to the next task, save this fact in
>          * sched_data.
>          */
> +found_next:
>         sched_data->curr = next;
>  #ifdef CONFIG_SMP
>         next->has_cpu = 1;
>
> _______________________________________________
> Lse-tech mailing list
> Lse-tech@lists.sourceforge.net
> http://lists.sourceforge.net/lists/listinfo/lse-tech

[-- Attachment #2: cpuaff-patch --]
[-- Type: text/plain, Size: 3211 bytes --]

diff -uwrbBN linux-2.4.5-van/kernel/sched.c linux-2.4.5-ca/kernel/sched.c
--- linux-2.4.5-van/kernel/sched.c	Fri Apr 20 21:26:16 2001
+++ linux-2.4.5-ca/kernel/sched.c	Tue Jul 17 07:31:10 2001
@@ -97,12 +97,14 @@
 static union {
 	struct schedule_data {
 		struct task_struct * curr;
+		struct task_struct * resched;
 		cycles_t last_schedule;
 	} schedule_data;
 	char __pad [SMP_CACHE_BYTES];
-} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
+} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0,0}}};
 
 #define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
+#define cpu_resched(cpu) aligned_data[(cpu)].schedule_data.resched
 #define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule
 
 struct kernel_stat kstat;
@@ -208,7 +210,7 @@
 {
 #ifdef CONFIG_SMP
 	int this_cpu = smp_processor_id();
-	struct task_struct *tsk, *target_tsk;
+	struct task_struct *tsk, *target_tsk, *rtsk;
 	int cpu, best_cpu, i, max_prio;
 	cycles_t oldest_idle;
 
@@ -219,7 +221,9 @@
 	best_cpu = p->processor;
 	if (can_schedule(p, best_cpu)) {
 		tsk = idle_task(best_cpu);
-		if (cpu_curr(best_cpu) == tsk) {
+		if ((cpu_curr(best_cpu) == tsk) &&
+		    (cpu_resched(best_cpu) == NULL))
+		{
 			int need_resched;
 send_now_idle:
 			/*
@@ -244,13 +248,24 @@
 	 */
 	oldest_idle = (cycles_t) -1;
 	target_tsk = NULL;
+	best_cpu = 0;
 	max_prio = 1;
 
 	for (i = 0; i < smp_num_cpus; i++) {
 		cpu = cpu_logical_map(i);
 		if (!can_schedule(p, cpu))
 			continue;
+		/* first check whether there is a resched IPI
+		 * reservation for that cpu. If so, consider the priority
+		 * of the reservation instead of the current task's.
+		 * We do not have to set the need_resched flag again
+		 * for the currently running task; it must have been
+		 * signalled before.
+		 */
+		tsk = cpu_resched(cpu);
+		if (tsk == NULL)
 		tsk = cpu_curr(cpu);
+
 		/*
 		 * We use the first available idle CPU. This creates
 		 * a priority list between idle CPUs, but this is not
@@ -268,19 +283,30 @@
 				if (prio > max_prio) {
 					max_prio = prio;
 					target_tsk = tsk;
+					best_cpu = cpu;
 				}
 			}
 		}
 	}
 	tsk = target_tsk;
 	if (tsk) {
+		rtsk = cpu_resched(best_cpu);
+		if (rtsk) {
+			rtsk->has_cpu = 0; /* return rtsk to schedulable */
+			tsk->has_cpu  = 1; /* can't schedule this one any more */
+			cpu_resched(best_cpu) = tsk;
+			return;
+		}
 		if (oldest_idle != -1ULL) {
 			best_cpu = tsk->processor;
 			goto send_now_idle;
 		}
 		tsk->need_resched = 1;
-		if (tsk->processor != this_cpu)
-			smp_send_reschedule(tsk->processor);
+		if (tsk->processor != this_cpu) {
+			tsk->has_cpu  = 1; 
+			cpu_resched(best_cpu) = tsk;
+			smp_send_reschedule(best_cpu);
+		}
 	}
 	return;
 		
@@ -578,6 +604,15 @@
 	 */
 
 repeat_schedule:
+	/* we check whether we have a resched_IPI reservation:
+	 * if so, simply select the reserving task as next and
+	 * go switch to it.
+	 */
+	next = cpu_resched(this_cpu);
+	if (next) {
+		next = p;
+		goto found_next;
+	}
 	/*
 	 * Default process to select..
 	 */
@@ -604,6 +639,7 @@
 	 * switching to the next task, save this fact in
 	 * sched_data.
 	 */
+found_next:
 	sched_data->curr = next;
 #ifdef CONFIG_SMP
  	next->has_cpu = 1;

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [Lse-tech] Re: CPU affinity & IPI latency (FIX)_
  2001-07-17 17:33 ` [Lse-tech] Re: CPU affinity & IPI latency (FIX)_ Hubertus Franke
@ 2001-07-17 18:00   ` Davide Libenzi
  2001-07-17 18:11   ` Davide Libenzi
  1 sibling, 0 replies; 6+ messages in thread
From: Davide Libenzi @ 2001-07-17 18:00 UTC (permalink / raw)
  To: Hubertus Franke; +Cc: linux-kernel, lse-tech, ak


On 17-Jul-2001 Hubertus Franke wrote:
> In an attempt to inline the code, somehow the tabs got lost. So here is the
> attached correct patch for 2.4.5. Please try it and let me know whether you
> see your problems disappear and/or others arise.
> The sketchy writeup is still the same.

Did you try the patch?
Maybe this could help:

+       next = cpu_resched(this_cpu);
+       if (next) {
+               cpu_resched(this_cpu) = NULL;
-               next = p;
+               goto found_next;
+       }
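
The point of the two changes: the reservation must be consumed exactly
once (without the NULL reset, the stale pointer is picked again on every
subsequent schedule() on that CPU), and "next = p" throws the reserved
task away in favor of whatever p happens to point at. Applied to the
patch, the repeat_schedule block would then read (a sketch under those
two corrections):

	next = cpu_resched(this_cpu);
	if (next) {
		cpu_resched(this_cpu) = NULL; /* consume the reservation      */
		goto found_next;              /* switch to the reserving task */
	}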




- Davide


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [Lse-tech] Re: CPU affinity & IPI latency (FIX)_
  2001-07-17 17:33 ` [Lse-tech] Re: CPU affinity & IPI latency (FIX)_ Hubertus Franke
  2001-07-17 18:00   ` Davide Libenzi
@ 2001-07-17 18:11   ` Davide Libenzi
  1 sibling, 0 replies; 6+ messages in thread
From: Davide Libenzi @ 2001-07-17 18:11 UTC (permalink / raw)
  To: Hubertus Franke; +Cc: linux-kernel, lse-tech, ak


On 17-Jul-2001 Hubertus Franke wrote:
> In an attempt to inline the code, somehow the tabs got lost. So here is the
> attached correct patch for 2.4.5. Please try it and let me know whether you
> see your problems disappear and/or others arise.
> The sketchy writeup is still the same.

What is the reason you don't set the resched task in the fast path?

        best_cpu = p->processor;
        if (can_schedule(p, best_cpu)) {
                tsk = idle_task(best_cpu);
                if ((cpu_curr(best_cpu) == tsk) &&
                    (cpu_resched(best_cpu) == NULL)) {
                        int need_resched;
send_now_idle:
                        /*
                         * If need_resched == -1 then we can skip sending
                         * the IPI altogether, tsk->need_resched is
                         * actively watched by the idle thread.
                         */
                        need_resched = tsk->need_resched;
                        tsk->need_resched = 1;
                        if ((best_cpu != this_cpu) && !need_resched) {
>>>>                            cpu_resched(best_cpu) = p;
                                smp_send_reschedule(best_cpu);
                        }
                        return;
                }
        }



- Davide


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [Lse-tech] Re: CPU affinity & IPI latency (FIX)_
  2001-07-17 18:28 Hubertus Franke
@ 2001-07-19 16:56 ` Davide Libenzi
  0 siblings, 0 replies; 6+ messages in thread
From: Davide Libenzi @ 2001-07-19 16:56 UTC (permalink / raw)
  To: Hubertus Franke; +Cc: lse-tech, linux-kernel


On 17-Jul-2001 Hubertus Franke wrote:
> 
> 
> This applies only to the idle thread, and it says that the idle
> thread actively monitors its need_resched flag and hence will
> instantly call schedule() at that point. Hence there won't be any
> delay, either for the IPI or for waiting to return from the kernel.
> 
> You might be right that the problem situation still arises, because
> the idle thread needs to contend again for the lock.
> Let me ask the other way around: why do we HAVE to put it in?
> And if I missed something here, we put it outside the <if> clause.

Yep, we were talking about two different if-locations :)
Anyway, that's right: using the polling idle we have to change the position
of the assignment.
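
Something like the following, presumably -- a sketch of the fast path with
the assignment hoisted out of the IPI-only branch, so that the polling idle
case (need_resched == -1, no IPI sent) also finds the reservation once it
reaches schedule(). Whether p->has_cpu must also be raised here, as in the
slow path, is left open:

			need_resched = tsk->need_resched;
			tsk->need_resched = 1;
			if (best_cpu != this_cpu) {
				/* reserve the CPU for p in both cases: the
				 * polling idle thread skips the IPI but still
				 * contends for the runqueue lock before it
				 * reschedules, so the window remains */
				cpu_resched(best_cpu) = p;
				if (!need_resched)
					smp_send_reschedule(best_cpu);
			}
			return;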




- Davide


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [Lse-tech] Re: CPU affinity & IPI latency (FIX)_
@ 2001-07-17 18:28 Hubertus Franke
  2001-07-19 16:56 ` Davide Libenzi
  0 siblings, 1 reply; 6+ messages in thread
From: Hubertus Franke @ 2001-07-17 18:28 UTC (permalink / raw)
  To: Davide Libenzi; +Cc: linux-kernel, lse-tech



This applies only to the idle thread, and it says that the idle
thread actively monitors its need_resched flag and hence will
instantly call schedule() at that point. Hence there won't be any
delay, either for the IPI or for waiting to return from the kernel.

You might be right that the problem situation still arises, because
the idle thread needs to contend again for the lock.
Let me ask the other way around: why do we HAVE to put it in?
And if I missed something here, we put it outside the <if> clause.


Hubertus Franke
Enterprise Linux Group (Mgr),  Linux Technology Center (Member Scalability)
, OS-PIC (Chair)
email: frankeh@us.ibm.com
(w) 914-945-2003    (fax) 914-945-4425   TL: 862-2003



Davide Libenzi <davidel@xmailserver.org>@lists.sourceforge.net on
07/17/2001 02:11:55 PM

Sent by:  lse-tech-admin@lists.sourceforge.net


To:   Hubertus Franke <frankeh@watson.ibm.com>
cc:   linux-kernel@vger.kernel.org, lse-tech@lists.sourceforge.net,
      ak@suse.de
Subject:  Re: [Lse-tech] Re: CPU affinity & IPI latency (FIX)_




On 17-Jul-2001 Hubertus Franke wrote:
> In an attempt to inline the code, somehow the tabs got lost. So here is the
> attached correct patch for 2.4.5. Please try it and let me know whether you
> see your problems disappear and/or others arise.
> The sketchy writeup is still the same.

What is the reason you don't set the resched task in the fast path?

        best_cpu = p->processor;
        if (can_schedule(p, best_cpu)) {
                tsk = idle_task(best_cpu);
                if ((cpu_curr(best_cpu) == tsk) &&
                    (cpu_resched(best_cpu) == NULL)) {
                        int need_resched;
send_now_idle:
                        /*
                         * If need_resched == -1 then we can skip sending
                         * the IPI altogether, tsk->need_resched is
                         * actively watched by the idle thread.
                         */
                        need_resched = tsk->need_resched;
                        tsk->need_resched = 1;
                        if ((best_cpu != this_cpu) && !need_resched) {
>>>>                            cpu_resched(best_cpu) = p;
                                smp_send_reschedule(best_cpu);
                        }
                        return;
                }
        }



- Davide


_______________________________________________
Lse-tech mailing list
Lse-tech@lists.sourceforge.net
http://lists.sourceforge.net/lists/listinfo/lse-tech




^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [Lse-tech] Re: CPU affinity & IPI latency (FIX)_
       [not found] <OF51487F8A.564A5A7D-ON85256A8C.0063C208@pok.ibm.com>
@ 2001-07-17 18:12 ` Hubertus Franke
  0 siblings, 0 replies; 6+ messages in thread
From: Hubertus Franke @ 2001-07-17 18:12 UTC (permalink / raw)
  To: linux-kernel, lse-tech

[-- Attachment #1: Type: text/plain, Size: 1416 bytes --]

Davide, thanks for pointing this out. Of course, it has to be reset at that
point. Since it still ran, it actually proves that priority overwrites for
reservations work.
Did you have a chance to run it on the offending app? I don't have that one,
hence I only tried kernel builds etc.
Here is the updated patch.

-- Hubertus (frankeh@us.ibm.com)

> Davide Libenzi <davidel@xmailserver.org>@lists.sourceforge.net on
> 07/17/2001 02:00:45 PM
>
> Sent by:  lse-tech-admin@lists.sourceforge.net
>
> To:   Hubertus Franke <frankeh@watson.ibm.com>
> cc:   linux-kernel@vger.kernel.org, lse-tech@lists.sourceforge.net,
>       ak@suse.de
> Subject:  Re: [Lse-tech] Re: CPU affinity & IPI latency (FIX)_
>
> On 17-Jul-2001 Hubertus Franke wrote:
> > In an attempt to inline the code, somehow the tabs got lost. So here is the
> > attached correct patch for 2.4.5. Please try it and let me know whether you
> > see your problems disappear and/or others arise.
> > The sketchy writeup is still the same.
>
> Did you try the patch?
> Maybe this could help:
>
> +       next = cpu_resched(this_cpu);
> +       if (next) {
> +               cpu_resched(this_cpu) = NULL;
> -               next = p;
> +               goto found_next;
> +       }
>
> - Davide
>
> _______________________________________________
> Lse-tech mailing list
> Lse-tech@lists.sourceforge.net
> http://lists.sourceforge.net/lists/listinfo/lse-tech

[-- Attachment #2: cpuaff-patch --]
[-- Type: text/plain, Size: 3244 bytes --]

diff -uwrbBN linux-2.4.5-van/kernel/sched.c linux-2.4.5-ca/kernel/sched.c
--- linux-2.4.5-van/kernel/sched.c	Fri Apr 20 21:26:16 2001
+++ linux-2.4.5-ca/kernel/sched.c	Tue Jul 17 14:02:13 2001
@@ -97,12 +97,14 @@
 static union {
 	struct schedule_data {
 		struct task_struct * curr;
+		struct task_struct * resched;
 		cycles_t last_schedule;
 	} schedule_data;
 	char __pad [SMP_CACHE_BYTES];
-} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
+} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0,0}}};
 
 #define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
+#define cpu_resched(cpu) aligned_data[(cpu)].schedule_data.resched
 #define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule
 
 struct kernel_stat kstat;
@@ -208,7 +210,7 @@
 {
 #ifdef CONFIG_SMP
 	int this_cpu = smp_processor_id();
-	struct task_struct *tsk, *target_tsk;
+	struct task_struct *tsk, *target_tsk, *rtsk;
 	int cpu, best_cpu, i, max_prio;
 	cycles_t oldest_idle;
 
@@ -219,7 +221,9 @@
 	best_cpu = p->processor;
 	if (can_schedule(p, best_cpu)) {
 		tsk = idle_task(best_cpu);
-		if (cpu_curr(best_cpu) == tsk) {
+		if ((cpu_curr(best_cpu) == tsk) &&
+		    (cpu_resched(best_cpu) == NULL))
+		{
 			int need_resched;
 send_now_idle:
 			/*
@@ -244,13 +248,24 @@
 	 */
 	oldest_idle = (cycles_t) -1;
 	target_tsk = NULL;
+	best_cpu = 0;
 	max_prio = 1;
 
 	for (i = 0; i < smp_num_cpus; i++) {
 		cpu = cpu_logical_map(i);
 		if (!can_schedule(p, cpu))
 			continue;
+		/* first check whether there is a resched IPI
+		 * reservation for that cpu. If so, consider the priority
+		 * of the reservation instead of the current task's.
+		 * We do not have to set the need_resched flag again
+		 * for the currently running task; it must have been
+		 * signalled before.
+		 */
+		tsk = cpu_resched(cpu);
+		if (tsk == NULL)
 		tsk = cpu_curr(cpu);
+
 		/*
 		 * We use the first available idle CPU. This creates
 		 * a priority list between idle CPUs, but this is not
@@ -268,19 +283,30 @@
 				if (prio > max_prio) {
 					max_prio = prio;
 					target_tsk = tsk;
+					best_cpu = cpu;
 				}
 			}
 		}
 	}
 	tsk = target_tsk;
 	if (tsk) {
+		rtsk = cpu_resched(best_cpu);
+		if (rtsk) {
+			rtsk->has_cpu = 0; /* return rtsk to schedulable */
+			tsk->has_cpu  = 1; /* can't schedule this one any more */
+			cpu_resched(best_cpu) = tsk;
+			return;
+		}
 		if (oldest_idle != -1ULL) {
 			best_cpu = tsk->processor;
 			goto send_now_idle;
 		}
 		tsk->need_resched = 1;
-		if (tsk->processor != this_cpu)
-			smp_send_reschedule(tsk->processor);
+		if (tsk->processor != this_cpu) {
+			tsk->has_cpu  = 1; 
+			cpu_resched(best_cpu) = tsk;
+			smp_send_reschedule(best_cpu);
+		}
 	}
 	return;
 		
@@ -578,6 +604,16 @@
 	 */
 
 repeat_schedule:
+	/* we check whether we have a resched_IPI reservation:
+	 * if so, simply select the reserving task as next and
+	 * go switch to it.
+	 */
+	next = cpu_resched(this_cpu);
+	if (next) {
+		cpu_resched(this_cpu) = NULL;
+		next = p;
+		goto found_next;
+	}
 	/*
 	 * Default process to select..
 	 */
@@ -604,6 +640,7 @@
 	 * switching to the next task, save this fact in
 	 * sched_data.
 	 */
+found_next:
 	sched_data->curr = next;
 #ifdef CONFIG_SMP
  	next->has_cpu = 1;

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2001-07-19 16:53 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <OF46AE0C03.7FA13916-ON85256A8C.005E4CEB@pok.ibm.com>
2001-07-17 17:33 ` [Lse-tech] Re: CPU affinity & IPI latency (FIX)_ Hubertus Franke
2001-07-17 18:00   ` Davide Libenzi
2001-07-17 18:11   ` Davide Libenzi
     [not found] <OF51487F8A.564A5A7D-ON85256A8C.0063C208@pok.ibm.com>
2001-07-17 18:12 ` Hubertus Franke
2001-07-17 18:28 Hubertus Franke
2001-07-19 16:56 ` Davide Libenzi
