All of lore.kernel.org
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH] fix gdbstub support for multiple threads in usermode
@ 2009-05-12 19:35 Nathan Froyd
  2009-05-12 20:53 ` [Qemu-devel] " Jan Kiszka
  0 siblings, 1 reply; 11+ messages in thread
From: Nathan Froyd @ 2009-05-12 19:35 UTC (permalink / raw)
  To: qemu-devel

When debugging multi-threaded programs, QEMU's gdb stub would report the
correct number of threads (the qfThreadInfo and qsThreadInfo packets).
However, the stub was unable to actually switch between threads (the T
packet), since it would report every thread except the first as being
dead.  Furthermore, the stub relied upon cpu_index as a reliable means
of assigning IDs to the threads.  This was a bad idea; if you have this
sequence of events:

initial thread created
new thread #1
new thread #2
thread #1 exits
new thread #3

thread #3 will have the same cpu_index as thread #1, which would confuse
GDB.

We fix this by adding a stable gdbstub_index field for each CPU; the
index is incremented for every CPU (thread) created.  We ignore
wraparound issues for now.  Once we have this field, the stub needs to
use this field instead of cpu_index for communicating with GDB.

Signed-off-by: Nathan Froyd <froydnj@codesourcery.com>
---
 cpu-defs.h |    1 +
 exec.c     |    5 +++++
 gdbstub.c  |   32 +++++++++++++++++++++++---------
 3 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/cpu-defs.h b/cpu-defs.h
index 0a82197..c923708 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -207,6 +207,7 @@ typedef struct CPUWatchpoint {
                                                                         \
     CPUState *next_cpu; /* next CPU sharing TB cache */                 \
     int cpu_index; /* CPU index (informative) */                        \
+    int gdbstub_index; /* index for gdbstub T and H packets */          \
     int numa_node; /* NUMA node this cpu is belonging to  */            \
     int running; /* Nonzero if cpu is currently running(usermode).  */  \
     /* user data */                                                     \
diff --git a/exec.c b/exec.c
index c5c9280..63e3411 100644
--- a/exec.c
+++ b/exec.c
@@ -538,6 +538,8 @@ static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
 }
 #endif
 
+static int next_gdbstub_index;
+
 void cpu_exec_init(CPUState *env)
 {
     CPUState **penv;
@@ -554,6 +556,7 @@ void cpu_exec_init(CPUState *env)
         cpu_index++;
     }
     env->cpu_index = cpu_index;
+    env->gdbstub_index = ++next_gdbstub_index;
     env->numa_node = 0;
     TAILQ_INIT(&env->breakpoints);
     TAILQ_INIT(&env->watchpoints);
@@ -1711,6 +1714,8 @@ CPUState *cpu_copy(CPUState *env)
                               wp->flags, NULL);
     }
 #endif
+    /* Need to assign a new thread ID for the gdbstub */
+    new_env->gdbstub_index = ++next_gdbstub_index;
 
     return new_env;
 }
diff --git a/gdbstub.c b/gdbstub.c
index 3c34741..61b065b 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -1718,9 +1718,11 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
             put_packet(s, "OK");
             break;
         }
-        for (env = first_cpu; env != NULL; env = env->next_cpu)
-            if (env->cpu_index + 1 == thread)
+        for (env = first_cpu; env != NULL; env = env->next_cpu) {
+            if (env->gdbstub_index == thread) {
                 break;
+            }
+        }
         if (env == NULL) {
             put_packet(s, "E22");
             break;
@@ -1741,14 +1743,25 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
         break;
     case 'T':
         thread = strtoull(p, (char **)&p, 16);
-#ifndef CONFIG_USER_ONLY
-        if (thread > 0 && thread < smp_cpus + 1)
+#if defined(CONFIG_USER_ONLY)
+        {
+            for (env = first_cpu; env != NULL; env = env->next_cpu) {
+                if (env->gdbstub_index == thread) {
+                    break;
+                }
+            }
+
+            if (env != NULL)
+                put_packet(s, "OK");
+            else
+                put_packet(s, "E22");
+        }
 #else
-        if (thread == 1)
-#endif
+        if (thread > 0 && thread < smp_cpus + 1)
              put_packet(s, "OK");
         else
             put_packet(s, "E22");
+#endif
         break;
     case 'q':
     case 'Q':
@@ -1786,7 +1799,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
         } else if (strcmp(p,"sThreadInfo") == 0) {
         report_cpuinfo:
             if (s->query_cpu) {
-                snprintf(buf, sizeof(buf), "m%x", s->query_cpu->cpu_index+1);
+                snprintf(buf, sizeof(buf), "m%x", s->query_cpu->gdbstub_index);
                 put_packet(s, buf);
                 s->query_cpu = s->query_cpu->next_cpu;
             } else
@@ -1794,8 +1807,8 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
             break;
         } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
             thread = strtoull(p+16, (char **)&p, 16);
-            for (env = first_cpu; env != NULL; env = env->next_cpu)
-                if (env->cpu_index + 1 == thread) {
+            for (env = first_cpu; env != NULL; env = env->next_cpu) {
+                if (env->gdbstub_index == thread) {
                     cpu_synchronize_state(env, 0);
                     len = snprintf((char *)mem_buf, sizeof(mem_buf),
                                    "CPU#%d [%s]", env->cpu_index,
@@ -1804,6 +1817,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
                     put_packet(s, buf);
                     break;
                 }
+            }
             break;
         }
 #ifdef CONFIG_USER_ONLY
-- 
1.6.0.5

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-12 19:35 [Qemu-devel] [PATCH] fix gdbstub support for multiple threads in usermode Nathan Froyd
@ 2009-05-12 20:53 ` Jan Kiszka
  2009-05-13  5:08   ` vibisreenivasan
                     ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Jan Kiszka @ 2009-05-12 20:53 UTC (permalink / raw)
  To: Nathan Froyd; +Cc: qemu-devel

[-- Attachment #1: Type: text/plain, Size: 6414 bytes --]

Nathan Froyd wrote:
> When debugging multi-threaded programs, QEMU's gdb stub would report the
> correct number of threads (the qfThreadInfo and qsThreadInfo packets).
> However, the stub was unable to actually switch between threads (the T
> packet), since it would report every thread except the first as being
> dead.  Furthermore, the stub relied upon cpu_index as a reliable means
> of assigning IDs to the threads.  This was a bad idea; if you have this
> sequence of events:
> 
> initial thread created
> new thread #1
> new thread #2
> thread #1 exits
> new thread #3
> 
> thread #3 will have the same cpu_index as thread #1, which would confuse
> GDB.
> 
> We fix this by adding a stable gdbstub_index field for each CPU; the
> index is incremented for every CPU (thread) created.  We ignore
> wraparound issues for now.  Once we have this field, the stub needs to
> use this field instead of cpu_index for communicating with GDB.
> 
> Signed-off-by: Nathan Froyd <froydnj@codesourcery.com>
> ---
>  cpu-defs.h |    1 +
>  exec.c     |    5 +++++
>  gdbstub.c  |   32 +++++++++++++++++++++++---------
>  3 files changed, 29 insertions(+), 9 deletions(-)
> 
> diff --git a/cpu-defs.h b/cpu-defs.h
> index 0a82197..c923708 100644
> --- a/cpu-defs.h
> +++ b/cpu-defs.h
> @@ -207,6 +207,7 @@ typedef struct CPUWatchpoint {
>                                                                          \
>      CPUState *next_cpu; /* next CPU sharing TB cache */                 \
>      int cpu_index; /* CPU index (informative) */                        \
> +    int gdbstub_index; /* index for gdbstub T and H packets */          \
>      int numa_node; /* NUMA node this cpu is belonging to  */            \
>      int running; /* Nonzero if cpu is currently running(usermode).  */  \
>      /* user data */                                                     \
> diff --git a/exec.c b/exec.c
> index c5c9280..63e3411 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -538,6 +538,8 @@ static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
>  }
>  #endif
>  
> +static int next_gdbstub_index;
> +
>  void cpu_exec_init(CPUState *env)
>  {
>      CPUState **penv;
> @@ -554,6 +556,7 @@ void cpu_exec_init(CPUState *env)
>          cpu_index++;
>      }
>      env->cpu_index = cpu_index;
> +    env->gdbstub_index = ++next_gdbstub_index;

While this is simple and sufficient for most cases, making
next_gdbstub_index robust against collisions due to overflow is not much
more complicated - so why not do this right from the beginning?

>      env->numa_node = 0;
>      TAILQ_INIT(&env->breakpoints);
>      TAILQ_INIT(&env->watchpoints);
> @@ -1711,6 +1714,8 @@ CPUState *cpu_copy(CPUState *env)
>                                wp->flags, NULL);
>      }
>  #endif
> +    /* Need to assign a new thread ID for the gdbstub */
> +    new_env->gdbstub_index = ++next_gdbstub_index;
>  
>      return new_env;
>  }
> diff --git a/gdbstub.c b/gdbstub.c
> index 3c34741..61b065b 100644
> --- a/gdbstub.c
> +++ b/gdbstub.c
> @@ -1718,9 +1718,11 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>              put_packet(s, "OK");
>              break;
>          }
> -        for (env = first_cpu; env != NULL; env = env->next_cpu)
> -            if (env->cpu_index + 1 == thread)
> +        for (env = first_cpu; env != NULL; env = env->next_cpu) {
> +            if (env->gdbstub_index == thread) {
>                  break;
> +            }
> +        }
>          if (env == NULL) {
>              put_packet(s, "E22");
>              break;
> @@ -1741,14 +1743,25 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>          break;
>      case 'T':
>          thread = strtoull(p, (char **)&p, 16);
> -#ifndef CONFIG_USER_ONLY
> -        if (thread > 0 && thread < smp_cpus + 1)
> +#if defined(CONFIG_USER_ONLY)
> +        {
> +            for (env = first_cpu; env != NULL; env = env->next_cpu) {
> +                if (env->gdbstub_index == thread) {
> +                    break;
> +                }
> +            }
> +
> +            if (env != NULL)
> +                put_packet(s, "OK");
> +            else
> +                put_packet(s, "E22");
> +        }
>  #else
> -        if (thread == 1)
> -#endif
> +        if (thread > 0 && thread < smp_cpus + 1)
>               put_packet(s, "OK");
>          else
>              put_packet(s, "E22");
> +#endif

I don't think we need these #ifdefs here. You assign continuously
increasing IDs also to system-mode CPUs, so we can handle them
identically (we have to anyway once cpu hotplugging hits upstream).

>          break;
>      case 'q':
>      case 'Q':
> @@ -1786,7 +1799,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>          } else if (strcmp(p,"sThreadInfo") == 0) {
>          report_cpuinfo:
>              if (s->query_cpu) {
> -                snprintf(buf, sizeof(buf), "m%x", s->query_cpu->cpu_index+1);
> +                snprintf(buf, sizeof(buf), "m%x", s->query_cpu->gdbstub_index);
>                  put_packet(s, buf);
>                  s->query_cpu = s->query_cpu->next_cpu;
>              } else
> @@ -1794,8 +1807,8 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>              break;
>          } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
>              thread = strtoull(p+16, (char **)&p, 16);
> -            for (env = first_cpu; env != NULL; env = env->next_cpu)
> -                if (env->cpu_index + 1 == thread) {
> +            for (env = first_cpu; env != NULL; env = env->next_cpu) {
> +                if (env->gdbstub_index == thread) {
>                      cpu_synchronize_state(env, 0);
>                      len = snprintf((char *)mem_buf, sizeof(mem_buf),
>                                     "CPU#%d [%s]", env->cpu_index,
> @@ -1804,6 +1817,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>                      put_packet(s, buf);
>                      break;
>                  }
> +            }
>              break;
>          }
>  #ifdef CONFIG_USER_ONLY

Hmm, I bet you now have some use for my good-old vCont patch (=>continue
single-stepping on different CPU / in different thread). Will repost soon.

Jan


[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-12 20:53 ` [Qemu-devel] " Jan Kiszka
@ 2009-05-13  5:08   ` vibisreenivasan
  2009-05-13 14:13   ` vibisreenivasan
  2009-05-19 15:06   ` Nathan Froyd
  2 siblings, 0 replies; 11+ messages in thread
From: vibisreenivasan @ 2009-05-13  5:08 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: qemu-devel, Nathan Froyd


hi,

Nathan Froyd wrote:
> When debugging multi-threaded programs, QEMU's gdb stub would report the
> correct number of threads (the qfThreadInfo and qsThreadInfo packets).
> However, the stub was unable to actually switch between threads (the T
> packet), since it would report every thread except the first as being
> dead.  Furthermore, the stub relied upon cpu_index as a reliable means
> of assigning IDs to the threads.  This was a bad idea; if you have this
> sequence of events:
> 
> initial thread created
> new thread #1
> new thread #2
> thread #1 exits
> new thread #3
> 
> thread #3 will have the same cpu_index as thread #1, which would confuse
> GDB.
> 
> We fix this by adding a stable gdbstub_index field for each CPU; the
> index is incremented for every CPU (thread) created.  We ignore
> wraparound issues for now.  Once we have this field, the stub needs to
> use this field instead of cpu_index for communicating with GDB.
> 
> Signed-off-by: Nathan Froyd <froydnj@codesourcery.com>
> ---
>  cpu-defs.h |    1 +
>  exec.c     |    5 +++++
>  gdbstub.c  |   32 +++++++++++++++++++++++---------
>  3 files changed, 29 insertions(+), 9 deletions(-)
> 
> diff --git a/cpu-defs.h b/cpu-defs.h
> index 0a82197..c923708 100644
> --- a/cpu-defs.h
> +++ b/cpu-defs.h
> @@ -207,6 +207,7 @@ typedef struct CPUWatchpoint {
>                                                                          \
>      CPUState *next_cpu; /* next CPU sharing TB cache */                 \
>      int cpu_index; /* CPU index (informative) */                        \
> +    int gdbstub_index; /* index for gdbstub T and H packets */          \
>      int numa_node; /* NUMA node this cpu is belonging to  */            \
>      int running; /* Nonzero if cpu is currently running(usermode).  */  \
>      /* user data */                                                     \
> diff --git a/exec.c b/exec.c
> index c5c9280..63e3411 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -538,6 +538,8 @@ static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
>  }
>  #endif
>  
> +static int next_gdbstub_index;
> +
>  void cpu_exec_init(CPUState *env)
>  {
>      CPUState **penv;
> @@ -554,6 +556,7 @@ void cpu_exec_init(CPUState *env)
>          cpu_index++;
>      }
>      env->cpu_index = cpu_index;
> +    env->gdbstub_index = ++next_gdbstub_index;

While this is simple and sufficient for most cases, making
next_gdbstub_index robust against collisions due to overflow is not much
more complicated - so why not do this right from the beginning?

>      env->numa_node = 0;
>      TAILQ_INIT(&env->breakpoints);
>      TAILQ_INIT(&env->watchpoints);
> @@ -1711,6 +1714,8 @@ CPUState *cpu_copy(CPUState *env)
>                                wp->flags, NULL);
>      }
>  #endif
> +    /* Need to assign a new thread ID for the gdbstub */
> +    new_env->gdbstub_index = ++next_gdbstub_index;
>  
>      return new_env;
>  }
> diff --git a/gdbstub.c b/gdbstub.c
> index 3c34741..61b065b 100644
> --- a/gdbstub.c
> +++ b/gdbstub.c
> @@ -1718,9 +1718,11 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>              put_packet(s, "OK");
>              break;
>          }
> -        for (env = first_cpu; env != NULL; env = env->next_cpu)
> -            if (env->cpu_index + 1 == thread)
> +        for (env = first_cpu; env != NULL; env = env->next_cpu) {
> +            if (env->gdbstub_index == thread) {
>                  break;
> +            }
> +        }
>          if (env == NULL) {
>              put_packet(s, "E22");
>              break;
> @@ -1741,14 +1743,25 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>          break;
>      case 'T':
>          thread = strtoull(p, (char **)&p, 16);
> -#ifndef CONFIG_USER_ONLY
> -        if (thread > 0 && thread < smp_cpus + 1)
> +#if defined(CONFIG_USER_ONLY)
> +        {
> +            for (env = first_cpu; env != NULL; env = env->next_cpu) {
> +                if (env->gdbstub_index == thread) {
> +                    break;
> +                }
> +            }
> +
> +            if (env != NULL)
> +                put_packet(s, "OK");
> +            else
> +                put_packet(s, "E22");
> +        }
>  #else
> -        if (thread == 1)
> -#endif
> +        if (thread > 0 && thread < smp_cpus + 1)
>               put_packet(s, "OK");
>          else
>              put_packet(s, "E22");
> +#endif

I don't think we need these #ifdefs here. You assign continuously
increasing IDs also to system-mode CPUs, so we can handle them
identically (we have to anyway once cpu hotplugging hits upstream).

>          break;
>      case 'q':
>      case 'Q':
> @@ -1786,7 +1799,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>          } else if (strcmp(p,"sThreadInfo") == 0) {
>          report_cpuinfo:
>              if (s->query_cpu) {
> -                snprintf(buf, sizeof(buf), "m%x", s->query_cpu->cpu_index+1);
> +                snprintf(buf, sizeof(buf), "m%x", s->query_cpu->gdbstub_index);
>                  put_packet(s, buf);
>                  s->query_cpu = s->query_cpu->next_cpu;
>              } else
> @@ -1794,8 +1807,8 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>              break;
>          } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
>              thread = strtoull(p+16, (char **)&p, 16);
> -            for (env = first_cpu; env != NULL; env = env->next_cpu)
> -                if (env->cpu_index + 1 == thread) {
> +            for (env = first_cpu; env != NULL; env = env->next_cpu) {
> +                if (env->gdbstub_index == thread) {
>                      cpu_synchronize_state(env, 0);
>                      len = snprintf((char *)mem_buf, sizeof(mem_buf),
>                                     "CPU#%d [%s]", env->cpu_index,
> @@ -1804,6 +1817,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
>                      put_packet(s, buf);
>                      break;
>                  }
> +            }
>              break;
>          }
>  #ifdef CONFIG_USER_ONLY

>Hmm, I bet you now have some use for my good-old vCont patch (=>continue
>single-stepping on different CPU / in different thread). Will repost soon.

>Jan


Is user mode threading in a working state? It is written in the TODO file as
"- remove threading support as it cannot work at this point".

thanks & regards
vibi

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-12 20:53 ` [Qemu-devel] " Jan Kiszka
  2009-05-13  5:08   ` vibisreenivasan
@ 2009-05-13 14:13   ` vibisreenivasan
  2009-05-19 14:59     ` Nathan Froyd
  2009-05-19 15:06   ` Nathan Froyd
  2 siblings, 1 reply; 11+ messages in thread
From: vibisreenivasan @ 2009-05-13 14:13 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: qemu-devel, Nathan Froyd

hello,
	
	Is user mode threading in a working state? It is written in the
	TODO file as

	"- remove threading support as it cannot work at this point".

thanks & regards
vibi

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-13 14:13   ` vibisreenivasan
@ 2009-05-19 14:59     ` Nathan Froyd
  2009-05-21  8:19       ` vibi sreenivasan
  0 siblings, 1 reply; 11+ messages in thread
From: Nathan Froyd @ 2009-05-19 14:59 UTC (permalink / raw)
  To: vibisreenivasan; +Cc: qemu-devel

On Wed, May 13, 2009 at 07:43:31PM +0530, vibisreenivasan wrote:
> 	Is user mode threading in a working state , it is written in the
> 	TODO file as
> 
> 	"- remove threading support as it cannot work at this point".

It does work on ARM Linux binaries.  I have patches that make it work on
PPC Linux binaries, and I'm working on getting those patches submitted.

-Nathan

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-12 20:53 ` [Qemu-devel] " Jan Kiszka
  2009-05-13  5:08   ` vibisreenivasan
  2009-05-13 14:13   ` vibisreenivasan
@ 2009-05-19 15:06   ` Nathan Froyd
  2009-05-19 16:53     ` Jan Kiszka
  2 siblings, 1 reply; 11+ messages in thread
From: Nathan Froyd @ 2009-05-19 15:06 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: qemu-devel

On Tue, May 12, 2009 at 10:53:22PM +0200, Jan Kiszka wrote:
> Nathan Froyd wrote:
> > We fix this by adding a stable gdbstub_index field for each CPU; the
> > index is incremented for every CPU (thread) created.  We ignore
> > wraparound issues for now.  Once we have this field, the stub needs to
> > use this field instead of cpu_index for communicating with GDB.
> > [...]
> > @@ -554,6 +556,7 @@ void cpu_exec_init(CPUState *env)
> >          cpu_index++;
> >      }
> >      env->cpu_index = cpu_index;
> > +    env->gdbstub_index = ++next_gdbstub_index;
> 
> While this is simple and sufficient for most cases, making
> next_gdbstub_index robust against collisions due to overflow is not much
> more complicated - so why not do this right from the beginning?

We could just make it a 64-bit field. :)

The best way I can think of to do this is to maintain a
separately-chained list of CPUStates (through a new field similar to
next_cpu) ordered by gdbstub_index.  Grabbing a new gdbstub_index then
walks through the list, looking for "holes" between adjacent entries in
the list.  A new gdbstub_index is then picked if we find a hole; we die
if we can't find a hole.

Is this what you had in mind, or am I not being clever enough?

> I don't think we need these #ifdefs here. You assign continuously
> increasing IDs also to system-mode CPUs, so we can handle them
> identically (we have to anyway once cpu hotplugging hits upstream).

Will fix, thanks.

> Hmm, I bet you now have some use for my good-old vCont patch (=>continue
> single-stepping on different CPU / in different thread). Will repost soon.

Yes, I think that would be useful.

-Nathan

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-19 15:06   ` Nathan Froyd
@ 2009-05-19 16:53     ` Jan Kiszka
  2009-05-19 18:01       ` Nathan Froyd
  0 siblings, 1 reply; 11+ messages in thread
From: Jan Kiszka @ 2009-05-19 16:53 UTC (permalink / raw)
  To: qemu-devel

[-- Attachment #1: Type: text/plain, Size: 2009 bytes --]

Nathan Froyd wrote:
> On Tue, May 12, 2009 at 10:53:22PM +0200, Jan Kiszka wrote:
>> Nathan Froyd wrote:
>>> We fix this by adding a stable gdbstub_index field for each CPU; the
>>> index is incremented for every CPU (thread) created.  We ignore
>>> wraparound issues for now.  Once we have this field, the stub needs to
>>> use this field instead of cpu_index for communicating with GDB.
>>> [...]
>>> @@ -554,6 +556,7 @@ void cpu_exec_init(CPUState *env)
>>>          cpu_index++;
>>>      }
>>>      env->cpu_index = cpu_index;
>>> +    env->gdbstub_index = ++next_gdbstub_index;
>> While this is simple and sufficient for most cases, making
>> next_gdbstub_index robust against collisions due to overflow is not much
>> more complicated - so why not do this right from the beginning?
> 
> We could just make it a 64-bit field. :)

Well... kind of pragmatic.

> 
> The best way I can think of to do this is to maintain a
> separately-chained list of CPUStates (through a new field similar to
> next_cpu) ordered by gdbstub_index.  Grabbing a new gdbstub_index then
> walks through the list, looking for "holes" between adjacent entries in
> the list.  A new gdbstub_index is then picked if we find a hole; we die
> if we can't find a hole.

Why creating a new list? Just generate a new index and then walk through
all so far registered CPUStates until no collision is found.

> 
> Is this what you had in mind, or am I not being clever enough?
> 
>> I don't think we need these #ifdefs here. You assign continuously
>> increasing IDs also to system-mode CPUs, so we can handle them
>> identically (we have to anyway once cpu hotplugging hits upstream).
> 
> Will fix, thanks.
> 
>> Hmm, I bet you now have some use for my good-old vCont patch (=>continue
>> single-stepping on different CPU / in different thread). Will repost soon.
> 
> Yes, I think that would be useful.

On my todo list. I practically just need to include your patch in my queue.

Jan


[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-19 16:53     ` Jan Kiszka
@ 2009-05-19 18:01       ` Nathan Froyd
  2009-05-20  6:39         ` Jan Kiszka
  0 siblings, 1 reply; 11+ messages in thread
From: Nathan Froyd @ 2009-05-19 18:01 UTC (permalink / raw)
  To: qemu-devel

On Tue, May 19, 2009 at 06:53:44PM +0200, Jan Kiszka wrote:
> Nathan Froyd wrote:
> > The best way I can think of to do this is to maintain a
> > separately-chained list of CPUStates (through a new field similar to
> > next_cpu) ordered by gdbstub_index.  Grabbing a new gdbstub_index then
> > walks through the list, looking for "holes" between adjacent entries in
> > the list.  A new gdbstub_index is then picked if we find a hole; we die
> > if we can't find a hole.
> 
> Why creating a new list? Just generate a new index and then walk through
> all so far registered CPUStates until no collision is found.

Do you mean something like:

  new_index = 0
 restart:
  for cpu in all_cpus:
    if new_index == cpu.gdbstub_index
      new_index += 1
      goto restart;
  new_cpu.gdbstub_index = new_index

I was trying to keep it a linear-time algorithm, and the above is
potentially quadratic.  (Not merely an academic exercise, either;
spinning up your first N threads will be enough to trigger quadratic
behavior.)  I don't think you can say:

  new_index = 0
  for cpu in all_cpus:
    if new_index == cpu.gdbstub_index
      new_index = cpu.gdbstub_index + 1
  new_cpu.gdbstub_index = new_index

(i.e. maximum+1 of all existing cpus) either, since that fails for a
list that looks like:

  1 -> 0

You could iterate, of course, but then you're just back to quadratic
behavior.

Nothing promises that the cpu list will be kept in sorted order
according to some criteria.  (A quick examination of things indicates
the list might be accidentally sorted by time of creation--and thereby
by gdbstub_index--but that falls apart once you wrap gdbstub_index, of
course.)  AFAICS, you need the cpus sorted by their gdbstub_index to
keep linear time bounds.  If using a quadratic algorithm is OK, then
I'll just do that, but that doesn't seem like a good idea, especially
since you're imposing a large slowdown on everything for debugging
support which might never get used.

Apologies if I'm missing something obvious here.

-Nathan

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-19 18:01       ` Nathan Froyd
@ 2009-05-20  6:39         ` Jan Kiszka
  2009-05-20 15:27           ` Jamie Lokier
  0 siblings, 1 reply; 11+ messages in thread
From: Jan Kiszka @ 2009-05-20  6:39 UTC (permalink / raw)
  To: qemu-devel

[-- Attachment #1: Type: text/plain, Size: 2688 bytes --]

Nathan Froyd wrote:
> On Tue, May 19, 2009 at 06:53:44PM +0200, Jan Kiszka wrote:
>> Nathan Froyd wrote:
>>> The best way I can think of to do this is to maintain a
>>> separately-chained list of CPUStates (through a new field similar to
>>> next_cpu) ordered by gdbstub_index.  Grabbing a new gdbstub_index then
>>> walks through the list, looking for "holes" between adjacent entries in
>>> the list.  A new gdbstub_index is then picked if we find a hole; we die
>>> if we can't find a hole.
>> Why creating a new list? Just generate a new index and then walk through
>> all so far registered CPUStates until no collision is found.
> 
> Do you mean something like:
> 
>   new_index = 0
>  restart:
>   for cpu in all_cpus:
>     if new_index == cpu.gdbstub_index
>       new_index += 1
>       goto restart;
>   new_cpu.gdbstub_index = new_index
> 
> I was trying to keep it a linear-time algorithm, and the above is
> potentially quadratic.  (Not merely an academic exercise, either;
> spinning up your first N threads will be enough to trigger quadratic
> behavior.)  I don't think you can say:
> 
>   new_index = 0
>   for cpu in all_cpus:
>     if new_index == cpu.gdbstub_index
>       new_index = cpu.gdbstub_index + 1
>   new_cpu.gdbstub_index = new_index
> 
> (i.e. maximum+1 of all existing cpus) either, since that fails for a
> list that looks like:
> 
>   1 -> 0
> 
> You could iterate, of course, but then you're just back to quadratic
> behavior.
> 
> Nothing promises that the cpu list will be kept in sorted order
> according to some criteria.  (A quick examination of things indicates
> the list might be accidentally sorted by time of creation--and thereby
> by gdbstub_index--but that falls apart once you wrap gdbstub_index, of
> course.)  AFAICS, you need the cpus sorted by their gdbstub_index to
> keep linear time bounds.  If using a quadratic algorithm is OK, then
> I'll just do that, but that doesn't seem like a good idea, especially
> since you're imposing a large slowdown on everything for debugging
> support which might never get used.

I see. So we basically have two options:

1. Stick head in sand - we will never see so many threads in reality, so
   neither wrap-around nor performance matters.

2. Go for a bitmap-based search of the next free ID (reducing the ID
   space so that the bitmap has a reasonable size)

But I have no clue if and how long option 1 will suffice as I have no
idea how many long-running highly dynamic multi-threaded use cases there
are or will be for qemu user mode. But if you find option 2 easy enough
to implement, I would do this nevertheless.

Jan


[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-20  6:39         ` Jan Kiszka
@ 2009-05-20 15:27           ` Jamie Lokier
  0 siblings, 0 replies; 11+ messages in thread
From: Jamie Lokier @ 2009-05-20 15:27 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: qemu-devel

Jan Kiszka wrote:
> 1. Stick head in sand - we will never see so many threads in reality, so
>    neither wrap-around nor performance matters.
> 
> 2. Go for a bitmap-based search of the next free ID (reducing the ID
>    space so that the bitmap has a reasonable size)
> 
> But I have no clue if and how long option 1 will suffice as I have no
> idea how many long-running highly dynamic multi-threaded use cases there
> are or will be for qemu user mode. But if you find option 2 easy enough
> to implement, I would do this nevertheless.

Big web servers can realistically have 1000+ running threads.  But
realistically I don't see anyone running those in QEMU for a while :-)

Creating and destroying large numbers of threads during program
execution is much more common, though.
-- Jamie

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] Re: [PATCH] fix gdbstub support for multiple threads in usermode
  2009-05-19 14:59     ` Nathan Froyd
@ 2009-05-21  8:19       ` vibi sreenivasan
  0 siblings, 0 replies; 11+ messages in thread
From: vibi sreenivasan @ 2009-05-21  8:19 UTC (permalink / raw)
  To: Nathan Froyd; +Cc: qemu-devel

hi,

On Tue, 2009-05-19 at 07:59 -0700, Nathan Froyd wrote:
> On Wed, May 13, 2009 at 07:43:31PM +0530, vibisreenivasan wrote:
> > 	Is user mode threading in a working state , it is written in the
> > 	TODO file as
> > 
> > 	"- remove threading support as it cannot work at this point".
> 
> It does work on ARM Linux binaries.  I have patches that make it work on
> PPC Linux binaries, and I'm working on getting those patches submitted.
> 
Any idea on i386?
Can you please update the TODO file?
Or else, can I do that?
Thanks & Regards
Vibi Sreenivasan

> -Nathan
> 
> 
> 
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2009-05-21  8:17 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-05-12 19:35 [Qemu-devel] [PATCH] fix gdbstub support for multiple threads in usermode Nathan Froyd
2009-05-12 20:53 ` [Qemu-devel] " Jan Kiszka
2009-05-13  5:08   ` vibisreenivasan
2009-05-13 14:13   ` vibisreenivasan
2009-05-19 14:59     ` Nathan Froyd
2009-05-21  8:19       ` vibi sreenivasan
2009-05-19 15:06   ` Nathan Froyd
2009-05-19 16:53     ` Jan Kiszka
2009-05-19 18:01       ` Nathan Froyd
2009-05-20  6:39         ` Jan Kiszka
2009-05-20 15:27           ` Jamie Lokier

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.