Nathan Froyd wrote: > When debugging multi-threaded programs, QEMU's gdb stub would report the > correct number of threads (the qfThreadInfo and qsThreadInfo packets). > However, the stub was unable to actually switch between threads (the T > packet), since it would report every thread except the first as being > dead. Furthermore, the stub relied upon cpu_index as a reliable means > of assigning IDs to the threads. This was a bad idea; if you have this > sequence of events: > > initial thread created > new thread #1 > new thread #2 > thread #1 exits > new thread #3 > > thread #3 will have the same cpu_index as thread #1, which would confuse > GDB. > > We fix this by adding a stable gdbstub_index field for each CPU; the > index is incremented for every CPU (thread) created. We ignore > wraparound issues for now. Once we have this field, the stub needs to > use this field instead of cpu_index for communicating with GDB. > > Signed-off-by: Nathan Froyd > --- > cpu-defs.h | 1 + > exec.c | 5 +++++ > gdbstub.c | 32 +++++++++++++++++++++++--------- > 3 files changed, 29 insertions(+), 9 deletions(-) > > diff --git a/cpu-defs.h b/cpu-defs.h > index 0a82197..c923708 100644 > --- a/cpu-defs.h > +++ b/cpu-defs.h > @@ -207,6 +207,7 @@ typedef struct CPUWatchpoint { > \ > CPUState *next_cpu; /* next CPU sharing TB cache */ \ > int cpu_index; /* CPU index (informative) */ \ > + int gdbstub_index; /* index for gdbstub T and H packets */ \ > int numa_node; /* NUMA node this cpu is belonging to */ \ > int running; /* Nonzero if cpu is currently running(usermode). 
*/ \ > /* user data */ \ > diff --git a/exec.c b/exec.c > index c5c9280..63e3411 100644 > --- a/exec.c > +++ b/exec.c > @@ -538,6 +538,8 @@ static int cpu_common_load(QEMUFile *f, void *opaque, int version_id) > } > #endif > > +static int next_gdbstub_index; > + > void cpu_exec_init(CPUState *env) > { > CPUState **penv; > @@ -554,6 +556,7 @@ void cpu_exec_init(CPUState *env) > cpu_index++; > } > env->cpu_index = cpu_index; > + env->gdbstub_index = ++next_gdbstub_index; While this is simple and sufficient for most cases, making next_gdbstub_index robust against collisions due to overflow is not much more complicated - so why not do this right from the beginning? > env->numa_node = 0; > TAILQ_INIT(&env->breakpoints); > TAILQ_INIT(&env->watchpoints); > @@ -1711,6 +1714,8 @@ CPUState *cpu_copy(CPUState *env) > wp->flags, NULL); > } > #endif > + /* Need to assign a new thread ID for the gdbstub */ > + new_env->gdbstub_index = ++next_gdbstub_index; > > return new_env; > } > diff --git a/gdbstub.c b/gdbstub.c > index 3c34741..61b065b 100644 > --- a/gdbstub.c > +++ b/gdbstub.c > @@ -1718,9 +1718,11 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf) > put_packet(s, "OK"); > break; > } > - for (env = first_cpu; env != NULL; env = env->next_cpu) > - if (env->cpu_index + 1 == thread) > + for (env = first_cpu; env != NULL; env = env->next_cpu) { > + if (env->gdbstub_index == thread) { > break; > + } > + } > if (env == NULL) { > put_packet(s, "E22"); > break; > @@ -1741,14 +1743,25 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf) > break; > case 'T': > thread = strtoull(p, (char **)&p, 16); > -#ifndef CONFIG_USER_ONLY > - if (thread > 0 && thread < smp_cpus + 1) > +#if defined(CONFIG_USER_ONLY) > + { > + for (env = first_cpu; env != NULL; env = env->next_cpu) { > + if (env->gdbstub_index == thread) { > + break; > + } > + } > + > + if (env != NULL) > + put_packet(s, "OK"); > + else > + put_packet(s, "E22"); > + } > #else > - if (thread == 1) > 
-#endif > + if (thread > 0 && thread < smp_cpus + 1) > put_packet(s, "OK"); > else > put_packet(s, "E22"); > +#endif I don't think we need these #ifdefs here. You assign monotonically increasing IDs to system-mode CPUs as well, so we can handle them identically (we have to anyway once cpu hotplugging hits upstream). > break; > case 'q': > case 'Q': > @@ -1786,7 +1799,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf) > } else if (strcmp(p,"sThreadInfo") == 0) { > report_cpuinfo: > if (s->query_cpu) { > - snprintf(buf, sizeof(buf), "m%x", s->query_cpu->cpu_index+1); > + snprintf(buf, sizeof(buf), "m%x", s->query_cpu->gdbstub_index); > put_packet(s, buf); > s->query_cpu = s->query_cpu->next_cpu; > } else > @@ -1794,8 +1807,8 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf) > break; > } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) { > thread = strtoull(p+16, (char **)&p, 16); > - for (env = first_cpu; env != NULL; env = env->next_cpu) > - if (env->cpu_index + 1 == thread) { > + for (env = first_cpu; env != NULL; env = env->next_cpu) { > + if (env->gdbstub_index == thread) { > cpu_synchronize_state(env, 0); > len = snprintf((char *)mem_buf, sizeof(mem_buf), > "CPU#%d [%s]", env->cpu_index, > @@ -1804,6 +1817,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf) > put_packet(s, buf); > break; > } > + } > break; > } > #ifdef CONFIG_USER_ONLY Hmm, I bet you now have some use for my good old vCont patch (i.e. continue single-stepping on a different CPU / in a different thread). Will repost soon. Jan