* [Qemu-devel] [PATCH] qemu_mutex_iothread_locked not correctly synchronized
From: David Engraf @ 2015-11-24 15:43 UTC
  To: qemu-devel; +Cc: Paolo Bonzini, Fam Zheng

Commit afbe70535ff1a8a7a32910cc15ebecc0ba92e7da introduced the function
qemu_mutex_iothread_locked to avoid recursive locking of the iothread 
lock. The iothread_locked variable uses the __thread attribute, which 
results in a per-thread storage location, whereas the qemu_global_mutex 
is not thread-specific. This leads to race conditions because the 
variable is not in sync between threads. I can trigger this problem 
reproducibly on a Windows machine, whereas Linux works fine.

After removing the __thread attribute, iothread_locked may still report 
an invalid state, because some functions call qemu_cond_wait directly, 
which unlocks and relocks the qemu_global_mutex while waiting on a 
condition. These calls must keep iothread_locked in sync as well.

The patch removes the __thread flag from iothread_locked and adds a 
function called qemu_cond_wait_iothread to keep iothread_locked in sync.

Signed-off-by: David Engraf <david.engraf@sysgo.com>
---
  cpus.c | 29 +++++++++++++++++++----------
  1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/cpus.c b/cpus.c
index 877bd70..d7cdd11 100644
--- a/cpus.c
+++ b/cpus.c
@@ -70,6 +70,8 @@ static CPUState *next_cpu;
  int64_t max_delay;
  int64_t max_advance;

+static void qemu_cond_wait_iothread(QemuCond *cond);
+
  /* vcpu throttling controls */
  static QEMUTimer *throttle_timer;
  static unsigned int throttle_percentage;
@@ -920,7 +922,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
      while (!atomic_mb_read(&wi.done)) {
          CPUState *self_cpu = current_cpu;

-        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_work_cond);
          current_cpu = self_cpu;
      }
  }
@@ -998,11 +1000,11 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
         /* Start accounting real time to the virtual clock if the CPUs
            are idle.  */
          qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(cpu->halt_cond);
      }

      while (iothread_requesting_mutex) {
-        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_io_proceeded_cond);
      }

      CPU_FOREACH(cpu) {
@@ -1013,7 +1015,7 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
  static void qemu_kvm_wait_io_event(CPUState *cpu)
  {
      while (cpu_thread_is_idle(cpu)) {
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(cpu->halt_cond);
      }

      qemu_kvm_eat_signals(cpu);
@@ -1123,7 +1125,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)

      /* wait for initial kick-off after machine start */
      while (first_cpu->stopped) {
-        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(first_cpu->halt_cond);

          /* process any pending work */
          CPU_FOREACH(cpu) {
@@ -1208,13 +1210,20 @@ bool qemu_in_vcpu_thread(void)
      return current_cpu && qemu_cpu_is_self(current_cpu);
  }

-static __thread bool iothread_locked = false;
+static bool iothread_locked;

  bool qemu_mutex_iothread_locked(void)
  {
      return iothread_locked;
  }

+static void qemu_cond_wait_iothread(QemuCond *cond)
+{
+    iothread_locked = false;
+    qemu_cond_wait(cond, &qemu_global_mutex);
+    iothread_locked = true;
+}
+
  void qemu_mutex_lock_iothread(void)
  {
      atomic_inc(&iothread_requesting_mutex);
@@ -1277,7 +1286,7 @@ void pause_all_vcpus(void)
      }

      while (!all_vcpus_paused()) {
-        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_pause_cond);
          CPU_FOREACH(cpu) {
              qemu_cpu_kick(cpu);
          }
@@ -1326,7 +1335,7 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
          cpu->hThread = qemu_thread_get_handle(cpu->thread);
  #endif
          while (!cpu->created) {
-            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+            qemu_cond_wait_iothread(&qemu_cpu_cond);
          }
          tcg_cpu_thread = cpu->thread;
      } else {
@@ -1347,7 +1356,7 @@ static void qemu_kvm_start_vcpu(CPUState *cpu)
      qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                         cpu, QEMU_THREAD_JOINABLE);
      while (!cpu->created) {
-        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_cpu_cond);
      }
  }

@@ -1363,7 +1372,7 @@ static void qemu_dummy_start_vcpu(CPUState *cpu)
      qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                         QEMU_THREAD_JOINABLE);
      while (!cpu->created) {
-        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_cpu_cond);
      }
  }

-- 
1.9.1


* Re: [Qemu-devel] [PATCH] qemu_mutex_iothread_locked not correctly synchronized
From: Paolo Bonzini @ 2015-11-24 15:54 UTC
  To: David Engraf, qemu-devel; +Cc: Fam Zheng



On 24/11/2015 16:43, David Engraf wrote:
> Commit afbe70535ff1a8a7a32910cc15ebecc0ba92e7da introduced the function
> qemu_mutex_iothread_locked to avoid recursive locking of the iothread
> lock. The iothread_locked variable uses the __thread attribute, which
> results in a per-thread storage location, whereas the qemu_global_mutex
> is not thread-specific. This leads to race conditions because the
> variable is not in sync between threads.

Frankly, this makes no sense.  You're modifying the
qemu_mutex_iothread_locked function to return whether _some_ thread has
taken the mutex, but the function tells you whether _the calling_ thread
has taken the mutex (that's the point about recursive locking).  This
breaks the callers of qemu_mutex_iothread_locked completely.

The variable need not be in sync between the different threads; each
thread only needs to know about itself.  The current code works because:

1) the iothread mutex is not locked before qemu_mutex_lock_iothread

2) the iothread mutex is not locked after qemu_mutex_unlock_iothread

3) qemu_cond_wait doesn't matter because the thread does not run during
a qemu_cond_wait.
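To see why, here is a minimal self-contained sketch of the scheme
(illustrative only: plain pthreads and made-up names, not the actual
cpus.c code):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Per-thread flag: "do *I* currently hold big_lock?" */
    static __thread bool locked_by_me;

    static bool big_lock_held_by_me(void)
    {
        return locked_by_me;
    }

    static void big_lock_acquire(void)
    {
        pthread_mutex_lock(&big_lock);
        locked_by_me = true;    /* flips only around lock/unlock: 1) and 2) */
    }

    static void big_lock_release(void)
    {
        locked_by_me = false;
        pthread_mutex_unlock(&big_lock);
    }

    /* pthread_cond_wait() drops and re-takes big_lock internally, but
     * the waiting thread does not run in the meantime: 3). Its own copy
     * of locked_by_me is therefore never observed while it is stale. */

Each thread reads and writes only its own copy of the flag, so no
cross-thread synchronization of the flag is needed.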

> I can trigger this problem reproducibly on a Windows machine, whereas Linux works fine.

What are the symptoms, and what is your Windows compiler?

Paolo


* Re: [Qemu-devel] [PATCH] qemu_mutex_iothread_locked not correctly synchronized
From: David Engraf @ 2015-11-25  9:08 UTC
  To: Paolo Bonzini, qemu-devel; +Cc: Fam Zheng

Hi Paolo,

On 24.11.2015 16:54, Paolo Bonzini wrote:
> On 24/11/2015 16:43, David Engraf wrote:
>> Commit afbe70535ff1a8a7a32910cc15ebecc0ba92e7da introduced the function
>> qemu_mutex_iothread_locked to avoid recursive locking of the iothread
>> lock. The iothread_locked variable uses the __thread attribute, which
>> results in a per-thread storage location, whereas the qemu_global_mutex
>> is not thread-specific. This leads to race conditions because the
>> variable is not in sync between threads.
>
> Frankly, this makes no sense.  You're modifying the
> qemu_mutex_iothread_locked function to return whether _some_ thread has
> taken the mutex, but the function tells you whether _the calling_ thread
> has taken the mutex (that's the point about recursive locking).  This
> breaks the callers of qemu_mutex_iothread_locked completely.
>
> The variable need not be in sync between the different threads; each
> thread only needs to know about itself.  The current code works because:
>
> 1) the iothread mutex is not locked before qemu_mutex_lock_iothread
>
> 2) the iothread mutex is not locked after qemu_mutex_unlock_iothread
>
> 3) qemu_cond_wait doesn't matter because the thread does not run during
> a qemu_cond_wait.

But this is my issue on Windows:

qemu_tcg_cpu_thread_fn
-> qemu_tcg_wait_io_event
    -> qemu_cond_wait acquires the mutex

qemu_tcg_cpu_thread_fn
-> tcg_exec_all -> tcg_cpu_exec -> cpu_exec
    -> cpu_exec ends up in calling prepare_mmio_access
       prepare_mmio_access uses qemu_mutex_iothread_locked to check
       whether the lock is already held by this thread, but it returns
       0 because qemu_cond_wait doesn't set iothread_locked even
       though the mutex is locked. Thus the function calls
       qemu_mutex_lock_iothread, which tries to acquire the mutex
       again. On Windows this results in an assertion:

Assertion failed: mutex->owner == 0, file util/qemu-thread-win32.c, line 60
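For reference, a simplified model of the owner check that fires here (a
hypothetical reconstruction from the assertion message, not the literal
util/qemu-thread-win32.c source):

    #include <assert.h>
    #include <windows.h>

    typedef struct {
        CRITICAL_SECTION cs;
        DWORD owner;                       /* 0 = unowned */
    } OwnedMutex;

    static void owned_mutex_init(OwnedMutex *m)
    {
        InitializeCriticalSection(&m->cs);
        m->owner = 0;
    }

    static void owned_mutex_lock(OwnedMutex *m)
    {
        /* EnterCriticalSection is recursive for the owning thread, so a
         * second lock from the same thread gets past it and trips the
         * owner check below instead of deadlocking. */
        EnterCriticalSection(&m->cs);
        assert(m->owner == 0);
        m->owner = GetCurrentThreadId();
    }

    static void owned_mutex_unlock(OwnedMutex *m)
    {
        assert(m->owner == GetCurrentThreadId());
        m->owner = 0;
        LeaveCriticalSection(&m->cs);
    }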

I'm using a self compiled, cross gcc-5.2.0 mingw compiler.

David


* Re: [Qemu-devel] [PATCH v2] qemu_mutex_iothread_locked not correctly synchronized
From: David Engraf @ 2015-11-25 12:31 UTC
  To: Paolo Bonzini, qemu-devel; +Cc: Fam Zheng

Hi Paolo,

please check the new version. I dropped the change to the
iothread_locked variable itself. But I still need to set the correct
value of iothread_locked around qemu_cond_wait. This fixes my race
condition on Windows when prepare_mmio_access is called and checks
whether the lock is already held by the current thread.

David


Commit afbe70535ff1a8a7a32910cc15ebecc0ba92e7da introduced the function
qemu_mutex_iothread_locked to avoid recursive locking of the iothread
lock. But the function qemu_cond_wait acquires the lock directly,
without modifying iothread_locked. This leads to a condition where
qemu_tcg_wait_io_event acquires the mutex via qemu_cond_wait and later
calls tcg_exec_all -> tcg_cpu_exec -> cpu_exec ->
prepare_mmio_access, which checks whether the current thread already
holds the lock. It does so by reading the variable iothread_locked,
which is still 0, so the function tries to acquire the mutex again.

The patch adds a function called qemu_cond_wait_iothread to keep
iothread_locked in sync.

Signed-off-by: David Engraf <david.engraf@sysgo.com>
---
  cpus.c | 27 ++++++++++++++++++---------
  1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/cpus.c b/cpus.c
index 877bd70..e4d2d4b 100644
--- a/cpus.c
+++ b/cpus.c
@@ -70,6 +70,8 @@ static CPUState *next_cpu;
  int64_t max_delay;
  int64_t max_advance;

+static void qemu_cond_wait_iothread(QemuCond *cond);
+
  /* vcpu throttling controls */
  static QEMUTimer *throttle_timer;
  static unsigned int throttle_percentage;
@@ -920,7 +922,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
      while (!atomic_mb_read(&wi.done)) {
          CPUState *self_cpu = current_cpu;

-        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_work_cond);
          current_cpu = self_cpu;
      }
  }
@@ -998,11 +1000,11 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
         /* Start accounting real time to the virtual clock if the CPUs
            are idle.  */
          qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(cpu->halt_cond);
      }

      while (iothread_requesting_mutex) {
-        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_io_proceeded_cond);
      }

      CPU_FOREACH(cpu) {
@@ -1013,7 +1015,7 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
  static void qemu_kvm_wait_io_event(CPUState *cpu)
  {
      while (cpu_thread_is_idle(cpu)) {
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(cpu->halt_cond);
      }

      qemu_kvm_eat_signals(cpu);
@@ -1123,7 +1125,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)

      /* wait for initial kick-off after machine start */
      while (first_cpu->stopped) {
-        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(first_cpu->halt_cond);

          /* process any pending work */
          CPU_FOREACH(cpu) {
@@ -1215,6 +1217,13 @@ bool qemu_mutex_iothread_locked(void)
      return iothread_locked;
  }

+static void qemu_cond_wait_iothread(QemuCond *cond)
+{
+    iothread_locked = false;
+    qemu_cond_wait(cond, &qemu_global_mutex);
+    iothread_locked = true;
+}
+
  void qemu_mutex_lock_iothread(void)
  {
      atomic_inc(&iothread_requesting_mutex);
@@ -1277,7 +1286,7 @@ void pause_all_vcpus(void)
      }

      while (!all_vcpus_paused()) {
-        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_pause_cond);
          CPU_FOREACH(cpu) {
              qemu_cpu_kick(cpu);
          }
@@ -1326,7 +1335,7 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
          cpu->hThread = qemu_thread_get_handle(cpu->thread);
  #endif
          while (!cpu->created) {
-            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+            qemu_cond_wait_iothread(&qemu_cpu_cond);
          }
          tcg_cpu_thread = cpu->thread;
      } else {
@@ -1347,7 +1356,7 @@ static void qemu_kvm_start_vcpu(CPUState *cpu)
      qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                         cpu, QEMU_THREAD_JOINABLE);
      while (!cpu->created) {
-        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_cpu_cond);
      }
  }

@@ -1363,7 +1372,7 @@ static void qemu_dummy_start_vcpu(CPUState *cpu)
      qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                         QEMU_THREAD_JOINABLE);
      while (!cpu->created) {
-        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+        qemu_cond_wait_iothread(&qemu_cpu_cond);
      }
  }

-- 
1.9.1



On 25.11.2015 10:08, David Engraf wrote:
> Hi Paolo,
>
> On 24.11.2015 16:54, Paolo Bonzini wrote:
>> On 24/11/2015 16:43, David Engraf wrote:
>>> Commit afbe70535ff1a8a7a32910cc15ebecc0ba92e7da introduced the function
>>> qemu_mutex_iothread_locked to avoid recursive locking of the iothread
>>> lock. The iothread_locked variable uses the __thread attribute, which
>>> results in a per-thread storage location, whereas the qemu_global_mutex
>>> is not thread-specific. This leads to race conditions because the
>>> variable is not in sync between threads.
>>
>> Frankly, this makes no sense.  You're modifying the
>> qemu_mutex_iothread_locked function to return whether _some_ thread has
>> taken the mutex, but the function tells you whether _the calling_ thread
>> has taken the mutex (that's the point about recursive locking).  This
>> breaks the callers of qemu_mutex_iothread_locked completely.
>>
>> The variable need not be in sync between the different threads; each
>> thread only needs to know about itself.  The current code works because:
>>
>> 1) the iothread mutex is not locked before qemu_mutex_lock_iothread
>>
>> 2) the iothread mutex is not locked after qemu_mutex_unlock_iothread
>>
>> 3) qemu_cond_wait doesn't matter because the thread does not run during
>> a qemu_cond_wait.
>
> But this is my issue on Windows:
>
> qemu_tcg_cpu_thread_fn
> -> qemu_tcg_wait_io_event
>     -> qemu_cond_wait acquires the mutex
>
> qemu_tcg_cpu_thread_fn
> -> tcg_exec_all -> tcg_cpu_exec -> cpu_exec
>     -> cpu_exec ends up in calling prepare_mmio_access
>        prepare_mmio_access uses qemu_mutex_iothread_locked to check
>        whether the lock is already held by this thread, but it returns
>        0 because qemu_cond_wait doesn't set iothread_locked even
>        though the mutex is locked. Thus the function calls
>        qemu_mutex_lock_iothread, which tries to acquire the mutex
>        again. On Windows this results in an assertion:
>
> Assertion failed: mutex->owner == 0, file util/qemu-thread-win32.c, line 60
>
> I'm using a self compiled, cross gcc-5.2.0 mingw compiler.
>
> David

-- 
Best regards,

David Engraf
Product Engineer

SYSGO AG
Office Mainz
Am Pfaffenstein 14 / D-55270 Klein-Winternheim / Germany
Phone: +49-6136-9948-0 / Fax: +49-6136-9948-10
VoIP: SIP:den@sysgo.com
E-mail: david.engraf@sysgo.com / Web: http://www.sysgo.com

Handelsregister/Commercial Registry: HRB Mainz 90 HRB 8066
Vorstand/Executive Board: Knut Degen (CEO), Kai Sablotny (COO)
Aufsichtsratsvorsitzender/Supervisory Board Chairman: Pascal Bouchiat
USt-Id-Nr./VAT-Id-No.: DE 149062328


* Re: [Qemu-devel] [PATCH v2] qemu_mutex_iothread_locked not correctly synchronized
From: Paolo Bonzini @ 2015-11-25 13:26 UTC
  To: David Engraf, qemu-devel; +Cc: Fam Zheng



On 25/11/2015 13:31, David Engraf wrote:
> Hi Paolo,
> 
> please check the new version. I removed changing the iothread_locked
> variable. But I still need to set the correct value of iothread_locked
> when using qemu_cond_wait. 

No, you don't.  Who is reading iothread_locked during
qemu_cond_wait_iothread?  No one, because it is a thread-local variable
whose address is never taken.

Paolo

> +static void qemu_cond_wait_iothread(QemuCond *cond)
> +{
> +    iothread_locked = false;
> +    qemu_cond_wait(cond, &qemu_global_mutex);
> +    iothread_locked = true;
> +}


* Re: [Qemu-devel] [PATCH v2] qemu_mutex_iothread_locked not correctly synchronized
From: David Engraf @ 2015-11-25 14:04 UTC
  To: Paolo Bonzini, qemu-devel; +Cc: Fam Zheng

On 25.11.2015 14:26, Paolo Bonzini wrote:
>
>
> On 25/11/2015 13:31, David Engraf wrote:
>> Hi Paolo,
>>
>> please check the new version. I removed changing the iothread_locked
>> variable. But I still need to set the correct value of iothread_locked
>> when using qemu_cond_wait.
>
> No, you don't.  Who is reading iothread_locked during
> qemu_cond_wait_iothread?  No one, because it is a thread-local variable
> whose address is never taken.

prepare_mmio_access is reading iothread_locked by using 
qemu_mutex_iothread_locked after qemu_tcg_wait_io_event calls 
qemu_cond_wait. All on the same thread.

qemu_tcg_cpu_thread_fn
-> qemu_tcg_wait_io_event
    -> qemu_cond_wait acquires the mutex

qemu_tcg_cpu_thread_fn
-> tcg_exec_all -> tcg_cpu_exec -> cpu_exec
-> cpu_exec ends up in calling prepare_mmio_access

David


* Re: [Qemu-devel] [PATCH v2] qemu_mutex_iothread_locked not correctly synchronized
From: Paolo Bonzini @ 2015-11-25 14:36 UTC
  To: David Engraf, qemu-devel; +Cc: Fam Zheng



On 25/11/2015 15:04, David Engraf wrote:
>>>
>>
>> No, you don't.  Who is reading iothread_locked during
>> qemu_cond_wait_iothread?  No one, because it is a thread-local variable
>> whose address is never taken.
> 
> prepare_mmio_access is reading iothread_locked by using
> qemu_mutex_iothread_locked after qemu_tcg_wait_io_event calls
> qemu_cond_wait. All on the same thread.

Sure, but who has set iothread_locked to false during the execution of
qemu_cond_wait?  No one, because it's a thread-local variable.  If it's
true before qemu_cond_wait, it will be true after qemu_cond_wait and you
don't need qemu_cond_wait_iothread... unless your compiler is broken and
doesn't generate TLS properly.

Can you compile cpus.c with -S and attach it?

Paolo

> qemu_tcg_cpu_thread_fn
> -> qemu_tcg_wait_io_event
>    -> qemu_cond_wait acquires the mutex
> 
> qemu_tcg_cpu_thread_fn
> -> tcg_exec_all -> tcg_cpu_exec -> cpu_exec
> -> cpu_exec ends up in calling prepare_mmio_access


* Re: [Qemu-devel] [PATCH v2] qemu_mutex_iothread_locked not correctly synchronized
From: David Engraf @ 2015-11-25 15:48 UTC
  To: Paolo Bonzini, qemu-devel; +Cc: Fam Zheng

On 25.11.2015 15:36, Paolo Bonzini wrote:
>
>
> On 25/11/2015 15:04, David Engraf wrote:
>>>>
>>>
>>> No, you don't.  Who is reading iothread_locked during
>>> qemu_cond_wait_iothread?  No one, because it is a thread-local variable
>>> whose address is never taken.
>>
>> prepare_mmio_access is reading iothread_locked by using
>> qemu_mutex_iothread_locked after qemu_tcg_wait_io_event calls
>> qemu_cond_wait. All on the same thread.
>
> Sure, but who has set iothread_locked to false during the execution of
> qemu_cond_wait?  No one, because it's a thread-local variable.  If it's
> true before qemu_cond_wait, it will be true after qemu_cond_wait and you
> don't need qemu_cond_wait_iothread... unless your compiler is broken and
> doesn't generate TLS properly.

Indeed, TLS handling is broken. The address of iothread_locked is always 
the same between threads and I can see that a different thread sets 
iothread_locked to false, thus my current thread uses an invalid state. 
I will have to check why my compiler produces invalid TLS code.
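For anyone else hitting this, a minimal diagnostic (illustrative, not
part of any patch; pthreads used here for brevity) is to print the
address of a __thread variable from two threads. With working TLS the
two addresses differ; with the broken code generation they are
identical:

    #include <pthread.h>
    #include <stdio.h>

    static __thread int tls_flag;

    static void *report(void *name)
    {
        printf("%s: &tls_flag = %p\n", (const char *)name, (void *)&tls_flag);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, report, "thread 1");
        pthread_create(&t2, NULL, report, "thread 2");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }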

David


> Can you compile cpus.c with -S and attach it?
>
> Paolo
>
>> qemu_tcg_cpu_thread_fn
>> -> qemu_tcg_wait_io_event
>>     -> qemu_cond_wait acquires the mutex
>>
>> qemu_tcg_cpu_thread_fn
>> -> tcg_exec_all -> tcg_cpu_exec -> cpu_exec
>> -> cpu_exec ends up in calling prepare_mmio_access


* Re: [Qemu-devel] [PATCH v2] qemu_mutex_iothread_locked not correctly synchronized
From: Paolo Bonzini @ 2015-11-25 16:16 UTC
  To: David Engraf, qemu-devel; +Cc: Stefan Weil, Fam Zheng



On 25/11/2015 16:48, David Engraf wrote:
> 
> Indeed, TLS handling is broken. The address of iothread_locked is always
> the same between threads and I can see that a different thread sets
> iothread_locked to false, thus my current thread uses an invalid state.
> I will have to check why my compiler produces invalid TLS code.

That rings a bell, I think there are different CRT libraries or
something like that.  Stefan, what would break TLS under Windows?

Paolo


* Re: [Qemu-devel] [PATCH v2] qemu_mutex_iothread_locked not correctly synchronized
From: David Engraf @ 2015-11-26  9:12 UTC
  To: Paolo Bonzini, qemu-devel; +Cc: Stefan Weil, Fam Zheng

On 25.11.2015 17:16, Paolo Bonzini wrote:
>
>
> On 25/11/2015 16:48, David Engraf wrote:
>>
>> Indeed, TLS handling is broken. The address of iothread_locked is always
>> the same between threads and I can see that a different thread sets
>> iothread_locked to false, thus my current thread uses an invalid state.
>> I will have to check why my compiler produces invalid TLS code.
>
> That rings a bell, I think there are different CRT libraries or
> something like that.  Stefan, what would break TLS under Windows?

"--extra-cflags=-mthreads" is the solution. iothread_locked is unique 
for each thread now. I saw that this is already mentioned in the 
documentation [1], but why isn't it enabled by default if QEMU 
requires this option on Windows? Without this option, even the latest 
version of gcc doesn't produce working TLS code.

Many thanks for your support!

David


[1] http://wiki.qemu.org/Hosts/W32


* Re: [Qemu-devel] [PATCH v2] qemu_mutex_iothread_locked not correctly synchronized
From: Stefan Weil @ 2015-11-26 11:25 UTC
  To: David Engraf, Paolo Bonzini, qemu-devel; +Cc: Fam Zheng

On 26.11.2015 10:12, David Engraf wrote:
> On 25.11.2015 17:16, Paolo Bonzini wrote:
>>
>>
>> On 25/11/2015 16:48, David Engraf wrote:
>>>
>>> Indeed, TLS handling is broken. The address of iothread_locked is
>>> always
>>> the same between threads and I can see that a different thread sets
>>> iothread_locked to false, thus my current thread uses an invalid state.
>>> I will have to check why my compiler produces invalid TLS code.
>>
>> That rings a bell, I think there are different CRT libraries or
>> something like that.  Stefan, what would break TLS under Windows?
>
> "--extra-cflags=-mthreads" is the solution. iothread_locked is unique
> for each thread now. I saw that this is already mentioned in the
> documentation [1], but why isn't it enabled by default if QEMU
> requires this option on Windows? Without this option, even the latest
> version of gcc doesn't produce working TLS code.
>
> Many thanks for your support!
>
> David
>
> [1] http://wiki.qemu.org/Hosts/W32

Hi David,

I just prepared a patch which will enable that option by default
for all compilations targeting Windows.

Thanks for your report!

Regards,
Stefan


* Re: [Qemu-devel] [PATCH v2] qemu_mutex_iothread_locked not correctly synchronized
From: David Engraf @ 2015-11-26 14:26 UTC
  To: Stefan Weil, Paolo Bonzini, qemu-devel; +Cc: Fam Zheng

On 26.11.2015 12:25, Stefan Weil wrote:
> On 26.11.2015 10:12, David Engraf wrote:
>> On 25.11.2015 17:16, Paolo Bonzini wrote:
>>>
>>>
>>> On 25/11/2015 16:48, David Engraf wrote:
>>>>
>>>> Indeed, TLS handling is broken. The address of iothread_locked is
>>>> always
>>>> the same between threads and I can see that a different thread sets
>>>> iothread_locked to false, thus my current thread uses an invalid state.
>>>> I will have to check why my compiler produces invalid TLS code.
>>>
>>> That rings a bell, I think there are different CRT libraries or
>>> something like that.  Stefan, what would break TLS under Windows?
>>
>> "--extra-cflags=-mthreads" is the solution. iothread_locked is unique
>> for each thread now. I saw that this is already mentioned in the
>> documentation [1], but why isn't it enabled by default if QEMU
>> requires this option on Windows? Without this option, even the latest
>> version of gcc doesn't produce working TLS code.
>>
>> Many thanks for your support!
>>
>> David
>>
>> [1] http://wiki.qemu.org/Hosts/W32
>
> Hi David,
>
> I just prepared a patch which will enable that option by default
> for all compilations targeting Windows.
>
> Thanks for your report!

Thank you.

David

