From mboxrd@z Thu Jan  1 00:00:00 1970
From: Anthony Liguori <aliguori@us.ibm.com>
Date: Mon, 24 Jan 2011 15:00:43 -0600
Message-Id: <1295902845-29807-6-git-send-email-aliguori@us.ibm.com>
In-Reply-To: <1295902845-29807-1-git-send-email-aliguori@us.ibm.com>
References: <1295902845-29807-1-git-send-email-aliguori@us.ibm.com>
Subject: [Qemu-devel] [PATCH 5/7] threads: get rid of QemuCond and teach callers about GCond
List-Id: qemu-devel.nongnu.org
To: qemu-devel@nongnu.org
Cc: Anthony Liguori, Stefan Hajnoczi, Marcelo Tosatti, Paul Brook, Paulo Bonzini, Arun Bharadwaj

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

diff --git a/cpu-defs.h b/cpu-defs.h
index 8d4bf86..9343824 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -204,7 +204,7 @@ typedef struct CPUWatchpoint {
     uint32_t stop;   /* Stop request */                                \
     uint32_t stopped; /* Artificially stopped */                       \
     struct QemuThread *thread;                                         \
-    struct QemuCond *halt_cond;                                        \
+    struct _GCond *halt_cond;                                          \
     struct qemu_work_item *queued_work_first, *queued_work_last;       \
     const char *cpu_model_str;                                         \
     struct KVMState *kvm_state;                                        \
diff --git a/cpus.c b/cpus.c
index 0f8e33b..bc7363f 100644
--- a/cpus.c
+++ b/cpus.c
@@ -327,15 +327,15 @@ static GStaticMutex qemu_fair_mutex;
 static QemuThread io_thread;
 
 static QemuThread *tcg_cpu_thread;
-static QemuCond *tcg_halt_cond;
+static GCond *tcg_halt_cond;
 
 static int qemu_system_ready;
 /* cpu creation */
-static QemuCond qemu_cpu_cond;
+static GCond *qemu_cpu_cond;
 /* system init */
-static QemuCond qemu_system_cond;
-static QemuCond qemu_pause_cond;
-static QemuCond qemu_work_cond;
+static GCond *qemu_system_cond;
+static GCond *qemu_pause_cond;
+static GCond *qemu_work_cond;
 
 static void tcg_init_ipi(void);
 static void kvm_init_ipi(CPUState *env);
@@ -412,10 +412,10 @@ int qemu_init_main_loop(void)
     if (ret)
         return ret;
 
-    qemu_cond_init(&qemu_cpu_cond);
-    qemu_cond_init(&qemu_system_cond);
-    qemu_cond_init(&qemu_pause_cond);
-    qemu_cond_init(&qemu_work_cond);
+    qemu_cpu_cond = g_cond_new();
+    qemu_system_cond = g_cond_new();
+    qemu_pause_cond = g_cond_new();
+    qemu_work_cond = g_cond_new();
     g_static_mutex_init(&qemu_fair_mutex);
     g_static_mutex_init(&qemu_global_mutex);
     g_static_mutex_lock(&qemu_global_mutex);
@@ -428,7 +428,7 @@ int qemu_init_main_loop(void)
 void qemu_main_loop_start(void)
 {
     qemu_system_ready = 1;
-    qemu_cond_broadcast(&qemu_system_cond);
+    g_cond_broadcast(qemu_system_cond);
 }
 
 void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
@@ -454,8 +454,8 @@ void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
     while (!wi.done) {
         CPUState *self_env = cpu_single_env;
 
-        qemu_cond_wait(&qemu_work_cond,
-                       g_static_mutex_get_mutex(&qemu_global_mutex));
+        g_cond_wait(qemu_work_cond,
+                    g_static_mutex_get_mutex(&qemu_global_mutex));
         cpu_single_env = self_env;
     }
 }
@@ -473,7 +473,7 @@ static void flush_queued_work(CPUState *env)
         wi->done = true;
     }
     env->queued_work_last = NULL;
-    qemu_cond_broadcast(&qemu_work_cond);
+    g_cond_broadcast(qemu_work_cond);
 }
 
 static void qemu_wait_io_event_common(CPUState *env)
@@ -481,7 +481,7 @@ static void qemu_wait_io_event_common(CPUState *env)
     if (env->stop) {
         env->stop = 0;
         env->stopped = 1;
-        qemu_cond_signal(&qemu_pause_cond);
+        g_cond_signal(qemu_pause_cond);
     }
     flush_queued_work(env);
 }
@@ -490,9 +490,13 @@ static void qemu_tcg_wait_io_event(void)
 {
     CPUState *env;
 
-    while (!any_cpu_has_work())
-        qemu_cond_timedwait(tcg_halt_cond,
-                            g_static_mutex_get_mutex(&qemu_global_mutex), 1000);
+    while (!any_cpu_has_work()) {
+        GTimeVal t;
+        g_get_current_time(&t);
+        g_time_val_add(&t, 1000000);
+        g_cond_timed_wait(tcg_halt_cond,
+                          g_static_mutex_get_mutex(&qemu_global_mutex), &t);
+    }
 
     g_static_mutex_unlock(&qemu_global_mutex);
 
@@ -586,9 +590,13 @@ static void qemu_kvm_eat_signal(CPUState *env, int timeout)
 
 static void qemu_kvm_wait_io_event(CPUState *env)
 {
-    while (!cpu_has_work(env))
-        qemu_cond_timedwait(env->halt_cond,
-                            g_static_mutex_get_mutex(&qemu_global_mutex), 1000);
+    while (!cpu_has_work(env)) {
+        GTimeVal t;
+        g_get_current_time(&t);
+        g_time_val_add(&t, 1000000);
+        g_cond_timed_wait(env->halt_cond,
+                          g_static_mutex_get_mutex(&qemu_global_mutex), &t);
+    }
 
     qemu_kvm_eat_signal(env, 0);
     qemu_wait_io_event_common(env);
@@ -609,12 +617,16 @@ static void *kvm_cpu_thread_fn(void *arg)
 
     /* signal CPU creation */
     env->created = 1;
-    qemu_cond_signal(&qemu_cpu_cond);
+    g_cond_signal(qemu_cpu_cond);
 
     /* and wait for machine initialization */
-    while (!qemu_system_ready)
-        qemu_cond_timedwait(&qemu_system_cond,
-                            g_static_mutex_get_mutex(&qemu_global_mutex), 100);
+    while (!qemu_system_ready) {
+        GTimeVal t;
+        g_get_current_time(&t);
+        g_time_val_add(&t, 100000);
+        g_cond_timed_wait(qemu_system_cond,
+                          g_static_mutex_get_mutex(&qemu_global_mutex), &t);
+    }
 
     while (1) {
         if (cpu_can_run(env))
@@ -636,12 +648,16 @@ static void *tcg_cpu_thread_fn(void *arg)
     g_static_mutex_lock(&qemu_global_mutex);
     for (env = first_cpu; env != NULL; env = env->next_cpu)
         env->created = 1;
-    qemu_cond_signal(&qemu_cpu_cond);
+    g_cond_signal(qemu_cpu_cond);
 
     /* and wait for machine initialization */
-    while (!qemu_system_ready)
-        qemu_cond_timedwait(&qemu_system_cond,
-                            g_static_mutex_get_mutex(&qemu_global_mutex), 100);
+    while (!qemu_system_ready) {
+        GTimeVal t;
+        g_get_current_time(&t);
+        g_time_val_add(&t, 100000);
+        g_cond_timed_wait(qemu_system_cond,
+                          g_static_mutex_get_mutex(&qemu_global_mutex), &t);
+    }
 
     while (1) {
         cpu_exec_all();
@@ -654,7 +670,7 @@ static void *tcg_cpu_thread_fn(void *arg)
 void qemu_cpu_kick(void *_env)
 {
     CPUState *env = _env;
-    qemu_cond_broadcast(env->halt_cond);
+    g_cond_broadcast(env->halt_cond);
     qemu_thread_signal(env->thread, SIG_IPI);
 }
 
@@ -784,8 +800,11 @@ void pause_all_vcpus(void)
     }
 
     while (!all_vcpus_paused()) {
-        qemu_cond_timedwait(&qemu_pause_cond,
-                            g_static_mutex_get_mutex(&qemu_global_mutex), 100);
+        GTimeVal t;
+        g_get_current_time(&t);
+        g_time_val_add(&t, 10000);
+        g_cond_timed_wait(qemu_pause_cond,
+                          g_static_mutex_get_mutex(&qemu_global_mutex), &t);
         penv = first_cpu;
         while (penv) {
             qemu_cpu_kick(penv);
@@ -812,13 +831,15 @@ static void tcg_init_vcpu(void *_env)
     /* share a single thread for all cpus with TCG */
     if (!tcg_cpu_thread) {
         env->thread = qemu_mallocz(sizeof(QemuThread));
-        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
-        qemu_cond_init(env->halt_cond);
+        env->halt_cond = g_cond_new();
         qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
-        while (env->created == 0)
-            qemu_cond_timedwait(&qemu_cpu_cond,
-                                g_static_mutex_get_mutex(&qemu_global_mutex),
-                                100);
+        while (env->created == 0) {
+            GTimeVal t;
+            g_get_current_time(&t);
+            g_time_val_add(&t, 10000);
+            g_cond_timed_wait(qemu_cpu_cond,
+                              g_static_mutex_get_mutex(&qemu_global_mutex), &t);
+        }
         tcg_cpu_thread = env->thread;
         tcg_halt_cond = env->halt_cond;
     } else {
@@ -830,12 +851,15 @@ static void tcg_init_vcpu(void *_env)
 static void kvm_start_vcpu(CPUState *env)
 {
     env->thread = qemu_mallocz(sizeof(QemuThread));
-    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
-    qemu_cond_init(env->halt_cond);
+    env->halt_cond = g_cond_new();
     qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
-    while (env->created == 0)
-        qemu_cond_timedwait(&qemu_cpu_cond,
-                            g_static_mutex_get_mutex(&qemu_global_mutex), 100);
+    while (env->created == 0) {
+        GTimeVal t;
+        g_get_current_time(&t);
+        g_time_val_add(&t, 100000);
+        g_cond_timed_wait(qemu_cpu_cond,
+                          g_static_mutex_get_mutex(&qemu_global_mutex), &t);
+    }
 }
 
 void qemu_init_vcpu(void *_env)
diff --git a/qemu-thread.c b/qemu-thread.c
index df17eb4..748da5e 100644
--- a/qemu-thread.c
+++ b/qemu-thread.c
@@ -14,43 +14,6 @@
 #include "qemu-common.h"
 #include "qemu-thread.h"
 
-void qemu_cond_init(QemuCond *cond)
-{
-    cond->cond = g_cond_new();
-}
-
-void qemu_cond_destroy(QemuCond *cond)
-{
-    g_cond_free(cond->cond);
-}
-
-void qemu_cond_signal(QemuCond *cond)
-{
-    g_cond_signal(cond->cond);
-}
-
-void qemu_cond_broadcast(QemuCond *cond)
-{
-    g_cond_broadcast(cond->cond);
-}
-
-void qemu_cond_wait(QemuCond *cond, GMutex *mutex)
-{
-    g_cond_wait(cond->cond, mutex);
-}
-
-int qemu_cond_timedwait(QemuCond *cond, GMutex *mutex, uint64_t msecs)
-{
-    GTimeVal abs_time;
-
-    assert(cond->cond != NULL);
-
-    g_get_current_time(&abs_time);
-    g_time_val_add(&abs_time, msecs * 1000); /* MSEC to USEC */
-
-    return g_cond_timed_wait(cond->cond, mutex, &abs_time);
-}
-
 struct trampoline_data {
     QemuThread *thread;
diff --git a/qemu-thread.h b/qemu-thread.h
index dec6848..2c99c94 100644
--- a/qemu-thread.h
+++ b/qemu-thread.h
@@ -3,25 +3,13 @@
 #include <glib.h>
 #include <pthread.h>
 
-struct QemuCond {
-    GCond *cond;
-};
-
 struct QemuThread {
     GThread *thread;
     pthread_t tid;
 };
 
-typedef struct QemuCond QemuCond;
 typedef struct QemuThread QemuThread;
 
-void qemu_cond_init(QemuCond *cond);
-void qemu_cond_destroy(QemuCond *cond);
-void qemu_cond_signal(QemuCond *cond);
-void qemu_cond_broadcast(QemuCond *cond);
-void qemu_cond_wait(QemuCond *cond, GMutex *mutex);
-int qemu_cond_timedwait(QemuCond *cond, GMutex *mutex, uint64_t msecs);
-
 void qemu_thread_create(QemuThread *thread,
                         void *(*start_routine)(void*),
                         void *arg);
diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
index 48f567e..0c2b1a0 100644
--- a/ui/vnc-jobs-async.c
+++ b/ui/vnc-jobs-async.c
@@ -49,7 +49,7 @@
  */
 
 struct VncJobQueue {
-    QemuCond cond;
+    GCond *cond;
     GStaticMutex mutex;
     QemuThread thread;
     Buffer buffer;
@@ -108,7 +108,7 @@ void vnc_job_push(VncJob *job)
         qemu_free(job);
     } else {
         QTAILQ_INSERT_TAIL(&queue->jobs, job, next);
-        qemu_cond_broadcast(&queue->cond);
+        g_cond_broadcast(queue->cond);
     }
     vnc_unlock_queue(queue);
 }
@@ -152,7 +152,7 @@ void vnc_jobs_join(VncState *vs)
 {
     vnc_lock_queue(queue);
     while (vnc_has_job_locked(vs)) {
-        qemu_cond_wait(&queue->cond, g_static_mutex_get_mutex(&queue->mutex));
+        g_cond_wait(queue->cond, g_static_mutex_get_mutex(&queue->mutex));
     }
     vnc_unlock_queue(queue);
 }
@@ -195,7 +195,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
 
     vnc_lock_queue(queue);
     while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
-        qemu_cond_wait(&queue->cond, g_static_mutex_get_mutex(&queue->mutex));
+        g_cond_wait(queue->cond, g_static_mutex_get_mutex(&queue->mutex));
     }
     /* Here job can only be NULL if queue->exit is true */
     job = QTAILQ_FIRST(&queue->jobs);
@@ -265,7 +265,7 @@ disconnected:
     vnc_lock_queue(queue);
     QTAILQ_REMOVE(&queue->jobs, job, next);
     vnc_unlock_queue(queue);
-    qemu_cond_broadcast(&queue->cond);
+    g_cond_broadcast(queue->cond);
     qemu_free(job);
     return 0;
 }
@@ -274,7 +274,7 @@ static VncJobQueue *vnc_queue_init(void)
 {
     VncJobQueue *queue = qemu_mallocz(sizeof(VncJobQueue));
 
-    qemu_cond_init(&queue->cond);
+    queue->cond = g_cond_new();
     g_static_mutex_init(&queue->mutex);
     QTAILQ_INIT(&queue->jobs);
     return queue;
@@ -282,7 +282,7 @@ static VncJobQueue *vnc_queue_init(void)
 
 static void vnc_queue_clear(VncJobQueue *q)
 {
-    qemu_cond_destroy(&queue->cond);
+    g_cond_free(queue->cond);
     g_static_mutex_free(&queue->mutex);
     buffer_free(&queue->buffer);
     qemu_free(q);
@@ -327,5 +327,5 @@ void vnc_stop_worker_thread(void)
     queue->exit = true;
     vnc_unlock_queue(queue);
     vnc_jobs_clear(NULL);
-    qemu_cond_broadcast(&queue->cond);
+    g_cond_broadcast(queue->cond);
 }
-- 
1.7.0.4
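
For reference, every converted call site above follows the same timed-wait pattern from the GLib 2.x threading API (g_cond_new()/g_cond_timed_wait(), deprecated since GLib 2.32): build an absolute GTimeVal deadline, then call g_cond_timed_wait() with the mutex held. The following is a minimal standalone sketch of that pattern, not part of the patch; the helper name cond_timed_wait_msec() is invented purely for illustration.

/*
 * Illustrative sketch only -- not part of the patch.  Shows the
 * GTimeVal + g_cond_timed_wait() pattern open-coded at each call
 * site above, against the GLib 2.x API used by this series.
 */
#include <glib.h>

static GStaticMutex lock = G_STATIC_MUTEX_INIT;
static GCond *cond;

/* Wait on @c for at most @msecs milliseconds; @m must be held on entry. */
static gboolean cond_timed_wait_msec(GCond *c, GMutex *m, glong msecs)
{
    GTimeVal deadline;

    g_get_current_time(&deadline);
    g_time_val_add(&deadline, msecs * 1000);    /* msec -> usec, absolute deadline */
    return g_cond_timed_wait(c, m, &deadline);  /* FALSE on timeout */
}

int main(void)
{
    g_thread_init(NULL);       /* required before GLib 2.32 to enable threading */
    cond = g_cond_new();

    g_static_mutex_lock(&lock);
    /* nothing signals the condition here, so this returns after ~100 ms */
    cond_timed_wait_msec(cond, g_static_mutex_get_mutex(&lock), 100);
    g_static_mutex_unlock(&lock);

    g_cond_free(cond);
    return 0;
}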