All of lore.kernel.org
 help / color / mirror / Atom feed
* [patch uq/master 0/4] uq/master: iothread consume signals via sigtimedwait and cleanups
@ 2010-02-17 22:14 ` Marcelo Tosatti
  0 siblings, 0 replies; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-17 22:14 UTC (permalink / raw)
  To: kvm; +Cc: avi

See individual patches for details.



^ permalink raw reply	[flat|nested] 15+ messages in thread

* [Qemu-devel] [patch uq/master 0/4] uq/master: iothread consume signals via sigtimedwait and cleanups
@ 2010-02-17 22:14 ` Marcelo Tosatti
  0 siblings, 0 replies; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-17 22:14 UTC (permalink / raw)
  To: kvm, qemu-devel; +Cc: avi

See individual patches for details.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [patch uq/master 1/4] qemu: block SIGCHLD in vcpu thread(s)
  2010-02-17 22:14 ` [Qemu-devel] " Marcelo Tosatti
  (?)
@ 2010-02-17 22:14 ` Marcelo Tosatti
  -1 siblings, 0 replies; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-17 22:14 UTC (permalink / raw)
  To: kvm; +Cc: avi, Marcelo Tosatti

[-- Attachment #1: block-sigchld --]
[-- Type: text/plain, Size: 519 bytes --]

Otherwise a vcpu thread can run the SIGCHLD handler, causing
waitpid() from the iothread to fail.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c
+++ qemu/vl.c
@@ -3514,6 +3514,7 @@ static void block_io_signals(void)
     sigaddset(&set, SIGUSR2);
     sigaddset(&set, SIGIO);
     sigaddset(&set, SIGALRM);
+    sigaddset(&set, SIGCHLD);
     pthread_sigmask(SIG_BLOCK, &set, NULL);
 
     sigemptyset(&set);



^ permalink raw reply	[flat|nested] 15+ messages in thread

* [patch uq/master 2/4] qemu: kvm specific wait_io_event
  2010-02-17 22:14 ` [Qemu-devel] " Marcelo Tosatti
@ 2010-02-17 22:14   ` Marcelo Tosatti
  -1 siblings, 0 replies; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-17 22:14 UTC (permalink / raw)
  To: kvm; +Cc: avi, Marcelo Tosatti

[-- Attachment #1: kvm-specific-wait-io-event --]
[-- Type: text/plain, Size: 1857 bytes --]

In KVM mode the global mutex is released when vcpus are executing,
which means acquiring the fairness mutex is not required.

Also for KVM there is one thread per vcpu, so tcg_has_work is meaningless.

Add a new qemu_wait_io_event_common function to hold common code
between TCG/KVM.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c
+++ qemu/vl.c
@@ -3382,6 +3382,7 @@ static QemuCond qemu_pause_cond;
 static void block_io_signals(void);
 static void unblock_io_signals(void);
 static int tcg_has_work(void);
+static int cpu_has_work(CPUState *env);
 
 static int qemu_init_main_loop(void)
 {
@@ -3402,6 +3403,15 @@ static int qemu_init_main_loop(void)
     return 0;
 }
 
+static void qemu_wait_io_event_common(CPUState *env)
+{
+    if (env->stop) {
+        env->stop = 0;
+        env->stopped = 1;
+        qemu_cond_signal(&qemu_pause_cond);
+    }
+}
+
 static void qemu_wait_io_event(CPUState *env)
 {
     while (!tcg_has_work())
@@ -3418,11 +3428,15 @@ static void qemu_wait_io_event(CPUState 
     qemu_mutex_unlock(&qemu_fair_mutex);
 
     qemu_mutex_lock(&qemu_global_mutex);
-    if (env->stop) {
-        env->stop = 0;
-        env->stopped = 1;
-        qemu_cond_signal(&qemu_pause_cond);
-    }
+    qemu_wait_io_event_common(env);
+}
+
+static void qemu_kvm_wait_io_event(CPUState *env)
+{
+    while (!cpu_has_work(env))
+        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
+
+    qemu_wait_io_event_common(env);
 }
 
 static int qemu_cpu_exec(CPUState *env);
@@ -3448,7 +3462,7 @@ static void *kvm_cpu_thread_fn(void *arg
     while (1) {
         if (cpu_can_run(env))
             qemu_cpu_exec(env);
-        qemu_wait_io_event(env);
+        qemu_kvm_wait_io_event(env);
     }
 
     return NULL;



^ permalink raw reply	[flat|nested] 15+ messages in thread

* [Qemu-devel] [patch uq/master 2/4] qemu: kvm specific wait_io_event
@ 2010-02-17 22:14   ` Marcelo Tosatti
  0 siblings, 0 replies; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-17 22:14 UTC (permalink / raw)
  To: kvm, qemu-devel; +Cc: Marcelo Tosatti, avi

[-- Attachment #1: kvm-specific-wait-io-event --]
[-- Type: text/plain, Size: 1855 bytes --]

In KVM mode the global mutex is released when vcpus are executing,
which means acquiring the fairness mutex is not required.

Also for KVM there is one thread per vcpu, so tcg_has_work is meaningless.

Add a new qemu_wait_io_event_common function to hold common code
between TCG/KVM.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c
+++ qemu/vl.c
@@ -3382,6 +3382,7 @@ static QemuCond qemu_pause_cond;
 static void block_io_signals(void);
 static void unblock_io_signals(void);
 static int tcg_has_work(void);
+static int cpu_has_work(CPUState *env);
 
 static int qemu_init_main_loop(void)
 {
@@ -3402,6 +3403,15 @@ static int qemu_init_main_loop(void)
     return 0;
 }
 
+static void qemu_wait_io_event_common(CPUState *env)
+{
+    if (env->stop) {
+        env->stop = 0;
+        env->stopped = 1;
+        qemu_cond_signal(&qemu_pause_cond);
+    }
+}
+
 static void qemu_wait_io_event(CPUState *env)
 {
     while (!tcg_has_work())
@@ -3418,11 +3428,15 @@ static void qemu_wait_io_event(CPUState 
     qemu_mutex_unlock(&qemu_fair_mutex);
 
     qemu_mutex_lock(&qemu_global_mutex);
-    if (env->stop) {
-        env->stop = 0;
-        env->stopped = 1;
-        qemu_cond_signal(&qemu_pause_cond);
-    }
+    qemu_wait_io_event_common(env);
+}
+
+static void qemu_kvm_wait_io_event(CPUState *env)
+{
+    while (!cpu_has_work(env))
+        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
+
+    qemu_wait_io_event_common(env);
 }
 
 static int qemu_cpu_exec(CPUState *env);
@@ -3448,7 +3462,7 @@ static void *kvm_cpu_thread_fn(void *arg
     while (1) {
         if (cpu_can_run(env))
             qemu_cpu_exec(env);
-        qemu_wait_io_event(env);
+        qemu_kvm_wait_io_event(env);
     }
 
     return NULL;

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [patch uq/master 3/4] qemu: kvm: consume internal signal with sigtimedwait
  2010-02-17 22:14 ` [Qemu-devel] " Marcelo Tosatti
@ 2010-02-17 22:14   ` Marcelo Tosatti
  -1 siblings, 0 replies; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-17 22:14 UTC (permalink / raw)
  To: kvm; +Cc: avi, Marcelo Tosatti

[-- Attachment #1: kvm-block-cpu-exit-signal --]
[-- Type: text/plain, Size: 7434 bytes --]

Change the way the internal qemu signal, used for communication between
the iothread and vcpus, is handled.

Block and consume it with sigtimedwait on the outer vcpu loop, which
allows more precise timing control.

Change from standard signal (SIGUSR1) to real-time one, so multiple
signals are not collapsed.

Set the signal number on KVM's in-kernel allowed sigmask.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>


Index: qemu-kvm/vl.c
===================================================================
--- qemu-kvm.orig/vl.c
+++ qemu-kvm/vl.c
@@ -271,6 +271,12 @@ uint8_t qemu_uuid[16];
 static QEMUBootSetHandler *boot_set_handler;
 static void *boot_set_opaque;
 
+#ifdef SIGRTMIN
+#define SIG_IPI (SIGRTMIN+4)
+#else
+#define SIG_IPI SIGUSR1
+#endif
+
 static int default_serial = 1;
 static int default_parallel = 1;
 static int default_virtcon = 1;
@@ -3379,7 +3385,8 @@ static QemuCond qemu_cpu_cond;
 static QemuCond qemu_system_cond;
 static QemuCond qemu_pause_cond;
 
-static void block_io_signals(void);
+static void tcg_block_io_signals(void);
+static void kvm_block_io_signals(CPUState *env);
 static void unblock_io_signals(void);
 static int tcg_has_work(void);
 static int cpu_has_work(CPUState *env);
@@ -3431,11 +3438,36 @@ static void qemu_wait_io_event(CPUState 
     qemu_wait_io_event_common(env);
 }
 
+static void qemu_kvm_eat_signal(CPUState *env, int timeout)
+{
+    struct timespec ts;
+    int r, e;
+    siginfo_t siginfo;
+    sigset_t waitset;
+
+    ts.tv_sec = timeout / 1000;
+    ts.tv_nsec = (timeout % 1000) * 1000000;
+
+    sigemptyset(&waitset);
+    sigaddset(&waitset, SIG_IPI);
+
+    qemu_mutex_unlock(&qemu_global_mutex);
+    r = sigtimedwait(&waitset, &siginfo, &ts);
+    e = errno;
+    qemu_mutex_lock(&qemu_global_mutex);
+
+    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
+        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
+        exit(1);
+    }
+}
+
 static void qemu_kvm_wait_io_event(CPUState *env)
 {
     while (!cpu_has_work(env))
         qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
 
+    qemu_kvm_eat_signal(env, 0);
     qemu_wait_io_event_common(env);
 }
 
@@ -3445,11 +3477,12 @@ static void *kvm_cpu_thread_fn(void *arg
 {
     CPUState *env = arg;
 
-    block_io_signals();
     qemu_thread_self(env->thread);
     if (kvm_enabled())
         kvm_init_vcpu(env);
 
+    kvm_block_io_signals(env);
+
     /* signal CPU creation */
     qemu_mutex_lock(&qemu_global_mutex);
     env->created = 1;
@@ -3474,7 +3507,7 @@ static void *tcg_cpu_thread_fn(void *arg
 {
     CPUState *env = arg;
 
-    block_io_signals();
+    tcg_block_io_signals();
     qemu_thread_self(env->thread);
 
     /* signal CPU creation */
@@ -3500,7 +3533,7 @@ void qemu_cpu_kick(void *_env)
     CPUState *env = _env;
     qemu_cond_broadcast(env->halt_cond);
     if (kvm_enabled())
-        qemu_thread_signal(env->thread, SIGUSR1);
+        qemu_thread_signal(env->thread, SIG_IPI);
 }
 
 int qemu_cpu_self(void *_env)
@@ -3519,7 +3552,7 @@ static void cpu_signal(int sig)
         cpu_exit(cpu_single_env);
 }
 
-static void block_io_signals(void)
+static void tcg_block_io_signals(void)
 {
     sigset_t set;
     struct sigaction sigact;
@@ -3532,12 +3565,44 @@ static void block_io_signals(void)
     pthread_sigmask(SIG_BLOCK, &set, NULL);
 
     sigemptyset(&set);
-    sigaddset(&set, SIGUSR1);
+    sigaddset(&set, SIG_IPI);
     pthread_sigmask(SIG_UNBLOCK, &set, NULL);
 
     memset(&sigact, 0, sizeof(sigact));
     sigact.sa_handler = cpu_signal;
-    sigaction(SIGUSR1, &sigact, NULL);
+    sigaction(SIG_IPI, &sigact, NULL);
+}
+
+static void dummy_signal(int sig)
+{
+}
+
+static void kvm_block_io_signals(CPUState *env)
+{
+    int r;
+    sigset_t set;
+    struct sigaction sigact;
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR2);
+    sigaddset(&set, SIGIO);
+    sigaddset(&set, SIGALRM);
+    sigaddset(&set, SIGCHLD);
+    sigaddset(&set, SIG_IPI);
+    pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+    pthread_sigmask(SIG_BLOCK, NULL, &set);
+    sigdelset(&set, SIG_IPI);
+
+    memset(&sigact, 0, sizeof(sigact));
+    sigact.sa_handler = dummy_signal;
+    sigaction(SIG_IPI, &sigact, NULL);
+
+    r = kvm_set_signal_mask(env, &set);
+    if (r) {
+        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
+        exit(1);
+    }
 }
 
 static void unblock_io_signals(void)
@@ -3551,7 +3616,7 @@ static void unblock_io_signals(void)
     pthread_sigmask(SIG_UNBLOCK, &set, NULL);
 
     sigemptyset(&set);
-    sigaddset(&set, SIGUSR1);
+    sigaddset(&set, SIG_IPI);
     pthread_sigmask(SIG_BLOCK, &set, NULL);
 }
 
@@ -3560,7 +3625,7 @@ static void qemu_signal_lock(unsigned in
     qemu_mutex_lock(&qemu_fair_mutex);
 
     while (qemu_mutex_trylock(&qemu_global_mutex)) {
-        qemu_thread_signal(tcg_cpu_thread, SIGUSR1);
+        qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
         if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
             break;
     }
@@ -3601,7 +3666,7 @@ static void pause_all_vcpus(void)
 
     while (penv) {
         penv->stop = 1;
-        qemu_thread_signal(penv->thread, SIGUSR1);
+        qemu_thread_signal(penv->thread, SIG_IPI);
         qemu_cpu_kick(penv);
         penv = (CPUState *)penv->next_cpu;
     }
@@ -3610,7 +3675,7 @@ static void pause_all_vcpus(void)
         qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
         penv = first_cpu;
         while (penv) {
-            qemu_thread_signal(penv->thread, SIGUSR1);
+            qemu_thread_signal(penv->thread, SIG_IPI);
             penv = (CPUState *)penv->next_cpu;
         }
     }
@@ -3623,7 +3688,7 @@ static void resume_all_vcpus(void)
     while (penv) {
         penv->stop = 0;
         penv->stopped = 0;
-        qemu_thread_signal(penv->thread, SIGUSR1);
+        qemu_thread_signal(penv->thread, SIG_IPI);
         qemu_cpu_kick(penv);
         penv = (CPUState *)penv->next_cpu;
     }
Index: qemu-kvm/kvm-all.c
===================================================================
--- qemu-kvm.orig/kvm-all.c
+++ qemu-kvm/kvm-all.c
@@ -771,6 +771,7 @@ int kvm_cpu_exec(CPUState *env)
         kvm_arch_post_run(env, run);
 
         if (ret == -EINTR || ret == -EAGAIN) {
+            cpu_exit(env);
             dprintf("io window exit\n");
             ret = 0;
             break;
@@ -1116,3 +1117,21 @@ void kvm_remove_all_breakpoints(CPUState
 {
 }
 #endif /* !KVM_CAP_SET_GUEST_DEBUG */
+
+int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
+{
+    struct kvm_signal_mask *sigmask;
+    int r;
+
+    if (!sigset)
+        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
+
+    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));
+
+    sigmask->len = 8;
+    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
+    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
+    free(sigmask);
+
+    return r;
+}
Index: qemu-kvm/kvm.h
===================================================================
--- qemu-kvm.orig/kvm.h
+++ qemu-kvm/kvm.h
@@ -53,6 +53,7 @@ int kvm_remove_breakpoint(CPUState *curr
                           target_ulong len, int type);
 void kvm_remove_all_breakpoints(CPUState *current_env);
 int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap);
+int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset);
 
 int kvm_pit_in_kernel(void);
 int kvm_irqchip_in_kernel(void);



^ permalink raw reply	[flat|nested] 15+ messages in thread

* [Qemu-devel] [patch uq/master 3/4] qemu: kvm: consume internal signal with sigtimedwait
@ 2010-02-17 22:14   ` Marcelo Tosatti
  0 siblings, 0 replies; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-17 22:14 UTC (permalink / raw)
  To: kvm, qemu-devel; +Cc: Marcelo Tosatti, avi

[-- Attachment #1: kvm-block-cpu-exit-signal --]
[-- Type: text/plain, Size: 7432 bytes --]

Change the way the internal qemu signal, used for communication between
the iothread and vcpus, is handled.

Block and consume it with sigtimedwait on the outer vcpu loop, which
allows more precise timing control.

Change from standard signal (SIGUSR1) to real-time one, so multiple
signals are not collapsed.

Set the signal number on KVM's in-kernel allowed sigmask.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>


Index: qemu-kvm/vl.c
===================================================================
--- qemu-kvm.orig/vl.c
+++ qemu-kvm/vl.c
@@ -271,6 +271,12 @@ uint8_t qemu_uuid[16];
 static QEMUBootSetHandler *boot_set_handler;
 static void *boot_set_opaque;
 
+#ifdef SIGRTMIN
+#define SIG_IPI (SIGRTMIN+4)
+#else
+#define SIG_IPI SIGUSR1
+#endif
+
 static int default_serial = 1;
 static int default_parallel = 1;
 static int default_virtcon = 1;
@@ -3379,7 +3385,8 @@ static QemuCond qemu_cpu_cond;
 static QemuCond qemu_system_cond;
 static QemuCond qemu_pause_cond;
 
-static void block_io_signals(void);
+static void tcg_block_io_signals(void);
+static void kvm_block_io_signals(CPUState *env);
 static void unblock_io_signals(void);
 static int tcg_has_work(void);
 static int cpu_has_work(CPUState *env);
@@ -3431,11 +3438,36 @@ static void qemu_wait_io_event(CPUState 
     qemu_wait_io_event_common(env);
 }
 
+static void qemu_kvm_eat_signal(CPUState *env, int timeout)
+{
+    struct timespec ts;
+    int r, e;
+    siginfo_t siginfo;
+    sigset_t waitset;
+
+    ts.tv_sec = timeout / 1000;
+    ts.tv_nsec = (timeout % 1000) * 1000000;
+
+    sigemptyset(&waitset);
+    sigaddset(&waitset, SIG_IPI);
+
+    qemu_mutex_unlock(&qemu_global_mutex);
+    r = sigtimedwait(&waitset, &siginfo, &ts);
+    e = errno;
+    qemu_mutex_lock(&qemu_global_mutex);
+
+    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
+        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
+        exit(1);
+    }
+}
+
 static void qemu_kvm_wait_io_event(CPUState *env)
 {
     while (!cpu_has_work(env))
         qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
 
+    qemu_kvm_eat_signal(env, 0);
     qemu_wait_io_event_common(env);
 }
 
@@ -3445,11 +3477,12 @@ static void *kvm_cpu_thread_fn(void *arg
 {
     CPUState *env = arg;
 
-    block_io_signals();
     qemu_thread_self(env->thread);
     if (kvm_enabled())
         kvm_init_vcpu(env);
 
+    kvm_block_io_signals(env);
+
     /* signal CPU creation */
     qemu_mutex_lock(&qemu_global_mutex);
     env->created = 1;
@@ -3474,7 +3507,7 @@ static void *tcg_cpu_thread_fn(void *arg
 {
     CPUState *env = arg;
 
-    block_io_signals();
+    tcg_block_io_signals();
     qemu_thread_self(env->thread);
 
     /* signal CPU creation */
@@ -3500,7 +3533,7 @@ void qemu_cpu_kick(void *_env)
     CPUState *env = _env;
     qemu_cond_broadcast(env->halt_cond);
     if (kvm_enabled())
-        qemu_thread_signal(env->thread, SIGUSR1);
+        qemu_thread_signal(env->thread, SIG_IPI);
 }
 
 int qemu_cpu_self(void *_env)
@@ -3519,7 +3552,7 @@ static void cpu_signal(int sig)
         cpu_exit(cpu_single_env);
 }
 
-static void block_io_signals(void)
+static void tcg_block_io_signals(void)
 {
     sigset_t set;
     struct sigaction sigact;
@@ -3532,12 +3565,44 @@ static void block_io_signals(void)
     pthread_sigmask(SIG_BLOCK, &set, NULL);
 
     sigemptyset(&set);
-    sigaddset(&set, SIGUSR1);
+    sigaddset(&set, SIG_IPI);
     pthread_sigmask(SIG_UNBLOCK, &set, NULL);
 
     memset(&sigact, 0, sizeof(sigact));
     sigact.sa_handler = cpu_signal;
-    sigaction(SIGUSR1, &sigact, NULL);
+    sigaction(SIG_IPI, &sigact, NULL);
+}
+
+static void dummy_signal(int sig)
+{
+}
+
+static void kvm_block_io_signals(CPUState *env)
+{
+    int r;
+    sigset_t set;
+    struct sigaction sigact;
+
+    sigemptyset(&set);
+    sigaddset(&set, SIGUSR2);
+    sigaddset(&set, SIGIO);
+    sigaddset(&set, SIGALRM);
+    sigaddset(&set, SIGCHLD);
+    sigaddset(&set, SIG_IPI);
+    pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+    pthread_sigmask(SIG_BLOCK, NULL, &set);
+    sigdelset(&set, SIG_IPI);
+
+    memset(&sigact, 0, sizeof(sigact));
+    sigact.sa_handler = dummy_signal;
+    sigaction(SIG_IPI, &sigact, NULL);
+
+    r = kvm_set_signal_mask(env, &set);
+    if (r) {
+        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
+        exit(1);
+    }
 }
 
 static void unblock_io_signals(void)
@@ -3551,7 +3616,7 @@ static void unblock_io_signals(void)
     pthread_sigmask(SIG_UNBLOCK, &set, NULL);
 
     sigemptyset(&set);
-    sigaddset(&set, SIGUSR1);
+    sigaddset(&set, SIG_IPI);
     pthread_sigmask(SIG_BLOCK, &set, NULL);
 }
 
@@ -3560,7 +3625,7 @@ static void qemu_signal_lock(unsigned in
     qemu_mutex_lock(&qemu_fair_mutex);
 
     while (qemu_mutex_trylock(&qemu_global_mutex)) {
-        qemu_thread_signal(tcg_cpu_thread, SIGUSR1);
+        qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
         if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
             break;
     }
@@ -3601,7 +3666,7 @@ static void pause_all_vcpus(void)
 
     while (penv) {
         penv->stop = 1;
-        qemu_thread_signal(penv->thread, SIGUSR1);
+        qemu_thread_signal(penv->thread, SIG_IPI);
         qemu_cpu_kick(penv);
         penv = (CPUState *)penv->next_cpu;
     }
@@ -3610,7 +3675,7 @@ static void pause_all_vcpus(void)
         qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
         penv = first_cpu;
         while (penv) {
-            qemu_thread_signal(penv->thread, SIGUSR1);
+            qemu_thread_signal(penv->thread, SIG_IPI);
             penv = (CPUState *)penv->next_cpu;
         }
     }
@@ -3623,7 +3688,7 @@ static void resume_all_vcpus(void)
     while (penv) {
         penv->stop = 0;
         penv->stopped = 0;
-        qemu_thread_signal(penv->thread, SIGUSR1);
+        qemu_thread_signal(penv->thread, SIG_IPI);
         qemu_cpu_kick(penv);
         penv = (CPUState *)penv->next_cpu;
     }
Index: qemu-kvm/kvm-all.c
===================================================================
--- qemu-kvm.orig/kvm-all.c
+++ qemu-kvm/kvm-all.c
@@ -771,6 +771,7 @@ int kvm_cpu_exec(CPUState *env)
         kvm_arch_post_run(env, run);
 
         if (ret == -EINTR || ret == -EAGAIN) {
+            cpu_exit(env);
             dprintf("io window exit\n");
             ret = 0;
             break;
@@ -1116,3 +1117,21 @@ void kvm_remove_all_breakpoints(CPUState
 {
 }
 #endif /* !KVM_CAP_SET_GUEST_DEBUG */
+
+int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
+{
+    struct kvm_signal_mask *sigmask;
+    int r;
+
+    if (!sigset)
+        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
+
+    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));
+
+    sigmask->len = 8;
+    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
+    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
+    free(sigmask);
+
+    return r;
+}
Index: qemu-kvm/kvm.h
===================================================================
--- qemu-kvm.orig/kvm.h
+++ qemu-kvm/kvm.h
@@ -53,6 +53,7 @@ int kvm_remove_breakpoint(CPUState *curr
                           target_ulong len, int type);
 void kvm_remove_all_breakpoints(CPUState *current_env);
 int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap);
+int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset);
 
 int kvm_pit_in_kernel(void);
 int kvm_irqchip_in_kernel(void);

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [patch uq/master 4/4] qemu: kvm: remove pre-entry exit_request check with iothread enabled
  2010-02-17 22:14 ` [Qemu-devel] " Marcelo Tosatti
@ 2010-02-17 22:14   ` Marcelo Tosatti
  -1 siblings, 0 replies; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-17 22:14 UTC (permalink / raw)
  To: kvm; +Cc: avi, Marcelo Tosatti

[-- Attachment #1: kvm-vcpu-loop-exit-request --]
[-- Type: text/plain, Size: 625 bytes --]

With SIG_IPI blocked, vcpu loop exit notification happens via -EAGAIN
from KVM_RUN.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: qemu/kvm-all.c
===================================================================
--- qemu.orig/kvm-all.c
+++ qemu/kvm-all.c
@@ -753,11 +753,13 @@ int kvm_cpu_exec(CPUState *env)
     dprintf("kvm_cpu_exec()\n");
 
     do {
+#ifndef CONFIG_IOTHREAD
         if (env->exit_request) {
             dprintf("interrupt exit requested\n");
             ret = 0;
             break;
         }
+#endif
 
         if (env->kvm_vcpu_dirty) {
             kvm_arch_put_registers(env);



^ permalink raw reply	[flat|nested] 15+ messages in thread

* [Qemu-devel] [patch uq/master 4/4] qemu: kvm: remove pre-entry exit_request check with iothread enabled
@ 2010-02-17 22:14   ` Marcelo Tosatti
  0 siblings, 0 replies; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-17 22:14 UTC (permalink / raw)
  To: kvm, qemu-devel; +Cc: Marcelo Tosatti, avi

[-- Attachment #1: kvm-vcpu-loop-exit-request --]
[-- Type: text/plain, Size: 623 bytes --]

With SIG_IPI blocked, vcpu loop exit notification happens via -EAGAIN
from KVM_RUN.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: qemu/kvm-all.c
===================================================================
--- qemu.orig/kvm-all.c
+++ qemu/kvm-all.c
@@ -753,11 +753,13 @@ int kvm_cpu_exec(CPUState *env)
     dprintf("kvm_cpu_exec()\n");
 
     do {
+#ifndef CONFIG_IOTHREAD
         if (env->exit_request) {
             dprintf("interrupt exit requested\n");
             ret = 0;
             break;
         }
+#endif
 
         if (env->kvm_vcpu_dirty) {
             kvm_arch_put_registers(env);

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [patch uq/master 0/4] uq/master: iothread consume signals via sigtimedwait and cleanups
  2010-02-17 22:14 ` [Qemu-devel] " Marcelo Tosatti
                   ` (4 preceding siblings ...)
  (?)
@ 2010-02-18  8:26 ` Avi Kivity
  -1 siblings, 0 replies; 15+ messages in thread
From: Avi Kivity @ 2010-02-18  8:26 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: kvm

On 02/18/2010 12:14 AM, Marcelo Tosatti wrote:
> See individual patches for details.
>
>    

Please repost, copying qemu-devel, since this code is to be queued for 
qemu.git.

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to panic.


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [patch uq/master 2/4] qemu: kvm specific wait_io_event
  2010-02-17 22:14   ` [Qemu-devel] " Marcelo Tosatti
  (?)
@ 2010-02-18  8:29   ` Avi Kivity
  2010-02-18 13:58     ` Marcelo Tosatti
  -1 siblings, 1 reply; 15+ messages in thread
From: Avi Kivity @ 2010-02-18  8:29 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: kvm

On 02/18/2010 12:14 AM, Marcelo Tosatti wrote:
> In KVM mode the global mutex is released when vcpus are executing,
> which means acquiring the fairness mutex is not required.
>
> Also for KVM there is one thread per vcpu, so tcg_has_work is meaningless.
>
> Add a new qemu_wait_io_event_common function to hold common code
> between TCG/KVM.
>
> Signed-off-by: Marcelo Tosatti<mtosatti@redhat.com>
>
> Index: qemu/vl.c
> ===================================================================
> --- qemu.orig/vl.c
> +++ qemu/vl.c
> @@ -3382,6 +3382,7 @@ static QemuCond qemu_pause_cond;
>   static void block_io_signals(void);
>   static void unblock_io_signals(void);
>   static int tcg_has_work(void);
> +static int cpu_has_work(CPUState *env);
>
>   static int qemu_init_main_loop(void)
>   {
> @@ -3402,6 +3403,15 @@ static int qemu_init_main_loop(void)
>       return 0;
>   }
>
> +static void qemu_wait_io_event_common(CPUState *env)
> +{
> +    if (env->stop) {
> +        env->stop = 0;
> +        env->stopped = 1;
> +        qemu_cond_signal(&qemu_pause_cond);
> +    }
> +}
> +
>   static void qemu_wait_io_event(CPUState *env)
>   {
>       while (!tcg_has_work())
> @@ -3418,11 +3428,15 @@ static void qemu_wait_io_event(CPUState
>       qemu_mutex_unlock(&qemu_fair_mutex);
>
>       qemu_mutex_lock(&qemu_global_mutex);
> -    if (env->stop) {
> -        env->stop = 0;
> -        env->stopped = 1;
> -        qemu_cond_signal(&qemu_pause_cond);
> -    }
> +    qemu_wait_io_event_common(env);
> +}
> +
> +static void qemu_kvm_wait_io_event(CPUState *env)
> +{
> +    while (!cpu_has_work(env))
> +        qemu_cond_timedwait(env->halt_cond,&qemu_global_mutex, 1000);
> +
> +    qemu_wait_io_event_common(env);
>   }
>    

Shouldn't kvm specific code be in kvm-all.c?

>
>   static int qemu_cpu_exec(CPUState *env);
> @@ -3448,7 +3462,7 @@ static void *kvm_cpu_thread_fn(void *arg
>       while (1) {
>           if (cpu_can_run(env))
>               qemu_cpu_exec(env);
> -        qemu_wait_io_event(env);
> +        qemu_kvm_wait_io_event(env);
>       }
>
>       return NULL;
>    

Well, kvm_cpu_thread_fn() apparently isn't.

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to panic.


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [patch uq/master 2/4] qemu: kvm specific wait_io_event
  2010-02-18  8:29   ` Avi Kivity
@ 2010-02-18 13:58     ` Marcelo Tosatti
  2010-02-18 14:33       ` Avi Kivity
  0 siblings, 1 reply; 15+ messages in thread
From: Marcelo Tosatti @ 2010-02-18 13:58 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm

On Thu, Feb 18, 2010 at 10:29:35AM +0200, Avi Kivity wrote:
> >+static void qemu_kvm_wait_io_event(CPUState *env)
> >+{
> >+    while (!cpu_has_work(env))
> >+        qemu_cond_timedwait(env->halt_cond,&qemu_global_mutex, 1000);
> >+
> >+    qemu_wait_io_event_common(env);
> >  }
> 
> Shouldn't kvm specific code be in kvm-all.c?

The context is in vl.c, so don't see much gain.

> >
> >  static int qemu_cpu_exec(CPUState *env);
> >@@ -3448,7 +3462,7 @@ static void *kvm_cpu_thread_fn(void *arg
> >      while (1) {
> >          if (cpu_can_run(env))
> >              qemu_cpu_exec(env);
> >-        qemu_wait_io_event(env);
> >+        qemu_kvm_wait_io_event(env);
> >      }
> >
> >      return NULL;
> 
> Well, kvm_cpu_thread_fn() apparently isn't.



^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [patch uq/master 2/4] qemu: kvm specific wait_io_event
  2010-02-18 13:58     ` Marcelo Tosatti
@ 2010-02-18 14:33       ` Avi Kivity
  0 siblings, 0 replies; 15+ messages in thread
From: Avi Kivity @ 2010-02-18 14:33 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: kvm

On 02/18/2010 03:58 PM, Marcelo Tosatti wrote:
> On Thu, Feb 18, 2010 at 10:29:35AM +0200, Avi Kivity wrote:
>    
>>> +static void qemu_kvm_wait_io_event(CPUState *env)
>>> +{
>>> +    while (!cpu_has_work(env))
>>> +        qemu_cond_timedwait(env->halt_cond,&qemu_global_mutex, 1000);
>>> +
>>> +    qemu_wait_io_event_common(env);
>>>   }
>>>        
>> Shouldn't kvm specific code be in kvm-all.c?
>>      
> The context is in vl.c, so don't see much gain.
>    

ok.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [patch uq/master 0/4] uq/master: iothread consume signals via sigtimedwait and cleanups
  2010-02-17 22:14 ` [Qemu-devel] " Marcelo Tosatti
@ 2010-02-22  8:59   ` Avi Kivity
  -1 siblings, 0 replies; 15+ messages in thread
From: Avi Kivity @ 2010-02-22  8:59 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: qemu-devel, kvm

On 02/18/2010 12:14 AM, Marcelo Tosatti wrote:
> See individual patches for details.
>
>
>    

Applied, thanks.

-- 
error compiling committee.c: too many arguments to function

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [Qemu-devel] Re: [patch uq/master 0/4] uq/master: iothread consume signals via sigtimedwait and cleanups
@ 2010-02-22  8:59   ` Avi Kivity
  0 siblings, 0 replies; 15+ messages in thread
From: Avi Kivity @ 2010-02-22  8:59 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: qemu-devel, kvm

On 02/18/2010 12:14 AM, Marcelo Tosatti wrote:
> See individual patches for details.
>
>
>    

Applied, thanks.

-- 
error compiling committee.c: too many arguments to function

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2010-02-22  8:59 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-02-17 22:14 [patch uq/master 0/4] uq/master: iothread consume signals via sigtimedwait and cleanups Marcelo Tosatti
2010-02-17 22:14 ` [Qemu-devel] " Marcelo Tosatti
2010-02-17 22:14 ` [patch uq/master 1/4] qemu: block SIGCHLD in vcpu thread(s) Marcelo Tosatti
2010-02-17 22:14 ` [patch uq/master 2/4] qemu: kvm specific wait_io_event Marcelo Tosatti
2010-02-17 22:14   ` [Qemu-devel] " Marcelo Tosatti
2010-02-18  8:29   ` Avi Kivity
2010-02-18 13:58     ` Marcelo Tosatti
2010-02-18 14:33       ` Avi Kivity
2010-02-17 22:14 ` [patch uq/master 3/4] qemu: kvm: consume internal signal with sigtimedwait Marcelo Tosatti
2010-02-17 22:14   ` [Qemu-devel] " Marcelo Tosatti
2010-02-17 22:14 ` [patch uq/master 4/4] qemu: kvm: remove pre-entry exit_request check with iothread enabled Marcelo Tosatti
2010-02-17 22:14   ` [Qemu-devel] " Marcelo Tosatti
2010-02-18  8:26 ` [patch uq/master 0/4] uq/master: iothread consume signals via sigtimedwait and cleanups Avi Kivity
2010-02-22  8:59 ` Avi Kivity
2010-02-22  8:59   ` [Qemu-devel] " Avi Kivity

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.