From: Robert Foley <robert.foley@linaro.org>
To: qemu-devel@nongnu.org
Cc: alex.bennee@linaro.org, cota@braap.org, peter.puhov@linaro.org,
	robert.foley@linaro.org, Eduardo Habkost <ehabkost@redhat.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Richard Henderson <rth@twiddle.net>,
	Wenchao Wang <wenchao.wang@intel.com>,
	Colin Xu <colin.xu@intel.com>,
	Roman Bolshakov <r.bolshakov@yadro.com>,
	Marcelo Tosatti <mtosatti@redhat.com>,
	Sunil Muthuswamy <sunilmut@microsoft.com>,
	haxm-team@intel.com (open list:X86 HAXM CPUs),
	kvm@vger.kernel.org (open list:X86 KVM CPUs)
Subject: [PATCH v10 20/73] i386: convert to cpu_halted
Date: Wed, 17 Jun 2020 17:01:38 -0400	[thread overview]
Message-ID: <20200617210231.4393-21-robert.foley@linaro.org> (raw)
In-Reply-To: <20200617210231.4393-1-robert.foley@linaro.org>

From: "Emilio G. Cota" <cota@braap.org>

Cc: Eduardo Habkost <ehabkost@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[RF: Converted new code in i386/hax-all.c to cpu_halted]
Signed-off-by: Robert Foley <robert.foley@linaro.org>
---
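Note for reviewers: this patch is a mechanical conversion of direct
cpu->halted reads and writes to the cpu_halted()/cpu_halted_set()
accessors added earlier in this series ("cpu: define cpu_halted
helpers"), presumably so that the field can later be guarded by the
per-CPU lock that the series introduces. As a rough sketch only (the
real helpers are defined in that earlier patch and may differ in
detail), the accessors take the CPU's lock around the field access:

    /*
     * Sketch of the accessors assumed by this conversion; the actual
     * definitions come from "cpu: define cpu_halted helpers" and
     * "cpu: introduce cpu_mutex_lock/unlock" earlier in this series
     * and may not match this outline exactly.
     */
    static inline uint32_t cpu_halted(CPUState *cpu)
    {
        uint32_t ret;

        cpu_mutex_lock(cpu);
        ret = cpu->halted;
        cpu_mutex_unlock(cpu);
        return ret;
    }

    static inline void cpu_halted_set(CPUState *cpu, uint32_t val)
    {
        cpu_mutex_lock(cpu);
        cpu->halted = val;
        cpu_mutex_unlock(cpu);
    }
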
 target/i386/cpu.c         |  2 +-
 target/i386/cpu.h         |  2 +-
 target/i386/hax-all.c     |  6 +++---
 target/i386/helper.c      |  4 ++--
 target/i386/hvf/hvf.c     |  4 ++--
 target/i386/hvf/x86hvf.c  |  4 ++--
 target/i386/kvm.c         | 10 +++++-----
 target/i386/misc_helper.c |  2 +-
 target/i386/whpx-all.c    |  6 +++---
 9 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index b1b311baa2..4d3ab0f3a2 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -6084,7 +6084,7 @@ static void x86_cpu_reset(DeviceState *dev)
     /* We hard-wire the BSP to the first CPU. */
     apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
 
-    s->halted = !cpu_is_bsp(cpu);
+    cpu_halted_set(s, !cpu_is_bsp(cpu));
 
     if (kvm_enabled()) {
         kvm_arch_reset_vcpu(cpu);
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 7d77efd9e4..c618b90568 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1880,7 +1880,7 @@ static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
                            sipi_vector << 12,
                            env->segs[R_CS].limit,
                            env->segs[R_CS].flags);
-    cs->halted = 0;
+    cpu_halted_set(cs, 0);
 }
 
 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index c93bb23a44..acfb7a6e10 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -511,7 +511,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
     if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) ||
         (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
-        cpu->halted = 0;
+        cpu_halted_set(cpu, 0);
     }
 
     if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
@@ -529,7 +529,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
         hax_vcpu_sync_state(env, 1);
     }
 
-    if (cpu->halted) {
+    if (cpu_halted(cpu)) {
         /* If this vcpu is halted, we must not ask HAXM to run it. Instead, we
          * break out of hax_smp_cpu_exec() as if this vcpu had executed HLT.
          * That way, this vcpu thread will be trapped in qemu_wait_io_event(),
@@ -594,7 +594,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
                 !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
                 /* hlt instruction with interrupt disabled is shutdown */
                 env->eflags |= IF_MASK;
-                cpu->halted = 1;
+                cpu_halted_set(cpu, 1);
                 cpu->exception_index = EXCP_HLT;
                 ret = 1;
             }
diff --git a/target/i386/helper.c b/target/i386/helper.c
index c3a6e4fabe..058de4073d 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -450,7 +450,7 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
                      (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                      (env->a20_mask >> 20) & 1,
                      (env->hflags >> HF_SMM_SHIFT) & 1,
-                     cs->halted);
+                     cpu_halted(cs));
     } else
 #endif
     {
@@ -477,7 +477,7 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
                      (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                      (env->a20_mask >> 20) & 1,
                      (env->hflags >> HF_SMM_SHIFT) & 1,
-                     cs->halted);
+                     cpu_halted(cs));
     }
 
     for(i = 0; i < 6; i++) {
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index be016b951a..b3bd2285fa 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -709,7 +709,7 @@ int hvf_vcpu_exec(CPUState *cpu)
         vmx_update_tpr(cpu);
 
         qemu_mutex_unlock_iothread();
-        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
+        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu_halted(cpu)) {
             qemu_mutex_lock_iothread();
             return EXCP_HLT;
         }
@@ -742,7 +742,7 @@ int hvf_vcpu_exec(CPUState *cpu)
                 (env->eflags & IF_MASK))
                 && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                 !(idtvec_info & VMCS_IDT_VEC_VALID)) {
-                cpu->halted = 1;
+                cpu_halted_set(cpu, 1);
                 ret = EXCP_HLT;
                 break;
             }
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 5cbcb32ab6..c09cf160ef 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -446,7 +446,7 @@ int hvf_process_events(CPUState *cpu_state)
     if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
         (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
-        cpu_state->halted = 0;
+        cpu_halted_set(cpu_state, 0);
     }
     if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
         hvf_cpu_synchronize_state(cpu_state);
@@ -458,5 +458,5 @@ int hvf_process_events(CPUState *cpu_state)
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                       env->tpr_access_type);
     }
-    return cpu_state->halted;
+    return cpu_halted(cpu_state);
 }
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index b3c13cb898..eda51904dd 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -3594,7 +3594,7 @@ static int kvm_get_mp_state(X86CPU *cpu)
     }
     env->mp_state = mp_state.mp_state;
     if (kvm_irqchip_in_kernel()) {
-        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
+        cpu_halted_set(cs, mp_state.mp_state == KVM_MP_STATE_HALTED);
     }
     return 0;
 }
@@ -4152,7 +4152,7 @@ int kvm_arch_process_async_events(CPUState *cs)
         kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
         env->has_error_code = 0;
 
-        cs->halted = 0;
+        cpu_halted_set(cs, 0);
         if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
             env->mp_state = KVM_MP_STATE_RUNNABLE;
         }
@@ -4175,7 +4175,7 @@ int kvm_arch_process_async_events(CPUState *cs)
     if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) ||
         (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
-        cs->halted = 0;
+        cpu_halted_set(cs, 0);
     }
     if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
         kvm_cpu_synchronize_state(cs);
@@ -4188,7 +4188,7 @@ int kvm_arch_process_async_events(CPUState *cs)
                                       env->tpr_access_type);
     }
 
-    return cs->halted;
+    return cpu_halted(cs);
 }
 
 static int kvm_handle_halt(X86CPU *cpu)
@@ -4199,7 +4199,7 @@ static int kvm_handle_halt(X86CPU *cpu)
     if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
           (env->eflags & IF_MASK)) &&
         !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
-        cs->halted = 1;
+        cpu_halted_set(cs, 1);
         return EXCP_HLT;
     }
 
diff --git a/target/i386/misc_helper.c b/target/i386/misc_helper.c
index b6b1d41b14..c396b6c7b9 100644
--- a/target/i386/misc_helper.c
+++ b/target/i386/misc_helper.c
@@ -558,7 +558,7 @@ static void do_hlt(X86CPU *cpu)
     CPUX86State *env = &cpu->env;
 
     env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
-    cs->halted = 1;
+    cpu_halted_set(cs, 1);
     cs->exception_index = EXCP_HLT;
     cpu_loop_exit(cs);
 }
diff --git a/target/i386/whpx-all.c b/target/i386/whpx-all.c
index c78baac6df..efc2d88810 100644
--- a/target/i386/whpx-all.c
+++ b/target/i386/whpx-all.c
@@ -759,7 +759,7 @@ static int whpx_handle_halt(CPUState *cpu)
           (env->eflags & IF_MASK)) &&
         !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
         cpu->exception_index = EXCP_HLT;
-        cpu->halted = true;
+        cpu_halted_set(cpu, true);
         ret = 1;
     }
     qemu_mutex_unlock_iothread();
@@ -918,7 +918,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
     if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) ||
         (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
-        cpu->halted = false;
+        cpu_halted_set(cpu, false);
     }
 
     if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
@@ -944,7 +944,7 @@ static int whpx_vcpu_run(CPUState *cpu)
     int ret;
 
     whpx_vcpu_process_async_events(cpu);
-    if (cpu->halted) {
+    if (cpu_halted(cpu)) {
         cpu->exception_index = EXCP_HLT;
         atomic_set(&cpu->exit_request, false);
         return 0;
-- 
2.17.1


Thread overview: 80+ messages
2020-06-17 21:01 [PATCH v10 00/73] per-CPU locks Robert Foley
2020-06-17 21:01 ` [PATCH v10 01/73] cpu: rename cpu->work_mutex to cpu->lock Robert Foley
2020-06-17 21:01 ` [PATCH v10 02/73] cpu: introduce cpu_mutex_lock/unlock Robert Foley
2020-06-17 21:01 ` [PATCH v10 03/73] cpu: make qemu_work_cond per-cpu Robert Foley
2020-06-17 21:01 ` [PATCH v10 04/73] cpu: move run_on_cpu to cpus-common Robert Foley
2020-06-17 21:01 ` [PATCH v10 05/73] cpu: introduce process_queued_cpu_work_locked Robert Foley
2020-06-17 21:01 ` [PATCH v10 06/73] cpu: make per-CPU locks an alias of the BQL in TCG rr mode Robert Foley
2020-06-17 21:01 ` [PATCH v10 07/73] tcg-runtime: define helper_cpu_halted_set Robert Foley
2020-06-17 21:01 ` [PATCH v10 08/73] ppc: convert to helper_cpu_halted_set Robert Foley
2020-06-17 21:01 ` [PATCH v10 09/73] cris: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 10/73] hppa: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 11/73] m68k: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 12/73] alpha: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 13/73] microblaze: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 14/73] cpu: define cpu_halted helpers Robert Foley
2020-06-17 21:01 ` [PATCH v10 15/73] tcg-runtime: convert to cpu_halted_set Robert Foley
2020-06-17 21:01 ` [PATCH v10 16/73] hw/semihosting: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 17/73] arm: convert to cpu_halted Robert Foley
2020-06-17 21:01 ` [PATCH v10 18/73] ppc: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 19/73] sh4: " Robert Foley
2020-06-17 21:01 ` Robert Foley [this message]
2020-06-17 21:01   ` [PATCH v10 20/73] i386: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 21/73] lm32: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 22/73] m68k: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 23/73] mips: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 24/73] riscv: " Robert Foley
2020-06-17 21:01   ` Robert Foley
2020-06-17 21:01 ` [PATCH v10 25/73] s390x: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 26/73] sparc: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 27/73] xtensa: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 28/73] gdbstub: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 29/73] openrisc: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 30/73] cpu-exec: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 31/73] cpu: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 32/73] cpu: define cpu_interrupt_request helpers Robert Foley
2020-06-17 21:01 ` [PATCH v10 33/73] ppc: use cpu_reset_interrupt Robert Foley
2020-06-17 21:01 ` [PATCH v10 34/73] exec: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 35/73] i386: " Robert Foley
2020-06-17 21:01   ` Robert Foley
2020-06-17 21:01 ` [PATCH v10 36/73] s390x: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 37/73] openrisc: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 38/73] arm: convert to cpu_interrupt_request Robert Foley
2020-06-17 21:01 ` [PATCH v10 39/73] i386: " Robert Foley
2020-06-17 21:01 ` [PATCH v10 40/73] i386/kvm: " Robert Foley
2020-06-17 21:01   ` Robert Foley
2020-06-17 21:01 ` [PATCH v10 41/73] i386/hax-all: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 42/73] i386/whpx-all: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 43/73] i386/hvf: convert to cpu_request_interrupt Robert Foley
2020-06-17 21:02 ` [PATCH v10 44/73] ppc: convert to cpu_interrupt_request Robert Foley
2020-06-17 21:02 ` [PATCH v10 45/73] sh4: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 46/73] cris: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 47/73] hppa: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 48/73] lm32: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 49/73] m68k: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 50/73] mips: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 51/73] nios: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 52/73] s390x: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 53/73] alpha: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 54/73] moxie: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 55/73] sparc: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 56/73] openrisc: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 57/73] unicore32: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 58/73] microblaze: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 59/73] accel/tcg: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 60/73] cpu: convert to interrupt_request Robert Foley
2020-06-17 21:02 ` [PATCH v10 61/73] cpu: call .cpu_has_work with the CPU lock held Robert Foley
2020-06-17 21:02 ` [PATCH v10 62/73] cpu: introduce cpu_has_work_with_iothread_lock Robert Foley
2020-06-17 21:02 ` [PATCH v10 63/73] ppc: convert to cpu_has_work_with_iothread_lock Robert Foley
2020-06-17 21:02 ` [PATCH v10 64/73] mips: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 65/73] s390x: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 66/73] riscv: " Robert Foley
2020-06-17 21:02   ` Robert Foley
2020-06-17 21:02 ` [PATCH v10 67/73] sparc: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 68/73] xtensa: " Robert Foley
2020-06-17 21:02 ` [PATCH v10 69/73] cpu: rename all_cpu_threads_idle to qemu_tcg_rr_all_cpu_threads_idle Robert Foley
2020-06-17 21:02 ` [PATCH v10 70/73] cpu: protect CPU state with cpu->lock instead of the BQL Robert Foley
2020-06-17 21:02 ` [PATCH v10 71/73] cpus-common: release BQL earlier in run_on_cpu Robert Foley
2020-06-17 21:02 ` [PATCH v10 72/73] cpu: add async_run_on_cpu_no_bql Robert Foley
2020-06-17 21:02 ` [PATCH v10 73/73] cputlb: queue async flush jobs without the BQL Robert Foley
2020-06-17 22:20 ` [PATCH v10 00/73] per-CPU locks no-reply
