* [Qemu-devel] [PATCH v5 1/7] exec: [tcg] Refactor flush of per-CPU virtual TB cache
2016-12-28 14:07 [Qemu-devel] [PATCH v5 0/7] trace: [tcg] Optimize per-vCPU tracing states with separate TB caches Lluís Vilanova
@ 2016-12-28 14:07 ` Lluís Vilanova
2016-12-28 14:07 ` [Qemu-devel] [PATCH v5 2/7] trace: Make trace_get_vcpu_event_count() inlinable Lluís Vilanova
` (5 subsequent siblings)
6 siblings, 0 replies; 10+ messages in thread
From: Lluís Vilanova @ 2016-12-28 14:07 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Eduardo Habkost, Stefan Hajnoczi, Paolo Bonzini,
Peter Crosthwaite, Richard Henderson
The function is reused in later patches.
Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
---
cputlb.c | 2 +-
include/exec/exec-all.h | 6 ++++++
translate-all.c | 14 +++++++++-----
3 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/cputlb.c b/cputlb.c
index 813279f3bc..9bf9960e1b 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -80,7 +80,7 @@ void tlb_flush(CPUState *cpu, int flush_global)
memset(env->tlb_table, -1, sizeof(env->tlb_table));
memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
- memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+ tb_flush_jmp_cache_all(cpu);
env->vtlb_index = 0;
env->tlb_flush_addr = -1;
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index a8c13cee66..57cd978578 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -256,6 +256,12 @@ struct TranslationBlock {
};
void tb_free(TranslationBlock *tb);
+/**
+ * tb_flush_jmp_cache_all:
+ *
+ * Flush the virtual translation block cache.
+ */
+void tb_flush_jmp_cache_all(CPUState *env);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
diff --git a/translate-all.c b/translate-all.c
index 3dd9214904..29ccb9e546 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -941,11 +941,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
}
CPU_FOREACH(cpu) {
- int i;
-
- for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
- atomic_set(&cpu->tb_jmp_cache[i], NULL);
- }
+ tb_flush_jmp_cache_all(cpu);
}
tcg_ctx.tb_ctx.nb_tbs = 0;
@@ -1741,6 +1737,14 @@ void tb_check_watchpoint(CPUState *cpu)
}
}
+void tb_flush_jmp_cache_all(CPUState *cpu)
+{
+ int i;
+ for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
+ atomic_set(&cpu->tb_jmp_cache[i], NULL);
+ }
+}
+
#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
must be at the end of the TB */
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Qemu-devel] [PATCH v5 2/7] trace: Make trace_get_vcpu_event_count() inlinable
2016-12-28 14:07 [Qemu-devel] [PATCH v5 0/7] trace: [tcg] Optimize per-vCPU tracing states with separate TB caches Lluís Vilanova
2016-12-28 14:07 ` [Qemu-devel] [PATCH v5 1/7] exec: [tcg] Refactor flush of per-CPU virtual TB cache Lluís Vilanova
@ 2016-12-28 14:07 ` Lluís Vilanova
2016-12-28 14:07 ` [Qemu-devel] [PATCH v5 3/7] trace: [tcg] Delay changes to dynamic state when translating Lluís Vilanova
` (4 subsequent siblings)
6 siblings, 0 replies; 10+ messages in thread
From: Lluís Vilanova @ 2016-12-28 14:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Eric Blake, Eduardo Habkost, Stefan Hajnoczi
Later patches will make use of it.
Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
---
trace/control-internal.h | 5 +++++
trace/control.c | 9 ++-------
trace/control.h | 2 +-
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/trace/control-internal.h b/trace/control-internal.h
index a9d395a587..beb98a0d2c 100644
--- a/trace/control-internal.h
+++ b/trace/control-internal.h
@@ -16,6 +16,7 @@
extern int trace_events_enabled_count;
+extern uint32_t trace_next_vcpu_id;
static inline bool trace_event_is_pattern(const char *str)
@@ -82,6 +83,10 @@ static inline bool trace_event_get_vcpu_state_dynamic(CPUState *vcpu,
return trace_event_get_vcpu_state_dynamic_by_vcpu_id(vcpu, vcpu_id);
}
+static inline uint32_t trace_get_vcpu_event_count(void)
+{
+ return trace_next_vcpu_id;
+}
void trace_event_register_group(TraceEvent **events);
diff --git a/trace/control.c b/trace/control.c
index 1a7bee6ddc..52d0e343fa 100644
--- a/trace/control.c
+++ b/trace/control.c
@@ -36,7 +36,7 @@ typedef struct TraceEventGroup {
static TraceEventGroup *event_groups;
static size_t nevent_groups;
static uint32_t next_id;
-static uint32_t next_vcpu_id;
+uint32_t trace_next_vcpu_id;
QemuOptsList qemu_trace_opts = {
.name = "trace",
@@ -65,7 +65,7 @@ void trace_event_register_group(TraceEvent **events)
for (i = 0; events[i] != NULL; i++) {
events[i]->id = next_id++;
if (events[i]->vcpu_id != TRACE_VCPU_EVENT_NONE) {
- events[i]->vcpu_id = next_vcpu_id++;
+ events[i]->vcpu_id = trace_next_vcpu_id++;
}
}
event_groups = g_renew(TraceEventGroup, event_groups, nevent_groups + 1);
@@ -299,8 +299,3 @@ char *trace_opt_parse(const char *optarg)
return trace_file;
}
-
-uint32_t trace_get_vcpu_event_count(void)
-{
- return next_vcpu_id;
-}
diff --git a/trace/control.h b/trace/control.h
index ccaeac8552..80d326c4d1 100644
--- a/trace/control.h
+++ b/trace/control.h
@@ -237,7 +237,7 @@ char *trace_opt_parse(const char *optarg);
*
* Return the number of known vcpu-specific events
*/
-uint32_t trace_get_vcpu_event_count(void);
+static uint32_t trace_get_vcpu_event_count(void);
#include "trace/control-internal.h"
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Qemu-devel] [PATCH v5 3/7] trace: [tcg] Delay changes to dynamic state when translating
2016-12-28 14:07 [Qemu-devel] [PATCH v5 0/7] trace: [tcg] Optimize per-vCPU tracing states with separate TB caches Lluís Vilanova
2016-12-28 14:07 ` [Qemu-devel] [PATCH v5 1/7] exec: [tcg] Refactor flush of per-CPU virtual TB cache Lluís Vilanova
2016-12-28 14:07 ` [Qemu-devel] [PATCH v5 2/7] trace: Make trace_get_vcpu_event_count() inlinable Lluís Vilanova
@ 2016-12-28 14:07 ` Lluís Vilanova
2016-12-28 14:08 ` [Qemu-devel] [PATCH v5 4/7] exec: [tcg] Use different TBs according to the vCPU's dynamic tracing state Lluís Vilanova
` (3 subsequent siblings)
6 siblings, 0 replies; 10+ messages in thread
From: Lluís Vilanova @ 2016-12-28 14:07 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Eduardo Habkost, Stefan Hajnoczi, Paolo Bonzini,
Peter Crosthwaite, Richard Henderson
This keeps consistency across all decisions taken during translation
when the dynamic state of a vCPU is changed in the middle of translating
some guest code.
Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
---
cpu-exec.c | 26 ++++++++++++++++++++++++++
include/qom/cpu.h | 7 +++++++
qom/cpu.c | 4 ++++
trace/control-target.c | 11 +++++++++--
4 files changed, 46 insertions(+), 2 deletions(-)
diff --git a/cpu-exec.c b/cpu-exec.c
index 4188fed3c6..1b7366efb0 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -33,6 +33,7 @@
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"
+#include "trace/control.h"
/* -icount align implementation. */
@@ -451,9 +452,21 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
#ifndef CONFIG_USER_ONLY
} else if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
+ /* delay changes to this vCPU's dstate during translation */
+ atomic_set(&cpu->trace_dstate_delayed_req, false);
+ atomic_set(&cpu->trace_dstate_must_delay, true);
+
/* try to cause an exception pending in the log */
cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
*ret = -1;
+
+ /* apply and disable delayed dstate changes */
+ atomic_set(&cpu->trace_dstate_must_delay, false);
+ if (unlikely(atomic_read(&cpu->trace_dstate_delayed_req))) {
+ bitmap_copy(cpu->trace_dstate, cpu->trace_dstate_delayed,
+ trace_get_vcpu_event_count());
+ }
+
return true;
#endif
}
@@ -634,8 +647,21 @@ int cpu_exec(CPUState *cpu)
for(;;) {
cpu_handle_interrupt(cpu, &last_tb);
+
+ /* delay changes to this vCPU's dstate during translation */
+ atomic_set(&cpu->trace_dstate_delayed_req, false);
+ atomic_set(&cpu->trace_dstate_must_delay, true);
+
tb = tb_find(cpu, last_tb, tb_exit);
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
+
+ /* apply and disable delayed dstate changes */
+ atomic_set(&cpu->trace_dstate_must_delay, false);
+ if (unlikely(atomic_read(&cpu->trace_dstate_delayed_req))) {
+ bitmap_copy(cpu->trace_dstate, cpu->trace_dstate_delayed,
+ trace_get_vcpu_event_count());
+ }
+
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(&sc, cpu);
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 3f79a8e955..58255d06fa 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -295,6 +295,10 @@ struct qemu_work_item;
* @kvm_fd: vCPU file descriptor for KVM.
* @work_mutex: Lock to prevent multiple access to queued_work_*.
* @queued_work_first: First asynchronous work pending.
+ * @trace_dstate_must_delay: Whether a change to trace_dstate must be delayed.
+ * @trace_dstate_delayed_req: Whether a change to trace_dstate was delayed.
+ * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
+ * to @trace_dstate).
* @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
*
* State of one CPU core or thread.
@@ -370,6 +374,9 @@ struct CPUState {
* Dynamically allocated based on bitmap requried to hold up to
* trace_get_vcpu_event_count() entries.
*/
+ bool trace_dstate_must_delay;
+ bool trace_dstate_delayed_req;
+ unsigned long *trace_dstate_delayed;
unsigned long *trace_dstate;
/* TODO Move common fields from CPUArchState here. */
diff --git a/qom/cpu.c b/qom/cpu.c
index 03d9190f8c..d56496d28d 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -367,6 +367,9 @@ static void cpu_common_initfn(Object *obj)
QTAILQ_INIT(&cpu->breakpoints);
QTAILQ_INIT(&cpu->watchpoints);
+ cpu->trace_dstate_must_delay = false;
+ cpu->trace_dstate_delayed_req = false;
+ cpu->trace_dstate_delayed = bitmap_new(trace_get_vcpu_event_count());
cpu->trace_dstate = bitmap_new(trace_get_vcpu_event_count());
cpu_exec_initfn(cpu);
@@ -375,6 +378,7 @@ static void cpu_common_initfn(Object *obj)
static void cpu_common_finalize(Object *obj)
{
CPUState *cpu = CPU(obj);
+ g_free(cpu->trace_dstate_delayed);
g_free(cpu->trace_dstate);
}
diff --git a/trace/control-target.c b/trace/control-target.c
index 7ebf6e0bcb..aba8db55de 100644
--- a/trace/control-target.c
+++ b/trace/control-target.c
@@ -69,13 +69,20 @@ void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
if (state_pre != state) {
if (state) {
trace_events_enabled_count++;
- set_bit(vcpu_id, vcpu->trace_dstate);
+ set_bit(vcpu_id, vcpu->trace_dstate_delayed);
+ if (!atomic_read(&vcpu->trace_dstate_must_delay)) {
+ set_bit(vcpu_id, vcpu->trace_dstate);
+ }
(*ev->dstate)++;
} else {
trace_events_enabled_count--;
- clear_bit(vcpu_id, vcpu->trace_dstate);
+ clear_bit(vcpu_id, vcpu->trace_dstate_delayed);
+ if (!atomic_read(&vcpu->trace_dstate_must_delay)) {
+ clear_bit(vcpu_id, vcpu->trace_dstate);
+ }
(*ev->dstate)--;
}
+ atomic_set(&vcpu->trace_dstate_delayed_req, true);
}
}
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Qemu-devel] [PATCH v5 4/7] exec: [tcg] Use different TBs according to the vCPU's dynamic tracing state
2016-12-28 14:07 [Qemu-devel] [PATCH v5 0/7] trace: [tcg] Optimize per-vCPU tracing states with separate TB caches Lluís Vilanova
` (2 preceding siblings ...)
2016-12-28 14:07 ` [Qemu-devel] [PATCH v5 3/7] trace: [tcg] Delay changes to dynamic state when translating Lluís Vilanova
@ 2016-12-28 14:08 ` Lluís Vilanova
2016-12-28 16:08 ` Richard Henderson
2016-12-28 14:08 ` [Qemu-devel] [PATCH v5 5/7] trace: [tcg] Do not generate TCG code to trace dynamically-disabled events Lluís Vilanova
` (2 subsequent siblings)
6 siblings, 1 reply; 10+ messages in thread
From: Lluís Vilanova @ 2016-12-28 14:08 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Eduardo Habkost, Stefan Hajnoczi, Paolo Bonzini,
Peter Crosthwaite, Richard Henderson
Every vCPU now uses a separate set of TBs for each set of dynamic
tracing event state values. Each set of TBs can be used by any number of
vCPUs to maximize TB reuse when vCPUs have the same tracing state.
This feature is later used by tracetool to optimize tracing of guest
code events.
The maximum number of TB sets is defined as 2^E, where E is the number
of events that have the 'vcpu' property (their state is stored in
CPUState->trace_dstate).
For this to work, a change on the dynamic tracing state of a vCPU will
force it to flush its virtual TB cache (which is only indexed by
address), and fall back to the physical TB cache (which now contains the
vCPU's dynamic tracing state as part of the hashing function).
Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
---
cpu-exec.c | 26 +++++++++++++++++++++-----
include/exec/exec-all.h | 5 +++++
include/exec/tb-hash-xx.h | 11 ++++++++++-
include/exec/tb-hash.h | 5 +++--
include/qemu-common.h | 3 +++
tests/qht-bench.c | 2 +-
trace/control-target.c | 3 +++
trace/control.h | 3 +++
translate-all.c | 16 ++++++++++++++--
9 files changed, 63 insertions(+), 11 deletions(-)
diff --git a/cpu-exec.c b/cpu-exec.c
index 1b7366efb0..a377505b9c 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -262,6 +262,7 @@ struct tb_desc {
CPUArchState *env;
tb_page_addr_t phys_page1;
uint32_t flags;
+ TRACE_QHT_VCPU_DSTATE_TYPE trace_vcpu_dstate;
};
static bool tb_cmp(const void *p, const void *d)
@@ -273,6 +274,7 @@ static bool tb_cmp(const void *p, const void *d)
tb->page_addr[0] == desc->phys_page1 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
+ tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
!atomic_read(&tb->invalid)) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
@@ -294,7 +296,8 @@ static bool tb_cmp(const void *p, const void *d)
static TranslationBlock *tb_htable_lookup(CPUState *cpu,
target_ulong pc,
target_ulong cs_base,
- uint32_t flags)
+ uint32_t flags,
+ uint32_t trace_vcpu_dstate)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
@@ -303,10 +306,11 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu,
desc.env = (CPUArchState *)cpu->env_ptr;
desc.cs_base = cs_base;
desc.flags = flags;
+ desc.trace_vcpu_dstate = trace_vcpu_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
- h = tb_hash_func(phys_pc, pc, flags);
+ h = tb_hash_func(phys_pc, pc, flags, trace_vcpu_dstate);
return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}
@@ -318,16 +322,24 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
+ unsigned long trace_vcpu_dstate_bitmap;
+ TRACE_QHT_VCPU_DSTATE_TYPE trace_vcpu_dstate;
bool have_tb_lock = false;
+ bitmap_copy(&trace_vcpu_dstate_bitmap, cpu->trace_dstate,
+ trace_get_vcpu_event_count());
+ memcpy(&trace_vcpu_dstate, &trace_vcpu_dstate_bitmap,
+ sizeof(trace_vcpu_dstate));
+
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
- tb->flags != flags)) {
- tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+ tb->flags != flags ||
+ tb->trace_vcpu_dstate != trace_vcpu_dstate)) {
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags, trace_vcpu_dstate);
if (!tb) {
/* mmap_lock is needed by tb_gen_code, and mmap_lock must be
@@ -341,7 +353,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
/* There's a chance that our desired tb has been translated while
* taking the locks so we check again inside the lock.
*/
- tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags, trace_vcpu_dstate);
if (!tb) {
/* if no translated code available, then translate it now */
tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
@@ -465,6 +477,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
if (unlikely(atomic_read(&cpu->trace_dstate_delayed_req))) {
bitmap_copy(cpu->trace_dstate, cpu->trace_dstate_delayed,
trace_get_vcpu_event_count());
+ tb_flush_jmp_cache_all(cpu);
}
return true;
@@ -660,6 +673,9 @@ int cpu_exec(CPUState *cpu)
if (unlikely(atomic_read(&cpu->trace_dstate_delayed_req))) {
bitmap_copy(cpu->trace_dstate, cpu->trace_dstate_delayed,
trace_get_vcpu_event_count());
+ tb_flush_jmp_cache_all(cpu);
+ /* avoid chaining TBs with different dstates */
+ last_tb = NULL;
}
/* Try to align the host and virtual clocks
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 57cd978578..ae74f61ea2 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -200,6 +200,10 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
#define USE_DIRECT_JUMP
#endif
+/**
+ * TranslationBlock:
+ * @trace_vcpu_dstate: Per-vCPU dynamic tracing state used to generate this TB.
+ */
struct TranslationBlock {
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
target_ulong cs_base; /* CS base for this block */
@@ -215,6 +219,7 @@ struct TranslationBlock {
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */
uint16_t invalid;
+ TRACE_QHT_VCPU_DSTATE_TYPE trace_vcpu_dstate;
void *tc_ptr; /* pointer to the translated code */
uint8_t *tc_search; /* pointer to search data */
diff --git a/include/exec/tb-hash-xx.h b/include/exec/tb-hash-xx.h
index 2c40b5c466..96f7c0539c 100644
--- a/include/exec/tb-hash-xx.h
+++ b/include/exec/tb-hash-xx.h
@@ -35,6 +35,7 @@
#define EXEC_TB_HASH_XX_H
#include "qemu/bitops.h"
+#include "qemu-common.h"
#define PRIME32_1 2654435761U
#define PRIME32_2 2246822519U
@@ -49,7 +50,8 @@
* contiguous in memory.
*/
static inline
-uint32_t tb_hash_func5(uint64_t a0, uint64_t b0, uint32_t e)
+uint32_t tb_hash_func6(uint64_t a0, uint64_t b0, uint32_t e,
+ TRACE_QHT_VCPU_DSTATE_TYPE f)
{
uint32_t v1 = TB_HASH_XX_SEED + PRIME32_1 + PRIME32_2;
uint32_t v2 = TB_HASH_XX_SEED + PRIME32_2;
@@ -83,6 +85,13 @@ uint32_t tb_hash_func5(uint64_t a0, uint64_t b0, uint32_t e)
h32 += e * PRIME32_3;
h32 = rol32(h32, 17) * PRIME32_4;
+ if (sizeof(TRACE_QHT_VCPU_DSTATE_TYPE) == sizeof(uint32_t)) {
+ h32 += f * PRIME32_3;
+ h32 = rol32(h32, 17) * PRIME32_4;
+ } else {
+ abort();
+ }
+
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
diff --git a/include/exec/tb-hash.h b/include/exec/tb-hash.h
index 2c27490cb8..a042f24c97 100644
--- a/include/exec/tb-hash.h
+++ b/include/exec/tb-hash.h
@@ -46,9 +46,10 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
}
static inline
-uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags)
+uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc,
+ uint32_t flags, uint32_t trace_vcpu_dstate)
{
- return tb_hash_func5(phys_pc, pc, flags);
+ return tb_hash_func6(phys_pc, pc, flags, trace_vcpu_dstate);
}
#endif
diff --git a/include/qemu-common.h b/include/qemu-common.h
index 1430390eb6..aaaa73a6fe 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -151,4 +151,7 @@ void page_size_init(void);
* returned. */
bool dump_in_progress(void);
+/* Use a macro to allow safe changes to its size in the future */
+#define TRACE_QHT_VCPU_DSTATE_TYPE uint32_t
+
#endif
diff --git a/tests/qht-bench.c b/tests/qht-bench.c
index 2afa09d859..11c1cec766 100644
--- a/tests/qht-bench.c
+++ b/tests/qht-bench.c
@@ -103,7 +103,7 @@ static bool is_equal(const void *obj, const void *userp)
static inline uint32_t h(unsigned long v)
{
- return tb_hash_func5(v, 0, 0);
+ return tb_hash_func6(v, 0, 0, 0);
}
/*
diff --git a/trace/control-target.c b/trace/control-target.c
index aba8db55de..61e6a4545b 100644
--- a/trace/control-target.c
+++ b/trace/control-target.c
@@ -82,7 +82,10 @@ void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
}
(*ev->dstate)--;
}
+ /* Make sure next translated/executed TB uses the new dstate */
atomic_set(&vcpu->trace_dstate_delayed_req, true);
+ /* NOTE: checked by all TBs in gen_tb_start() */
+ atomic_set(&vcpu->tcg_exit_req, 1);
}
}
diff --git a/trace/control.h b/trace/control.h
index 80d326c4d1..cab84a0308 100644
--- a/trace/control.h
+++ b/trace/control.h
@@ -165,6 +165,9 @@ void trace_event_set_state_dynamic(TraceEvent *ev, bool state);
* Set the dynamic tracing state of an event for the given vCPU.
*
* Pre-condition: trace_event_get_vcpu_state_static(ev) == true
+ *
+ * Note: Changes for execution-time events with the 'tcg' property will not be
+ * propagated until the next TB is executed (iff executing in TCG mode).
*/
void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
TraceEvent *ev, bool state);
diff --git a/translate-all.c b/translate-all.c
index 29ccb9e546..6e1b1d474c 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -54,6 +54,7 @@
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
+#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "exec/log.h"
@@ -813,6 +814,12 @@ static void tb_htable_init(void)
{
unsigned int mode = QHT_MODE_AUTO_RESIZE;
+ /* Ensure TB hash function covers the bitmap size */
+ if (DIV_ROUND_UP(trace_get_vcpu_event_count(), BITS_PER_BYTE) >
+ sizeof(TRACE_QHT_VCPU_DSTATE_TYPE)) {
+ error_report("too many 'vcpu' events for the TB hash function");
+ }
+
qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}
@@ -1106,7 +1113,7 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_hash_func(phys_pc, tb->pc, tb->flags);
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
/* remove the TB from the page list */
@@ -1251,7 +1258,7 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
}
/* add in the hash table */
- h = tb_hash_func(phys_pc, tb->pc, tb->flags);
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
#ifdef DEBUG_TB_CHECK
@@ -1270,6 +1277,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong virt_page2;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size;
+ unsigned long trace_vcpu_dstate_bitmap;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
@@ -1294,6 +1302,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
+ bitmap_copy(&trace_vcpu_dstate_bitmap, cpu->trace_dstate,
+ trace_get_vcpu_event_count());
+ memcpy(&tb->trace_vcpu_dstate, &trace_vcpu_dstate_bitmap,
+ sizeof(tb->trace_vcpu_dstate));
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count1++; /* includes aborted translations because of
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [Qemu-devel] [PATCH v5 4/7] exec: [tcg] Use different TBs according to the vCPU's dynamic tracing state
2016-12-28 14:08 ` [Qemu-devel] [PATCH v5 4/7] exec: [tcg] Use different TBs according to the vCPU's dynamic tracing state Lluís Vilanova
@ 2016-12-28 16:08 ` Richard Henderson
2016-12-28 16:23 ` Lluís Vilanova
0 siblings, 1 reply; 10+ messages in thread
From: Richard Henderson @ 2016-12-28 16:08 UTC (permalink / raw)
To: Lluís Vilanova, qemu-devel
Cc: Eduardo Habkost, Peter Crosthwaite, Stefan Hajnoczi, Paolo Bonzini
On 12/28/2016 06:08 AM, Lluís Vilanova wrote:
> @@ -83,6 +85,13 @@ uint32_t tb_hash_func5(uint64_t a0, uint64_t b0, uint32_t e)
> h32 += e * PRIME32_3;
> h32 = rol32(h32, 17) * PRIME32_4;
>
> + if (sizeof(TRACE_QHT_VCPU_DSTATE_TYPE) == sizeof(uint32_t)) {
> + h32 += f * PRIME32_3;
> + h32 = rol32(h32, 17) * PRIME32_4;
> + } else {
> + abort();
> + }
> +
QEMU_BUILD_BUG_ON.
r~
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [Qemu-devel] [PATCH v5 4/7] exec: [tcg] Use different TBs according to the vCPU's dynamic tracing state
2016-12-28 16:08 ` Richard Henderson
@ 2016-12-28 16:23 ` Lluís Vilanova
0 siblings, 0 replies; 10+ messages in thread
From: Lluís Vilanova @ 2016-12-28 16:23 UTC (permalink / raw)
To: Richard Henderson
Cc: qemu-devel, Paolo Bonzini, Eduardo Habkost, Stefan Hajnoczi,
Peter Crosthwaite
Richard Henderson writes:
> On 12/28/2016 06:08 AM, Lluís Vilanova wrote:
>> @@ -83,6 +85,13 @@ uint32_t tb_hash_func5(uint64_t a0, uint64_t b0, uint32_t e)
>> h32 += e * PRIME32_3;
>> h32 = rol32(h32, 17) * PRIME32_4;
>>
>> + if (sizeof(TRACE_QHT_VCPU_DSTATE_TYPE) == sizeof(uint32_t)) {
>> + h32 += f * PRIME32_3;
>> + h32 = rol32(h32, 17) * PRIME32_4;
>> + } else {
>> + abort();
>> + }
>> +
> QEMU_BUILD_BUG_ON.
Right, thanks.
Lluis
^ permalink raw reply [flat|nested] 10+ messages in thread
* [Qemu-devel] [PATCH v5 5/7] trace: [tcg] Do not generate TCG code to trace dynamically-disabled events
2016-12-28 14:07 [Qemu-devel] [PATCH v5 0/7] trace: [tcg] Optimize per-vCPU tracing states with separate TB caches Lluís Vilanova
` (3 preceding siblings ...)
2016-12-28 14:08 ` [Qemu-devel] [PATCH v5 4/7] exec: [tcg] Use different TBs according to the vCPU's dynamic tracing state Lluís Vilanova
@ 2016-12-28 14:08 ` Lluís Vilanova
2016-12-28 14:08 ` [Qemu-devel] [PATCH v5 6/7] trace: [tcg, trivial] Re-align generated code Lluís Vilanova
2016-12-28 14:08 ` [Qemu-devel] [PATCH v5 7/7] trace: [trivial] Statically enable all guest events Lluís Vilanova
6 siblings, 0 replies; 10+ messages in thread
From: Lluís Vilanova @ 2016-12-28 14:08 UTC (permalink / raw)
To: qemu-devel; +Cc: Eric Blake, Eduardo Habkost, Stefan Hajnoczi
If an event is dynamically disabled, the TCG code that calls the
execution-time tracer is not generated.
This removes the overhead of execution-time tracers for dynamically
disabled events. As a bonus, it also avoids checking the event state when
the execution-time tracer is called from TCG-generated code (since
otherwise TCG would simply not call it).
Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
---
scripts/tracetool/__init__.py | 1 +
scripts/tracetool/format/h.py | 24 ++++++++++++++++++------
scripts/tracetool/format/tcg_h.py | 19 ++++++++++++++++---
scripts/tracetool/format/tcg_helper_c.py | 3 ++-
4 files changed, 37 insertions(+), 10 deletions(-)
diff --git a/scripts/tracetool/__init__.py b/scripts/tracetool/__init__.py
index 365446fa53..63168ccdf0 100644
--- a/scripts/tracetool/__init__.py
+++ b/scripts/tracetool/__init__.py
@@ -264,6 +264,7 @@ class Event(object):
return self._FMT.findall(self.fmt)
QEMU_TRACE = "trace_%(name)s"
+ QEMU_TRACE_NOCHECK = "_nocheck__" + QEMU_TRACE
QEMU_TRACE_TCG = QEMU_TRACE + "_tcg"
QEMU_DSTATE = "_TRACE_%(NAME)s_DSTATE"
QEMU_EVENT = "_TRACE_%(NAME)s_EVENT"
diff --git a/scripts/tracetool/format/h.py b/scripts/tracetool/format/h.py
index 3682f4e6a8..a78e50ef35 100644
--- a/scripts/tracetool/format/h.py
+++ b/scripts/tracetool/format/h.py
@@ -49,6 +49,19 @@ def generate(events, backend, group):
backend.generate_begin(events, group)
for e in events:
+ # tracer without checks
+ out('',
+ 'static inline void %(api)s(%(args)s)',
+ '{',
+ api=e.api(e.QEMU_TRACE_NOCHECK),
+ args=e.args)
+
+ if "disable" not in e.properties:
+ backend.generate(e, group)
+
+ out('}')
+
+ # tracer wrapper with checks (per-vCPU tracing)
if "vcpu" in e.properties:
trace_cpu = next(iter(e.args))[1]
cond = "trace_event_get_vcpu_state(%(cpu)s,"\
@@ -63,16 +76,15 @@ def generate(events, backend, group):
'static inline void %(api)s(%(args)s)',
'{',
' if (%(cond)s) {',
+ ' %(api_nocheck)s(%(names)s);',
+ ' }',
+ '}',
api=e.api(),
+ api_nocheck=e.api(e.QEMU_TRACE_NOCHECK),
args=e.args,
+ names=", ".join(e.args.names()),
cond=cond)
- if "disable" not in e.properties:
- backend.generate(e, group)
-
- out(' }',
- '}')
-
backend.generate_end(events, group)
out('#endif /* TRACE_%s_GENERATED_TRACERS_H */' % group.upper())
diff --git a/scripts/tracetool/format/tcg_h.py b/scripts/tracetool/format/tcg_h.py
index 5f213f6cba..71b5c09432 100644
--- a/scripts/tracetool/format/tcg_h.py
+++ b/scripts/tracetool/format/tcg_h.py
@@ -41,7 +41,7 @@ def generate(events, backend, group):
for e in events:
# just keep one of them
- if "tcg-trans" not in e.properties:
+ if "tcg-exec" not in e.properties:
continue
out('static inline void %(name_tcg)s(%(args)s)',
@@ -53,12 +53,25 @@ def generate(events, backend, group):
args_trans = e.original.event_trans.args
args_exec = tracetool.vcpu.transform_args(
"tcg_helper_c", e.original.event_exec, "wrapper")
+ if "vcpu" in e.properties:
+ trace_cpu = e.args.names()[0]
+ cond = "trace_event_get_vcpu_state(%(cpu)s,"\
+ " TRACE_%(id)s)"\
+ % dict(
+ cpu=trace_cpu,
+ id=e.original.event_exec.name.upper())
+ else:
+ cond = "true"
+
out(' %(name_trans)s(%(argnames_trans)s);',
- ' gen_helper_%(name_exec)s(%(argnames_exec)s);',
+ ' if (%(cond)s) {',
+ ' gen_helper_%(name_exec)s(%(argnames_exec)s);',
+ ' }',
name_trans=e.original.event_trans.api(e.QEMU_TRACE),
name_exec=e.original.event_exec.api(e.QEMU_TRACE),
argnames_trans=", ".join(args_trans.names()),
- argnames_exec=", ".join(args_exec.names()))
+ argnames_exec=", ".join(args_exec.names()),
+ cond=cond)
out('}')
diff --git a/scripts/tracetool/format/tcg_helper_c.py b/scripts/tracetool/format/tcg_helper_c.py
index cc26e03008..c2a05d756c 100644
--- a/scripts/tracetool/format/tcg_helper_c.py
+++ b/scripts/tracetool/format/tcg_helper_c.py
@@ -66,10 +66,11 @@ def generate(events, backend, group):
out('void %(name_tcg)s(%(args_api)s)',
'{',
+ # NOTE: the check was already performed at TCG-generation time
' %(name)s(%(args_call)s);',
'}',
name_tcg="helper_%s_proxy" % e.api(),
- name=e.api(),
+ name=e.api(e.QEMU_TRACE_NOCHECK),
args_api=e_args_api,
args_call=", ".join(e_args_call.casted()),
)
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Qemu-devel] [PATCH v5 6/7] trace: [tcg, trivial] Re-align generated code
2016-12-28 14:07 [Qemu-devel] [PATCH v5 0/7] trace: [tcg] Optimize per-vCPU tracing states with separate TB caches Lluís Vilanova
` (4 preceding siblings ...)
2016-12-28 14:08 ` [Qemu-devel] [PATCH v5 5/7] trace: [tcg] Do not generate TCG code to trace dynamically-disabled events Lluís Vilanova
@ 2016-12-28 14:08 ` Lluís Vilanova
2016-12-28 14:08 ` [Qemu-devel] [PATCH v5 7/7] trace: [trivial] Statically enable all guest events Lluís Vilanova
6 siblings, 0 replies; 10+ messages in thread
From: Lluís Vilanova @ 2016-12-28 14:08 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Eduardo Habkost, Stefan Hajnoczi, Michael Tokarev,
Laurent Vivier, open list:Trivial patches
Last patch removed a nesting level in generated code. Re-align all code
generated by backends to be 4-column aligned.
Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
---
scripts/tracetool/backend/dtrace.py | 2 +-
scripts/tracetool/backend/ftrace.py | 20 ++++++++++----------
scripts/tracetool/backend/log.py | 17 +++++++++--------
scripts/tracetool/backend/simple.py | 2 +-
scripts/tracetool/backend/syslog.py | 6 +++---
scripts/tracetool/backend/ust.py | 2 +-
6 files changed, 25 insertions(+), 24 deletions(-)
diff --git a/scripts/tracetool/backend/dtrace.py b/scripts/tracetool/backend/dtrace.py
index 79505c6b1a..b3a8645bf0 100644
--- a/scripts/tracetool/backend/dtrace.py
+++ b/scripts/tracetool/backend/dtrace.py
@@ -41,6 +41,6 @@ def generate_h_begin(events, group):
def generate_h(event, group):
- out(' QEMU_%(uppername)s(%(argnames)s);',
+ out(' QEMU_%(uppername)s(%(argnames)s);',
uppername=event.name.upper(),
argnames=", ".join(event.args.names()))
diff --git a/scripts/tracetool/backend/ftrace.py b/scripts/tracetool/backend/ftrace.py
index db9fe7ad57..dd0eda4441 100644
--- a/scripts/tracetool/backend/ftrace.py
+++ b/scripts/tracetool/backend/ftrace.py
@@ -29,17 +29,17 @@ def generate_h(event, group):
if len(event.args) > 0:
argnames = ", " + argnames
- out(' {',
- ' char ftrace_buf[MAX_TRACE_STRLEN];',
- ' int unused __attribute__ ((unused));',
- ' int trlen;',
- ' if (trace_event_get_state(%(event_id)s)) {',
- ' trlen = snprintf(ftrace_buf, MAX_TRACE_STRLEN,',
- ' "%(name)s " %(fmt)s "\\n" %(argnames)s);',
- ' trlen = MIN(trlen, MAX_TRACE_STRLEN - 1);',
- ' unused = write(trace_marker_fd, ftrace_buf, trlen);',
- ' }',
+ out(' {',
+ ' char ftrace_buf[MAX_TRACE_STRLEN];',
+ ' int unused __attribute__ ((unused));',
+ ' int trlen;',
+ ' if (trace_event_get_state(%(event_id)s)) {',
+ ' trlen = snprintf(ftrace_buf, MAX_TRACE_STRLEN,',
+ ' "%(name)s " %(fmt)s "\\n" %(argnames)s);',
+ ' trlen = MIN(trlen, MAX_TRACE_STRLEN - 1);',
+ ' unused = write(trace_marker_fd, ftrace_buf, trlen);',
' }',
+ ' }',
name=event.name,
args=event.args,
event_id="TRACE_" + event.name.upper(),
diff --git a/scripts/tracetool/backend/log.py b/scripts/tracetool/backend/log.py
index 4f4a4d38b1..7d2c3abe75 100644
--- a/scripts/tracetool/backend/log.py
+++ b/scripts/tracetool/backend/log.py
@@ -35,14 +35,15 @@ def generate_h(event, group):
else:
cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper())
- out(' if (%(cond)s) {',
- ' struct timeval _now;',
- ' gettimeofday(&_now, NULL);',
- ' qemu_log_mask(LOG_TRACE, "%%d@%%zd.%%06zd:%(name)s " %(fmt)s "\\n",',
- ' getpid(),',
- ' (size_t)_now.tv_sec, (size_t)_now.tv_usec',
- ' %(argnames)s);',
- ' }',
+ out(' if (%(cond)s) {',
+ ' struct timeval _now;',
+ ' gettimeofday(&_now, NULL);',
+ ' qemu_log_mask(LOG_TRACE,',
+ ' "%%d@%%zd.%%06zd:%(name)s " %(fmt)s "\\n",',
+ ' getpid(),',
+ ' (size_t)_now.tv_sec, (size_t)_now.tv_usec',
+ ' %(argnames)s);',
+ ' }',
cond=cond,
name=event.name,
fmt=event.fmt.rstrip("\n"),
diff --git a/scripts/tracetool/backend/simple.py b/scripts/tracetool/backend/simple.py
index 85f61028e2..a28460b1e4 100644
--- a/scripts/tracetool/backend/simple.py
+++ b/scripts/tracetool/backend/simple.py
@@ -37,7 +37,7 @@ def generate_h_begin(events, group):
def generate_h(event, group):
- out(' _simple_%(api)s(%(args)s);',
+ out(' _simple_%(api)s(%(args)s);',
api=event.api(),
args=", ".join(event.args.names()))
diff --git a/scripts/tracetool/backend/syslog.py b/scripts/tracetool/backend/syslog.py
index b8ff2790c4..1ce627f0fc 100644
--- a/scripts/tracetool/backend/syslog.py
+++ b/scripts/tracetool/backend/syslog.py
@@ -35,9 +35,9 @@ def generate_h(event, group):
else:
cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper())
- out(' if (%(cond)s) {',
- ' syslog(LOG_INFO, "%(name)s " %(fmt)s %(argnames)s);',
- ' }',
+ out(' if (%(cond)s) {',
+ ' syslog(LOG_INFO, "%(name)s " %(fmt)s %(argnames)s);',
+ ' }',
cond=cond,
name=event.name,
fmt=event.fmt.rstrip("\n"),
diff --git a/scripts/tracetool/backend/ust.py b/scripts/tracetool/backend/ust.py
index 4594db6128..2d289b2e3c 100644
--- a/scripts/tracetool/backend/ust.py
+++ b/scripts/tracetool/backend/ust.py
@@ -30,6 +30,6 @@ def generate_h(event, group):
if len(event.args) > 0:
argnames = ", " + argnames
- out(' tracepoint(qemu, %(name)s%(tp_args)s);',
+ out(' tracepoint(qemu, %(name)s%(tp_args)s);',
name=event.name,
tp_args=argnames)
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Qemu-devel] [PATCH v5 7/7] trace: [trivial] Statically enable all guest events
2016-12-28 14:07 [Qemu-devel] [PATCH v5 0/7] trace: [tcg] Optimize per-vCPU tracing states with separate TB caches Lluís Vilanova
` (5 preceding siblings ...)
2016-12-28 14:08 ` [Qemu-devel] [PATCH v5 6/7] trace: [tcg, trivial] Re-align generated code Lluís Vilanova
@ 2016-12-28 14:08 ` Lluís Vilanova
6 siblings, 0 replies; 10+ messages in thread
From: Lluís Vilanova @ 2016-12-28 14:08 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Eduardo Habkost, Stefan Hajnoczi, Michael Tokarev,
Laurent Vivier, open list:Trivial patches
The optimizations in this series make it feasible to have these guest events
available in all builds.
Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
---
trace-events | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/trace-events b/trace-events
index f74e1d3d22..0a0f4d9cd6 100644
--- a/trace-events
+++ b/trace-events
@@ -159,7 +159,7 @@ vcpu guest_cpu_reset(void)
#
# Mode: user, softmmu
# Targets: TCG(all)
-disable vcpu tcg guest_mem_before(TCGv vaddr, uint8_t info) "info=%d", "vaddr=0x%016"PRIx64" info=%d"
+vcpu tcg guest_mem_before(TCGv vaddr, uint8_t info) "info=%d", "vaddr=0x%016"PRIx64" info=%d"
# @num: System call number.
# @arg*: System call argument value.
@@ -168,7 +168,7 @@ disable vcpu tcg guest_mem_before(TCGv vaddr, uint8_t info) "info=%d", "vaddr=0x
#
# Mode: user
# Targets: TCG(all)
-disable vcpu guest_user_syscall(uint64_t num, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, uint64_t arg6, uint64_t arg7, uint64_t arg8) "num=0x%016"PRIx64" arg1=0x%016"PRIx64" arg2=0x%016"PRIx64" arg3=0x%016"PRIx64" arg4=0x%016"PRIx64" arg5=0x%016"PRIx64" arg6=0x%016"PRIx64" arg7=0x%016"PRIx64" arg8=0x%016"PRIx64
+vcpu guest_user_syscall(uint64_t num, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, uint64_t arg6, uint64_t arg7, uint64_t arg8) "num=0x%016"PRIx64" arg1=0x%016"PRIx64" arg2=0x%016"PRIx64" arg3=0x%016"PRIx64" arg4=0x%016"PRIx64" arg5=0x%016"PRIx64" arg6=0x%016"PRIx64" arg7=0x%016"PRIx64" arg8=0x%016"PRIx64
# @num: System call number.
# @ret: System call result value.
@@ -177,4 +177,4 @@ disable vcpu guest_user_syscall(uint64_t num, uint64_t arg1, uint64_t arg2, uint
#
# Mode: user
# Targets: TCG(all)
-disable vcpu guest_user_syscall_ret(uint64_t num, uint64_t ret) "num=0x%016"PRIx64" ret=0x%016"PRIx64
+vcpu guest_user_syscall_ret(uint64_t num, uint64_t ret) "num=0x%016"PRIx64" ret=0x%016"PRIx64
^ permalink raw reply related [flat|nested] 10+ messages in thread