* [PATCH v2 0/2] vhost-vdpa: add trace functions in vhost-vdpa.c
@ 2020-09-21 13:04 Laurent Vivier
  2020-09-21 13:04 ` [PATCH v2 1/2] util/hexdump: introduce qemu_hexdump_line() Laurent Vivier
  2020-09-21 13:04 ` [PATCH v2 2/2] vhost-vdpa: add trace-events Laurent Vivier
  0 siblings, 2 replies; 7+ messages in thread
From: Laurent Vivier @ 2020-09-21 13:04 UTC (permalink / raw)
  To: qemu-devel
  Cc: Laurent Vivier, Jason Wang, Cindy Lu, Stefan Hajnoczi,
	Michael S. Tsirkin

Add trace event calls to the vhost-vdpa functions.

Modify qemu_hexdump() so that the vdpa config can be dumped
to the trace log in hexadecimal form.

v2: fix problems reported by checkpatch (TABs, missing 0x prefix and lines over 80 characters)
    added Stefan's Acked-by

Laurent Vivier (2):
  util/hexdump: introduce qemu_hexdump_line()
  vhost-vdpa: add trace-events

 hw/virtio/trace-events | 29 ++++++++++++++
 hw/virtio/vhost-vdpa.c | 86 +++++++++++++++++++++++++++++++++++++++---
 include/qemu-common.h  |  8 ++++
 util/hexdump.c         | 54 ++++++++++++++++----------
 4 files changed, 152 insertions(+), 25 deletions(-)

-- 
2.26.2





* [PATCH v2 1/2] util/hexdump: introduce qemu_hexdump_line()
  2020-09-21 13:04 [PATCH v2 0/2] vhost-vdpa: add trace functions in vhost-vdpa.c Laurent Vivier
@ 2020-09-21 13:04 ` Laurent Vivier
  2020-09-21 13:04 ` [PATCH v2 2/2] vhost-vdpa: add trace-events Laurent Vivier
  1 sibling, 0 replies; 7+ messages in thread
From: Laurent Vivier @ 2020-09-21 13:04 UTC (permalink / raw)
  To: qemu-devel
  Cc: Laurent Vivier, Jason Wang, Cindy Lu, Stefan Hajnoczi,
	Michael S. Tsirkin

Dumping one line of hexadecimal/ASCII from a buffer is often needed.
Move this code out of qemu_hexdump() into a new qemu_hexdump_line()
helper and use it there.
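
For illustration, a minimal sketch of a caller that formats a buffer one
line at a time (dump_buffer() and its arguments are hypothetical, not part
of this patch):

    #include "qemu/osdep.h"
    #include "qemu-common.h"

    static void dump_buffer(FILE *fp, const void *buf, size_t size)
    {
        char line[QEMU_HEXDUMP_LINE_LEN];
        unsigned int b;

        for (b = 0; b < size; b += QEMU_HEXDUMP_LINE_BYTES) {
            /* qemu_hexdump_line() clamps the length to one line's worth */
            qemu_hexdump_line(line, b, buf, size - b, true);
            fprintf(fp, "%s\n", line);
        }
    }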

Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 include/qemu-common.h |  8 +++++++
 util/hexdump.c        | 54 +++++++++++++++++++++++++++----------------
 2 files changed, 42 insertions(+), 20 deletions(-)

diff --git a/include/qemu-common.h b/include/qemu-common.h
index 9cfd62669bf8..cc902b690db3 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -134,6 +134,14 @@ void os_setup_early_signal_handling(void);
 char *os_find_datadir(void);
 int os_parse_cmd_args(int index, const char *optarg);
 
+/*
+ * Hexdump a line of a byte buffer into a hexadecimal/ASCII buffer
+ */
+#define QEMU_HEXDUMP_LINE_BYTES 16 /* Number of bytes to dump */
+#define QEMU_HEXDUMP_LINE_LEN 75   /* Number of characters in line */
+void qemu_hexdump_line(char *line, unsigned int b, const void *bufptr,
+                       unsigned int len, bool ascii);
+
 /*
  * Hexdump a buffer to a file. An optional string prefix is added to every line
  */
diff --git a/util/hexdump.c b/util/hexdump.c
index 0b4662e701d8..2c105a884620 100644
--- a/util/hexdump.c
+++ b/util/hexdump.c
@@ -16,36 +16,50 @@
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 
-void qemu_hexdump(FILE *fp, const char *prefix,
-                  const void *bufptr, size_t size)
+void qemu_hexdump_line(char *line, unsigned int b, const void *bufptr,
+                       unsigned int len, bool ascii)
 {
     const char *buf = bufptr;
-    unsigned int b, len, i, c;
+    int i, c;
 
-    for (b = 0; b < size; b += 16) {
-        len = size - b;
-        if (len > 16) {
-            len = 16;
+    if (len > QEMU_HEXDUMP_LINE_BYTES) {
+        len = QEMU_HEXDUMP_LINE_BYTES;
+    }
+
+    line += snprintf(line, 6, "%04x:", b);
+    for (i = 0; i < QEMU_HEXDUMP_LINE_BYTES; i++) {
+        if ((i % 4) == 0) {
+            *line++ = ' ';
         }
-        fprintf(fp, "%s: %04x:", prefix, b);
-        for (i = 0; i < 16; i++) {
-            if ((i % 4) == 0) {
-                fprintf(fp, " ");
-            }
-            if (i < len) {
-                fprintf(fp, " %02x", (unsigned char)buf[b + i]);
-            } else {
-                fprintf(fp, "   ");
-            }
+        if (i < len) {
+            line += sprintf(line, " %02x", (unsigned char)buf[b + i]);
+        } else {
+            line += sprintf(line, "   ");
         }
-        fprintf(fp, " ");
+    }
+    if (ascii) {
+        *line++ = ' ';
         for (i = 0; i < len; i++) {
             c = buf[b + i];
             if (c < ' ' || c > '~') {
                 c = '.';
             }
-            fprintf(fp, "%c", c);
+            *line++ = c;
         }
-        fprintf(fp, "\n");
     }
+    *line = '\0';
+}
+
+void qemu_hexdump(FILE *fp, const char *prefix,
+                  const void *bufptr, size_t size)
+{
+    unsigned int b, len;
+    char line[QEMU_HEXDUMP_LINE_LEN];
+
+    for (b = 0; b < size; b += QEMU_HEXDUMP_LINE_BYTES) {
+        len = size - b;
+        qemu_hexdump_line(line, b, bufptr, len, true);
+        fprintf(fp, "%s: %s\n", prefix, line);
+    }
+
 }
-- 
2.26.2




* [PATCH v2 2/2] vhost-vdpa: add trace-events
  2020-09-21 13:04 [PATCH v2 0/2] vhost-vdpa: add trace functions in vhost-vdpa.c Laurent Vivier
  2020-09-21 13:04 ` [PATCH v2 1/2] util/hexdump: introduce qemu_hexdump_line() Laurent Vivier
@ 2020-09-21 13:04 ` Laurent Vivier
  2020-09-22  2:09   ` Jason Wang
  2020-09-22  7:37   ` Philippe Mathieu-Daudé
  1 sibling, 2 replies; 7+ messages in thread
From: Laurent Vivier @ 2020-09-21 13:04 UTC (permalink / raw)
  To: qemu-devel
  Cc: Laurent Vivier, Jason Wang, Cindy Lu, Stefan Hajnoczi,
	Michael S. Tsirkin

Add trace functionis in vhost-vdpa.c.

All traces from this file can be enabled with '-trace vhost_vdpa*'.
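
For example, with a vhost-vdpa network backend (the vhostdev path below is
only an example and other options are omitted), the whole group can be
enabled from the command line:

    qemu-system-x86_64 \
        -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
        -device virtio-net-pci,netdev=vdpa0 \
        -trace 'vhost_vdpa*'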

Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 hw/virtio/trace-events | 29 ++++++++++++++
 hw/virtio/vhost-vdpa.c | 86 +++++++++++++++++++++++++++++++++++++++---
 2 files changed, 110 insertions(+), 5 deletions(-)

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 845200bf109d..2b453f77a4e3 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -22,6 +22,35 @@ vhost_user_postcopy_waker(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
 vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64
 vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
 
+# vhost-vdpa.c
+vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
+vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
+vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
+vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p"
+vhost_vdpa_cleanup(void *dev, void *vdpa) "dev: %p vdpa: %p"
+vhost_vdpa_memslots_limit(void *dev, int ret) "dev: %p = 0x%x"
+vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t padding) "dev: %p nregions: %"PRIu32" padding: 0x%"PRIx32
+vhost_vdpa_dump_regions(void *dev, int i, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint64_t flags_padding) "dev: %p %d: guest_phys_addr: 0x%"PRIx64" memory_size: 0x%"PRIx64" userspace_addr: 0x%"PRIx64" flags_padding: 0x%"PRIx64
+vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
+vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p device_id %"PRIu32
+vhost_vdpa_reset_device(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
+vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx: %d vq idx: %d"
+vhost_vdpa_set_vring_ready(void *dev) "dev: %p"
+vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
+vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32
+vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32
+vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d"
+vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p"
+vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64
+vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
+vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
+vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
+vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
+vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
+vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
+vhost_vdpa_set_owner(void *dev) "dev: %p"
+vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
+
 # virtio.c
 virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
 virtqueue_fill(void *vq, const void *elem, unsigned int len, unsigned int idx) "vq %p elem %p len %u idx %u"
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 4580f3efd8a2..dc987704dcd9 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -20,6 +20,8 @@
 #include "hw/virtio/vhost-vdpa.h"
 #include "qemu/main-loop.h"
 #include "cpu.h"
+#include "trace.h"
+#include "qemu-common.h"
 
 static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
 {
@@ -114,6 +116,9 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
             section->offset_within_region +
             (iova - section->offset_within_address_space);
 
+    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
+                                         vaddr, section->readonly);
+
     llsize = int128_sub(llend, int128_make64(iova));
 
     ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
@@ -170,6 +175,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
     llend = int128_add(llend, section->size);
     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
 
+    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));
+
     if (int128_ge(int128_make64(iova), llend)) {
         return;
     }
@@ -210,6 +217,7 @@ static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
 {
     uint8_t s;
 
+    trace_vhost_vdpa_add_status(dev, status);
     if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
         return;
     }
@@ -224,6 +232,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque)
     struct vhost_vdpa *v;
     uint64_t features;
     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+    trace_vhost_vdpa_init(dev, opaque);
 
     v = opaque;
     dev->opaque =  opaque ;
@@ -243,6 +252,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
     struct vhost_vdpa *v;
     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
     v = dev->opaque;
+    trace_vhost_vdpa_cleanup(dev, v);
     memory_listener_unregister(&v->listener);
 
     dev->opaque = NULL;
@@ -251,13 +261,25 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
 
 static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
 {
+    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
     return INT_MAX;
 }
 
 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                     struct vhost_memory *mem)
 {
-
+    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
+    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
+        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
+        int i;
+        for (i = 0; i < mem->nregions; i++) {
+            trace_vhost_vdpa_dump_regions(dev, i,
+                                          mem->regions[i].guest_phys_addr,
+                                          mem->regions[i].memory_size,
+                                          mem->regions[i].userspace_addr,
+                                          mem->regions[i].flags_padding);
+        }
+    }
     if (mem->padding) {
         return -1;
     }
@@ -269,6 +291,7 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                    uint64_t features)
 {
     int ret;
+    trace_vhost_vdpa_set_features(dev, features);
     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
     uint8_t status = 0;
     if (ret) {
@@ -283,26 +306,34 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
 int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
 {
-    return vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
+    int ret;
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
+    trace_vhost_vdpa_get_device_id(dev, *device_id);
+    return ret;
 }
 
 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
 {
+    int ret;
     uint8_t status = 0;
 
-    return vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
+    trace_vhost_vdpa_reset_device(dev, status);
+    return ret;
 }
 
 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
 {
     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
 
+    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
     return idx - dev->vq_index;
 }
 
 static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
 {
     int i;
+    trace_vhost_vdpa_set_vring_ready(dev);
     for (i = 0; i < dev->nvqs; ++i) {
         struct vhost_vring_state state = {
             .index = dev->vq_index + i,
@@ -313,6 +344,19 @@ static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
     return 0;
 }
 
+static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
+                                   uint32_t config_len)
+{
+    int b, len;
+    char line[QEMU_HEXDUMP_LINE_LEN];
+
+    for (b = 0; b < config_len; b += 16) {
+        len = config_len - b;
+        qemu_hexdump_line(line, b, config, len, false);
+        trace_vhost_vdpa_dump_config(dev, line);
+    }
+}
+
 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                    uint32_t offset, uint32_t size,
                                    uint32_t flags)
@@ -320,6 +364,7 @@ static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
     struct vhost_vdpa_config *config;
     int ret;
     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
+    trace_vhost_vdpa_set_config(dev, offset, size, flags);
     config = g_malloc(size + config_size);
     if (config == NULL) {
         return -1;
@@ -327,6 +372,10 @@ static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
     config->off = offset;
     config->len = size;
     memcpy(config->buf, data, size);
+    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
+        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
+        vhost_vdpa_dump_config(dev, data, size);
+    }
     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
     g_free(config);
     return ret;
@@ -339,6 +388,7 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
     int ret;
 
+    trace_vhost_vdpa_get_config(dev, config, config_len);
     v_config = g_malloc(config_len + config_size);
     if (v_config == NULL) {
         return -1;
@@ -348,12 +398,17 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
     memcpy(config, v_config->buf, config_len);
     g_free(v_config);
+    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
+        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
+        vhost_vdpa_dump_config(dev, config, config_len);
+    }
     return ret;
  }
 
 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
 {
     struct vhost_vdpa *v = dev->opaque;
+    trace_vhost_vdpa_dev_start(dev, started);
     if (started) {
         uint8_t status = 0;
         memory_listener_register(&v->listener, &address_space_memory);
@@ -375,53 +430,72 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                      struct vhost_log *log)
 {
+    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
+                                  log->log);
     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
 }
 
 static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                        struct vhost_vring_addr *addr)
 {
+    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
+                                    addr->desc_user_addr, addr->used_user_addr,
+                                    addr->avail_user_addr,
+                                    addr->log_guest_addr);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
 }
 
 static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
 {
+    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
 }
 
 static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                        struct vhost_vring_state *ring)
 {
+    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
 }
 
 static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                        struct vhost_vring_state *ring)
 {
-    return vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
+    int ret;
+
+    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
+    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
+    return ret;
 }
 
 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
 {
+    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
 }
 
 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
 {
+    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
 }
 
 static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                      uint64_t *features)
 {
-    return vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
+    int ret;
+
+    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
+    trace_vhost_vdpa_get_features(dev, *features);
+    return ret;
 }
 
 static int vhost_vdpa_set_owner(struct vhost_dev *dev)
 {
+    trace_vhost_vdpa_set_owner(dev);
     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
 }
 
@@ -432,6 +506,8 @@ static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
+    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
+                                 addr->avail_user_addr, addr->used_user_addr);
     return 0;
 }
 
-- 
2.26.2




* Re: [PATCH v2 2/2] vhost-vdpa: add trace-events
  2020-09-21 13:04 ` [PATCH v2 2/2] vhost-vdpa: add trace-events Laurent Vivier
@ 2020-09-22  2:09   ` Jason Wang
  2020-09-24  8:42     ` Laurent Vivier
  2020-09-22  7:37   ` Philippe Mathieu-Daudé
  1 sibling, 1 reply; 7+ messages in thread
From: Jason Wang @ 2020-09-22  2:09 UTC (permalink / raw)
  To: Laurent Vivier, qemu-devel; +Cc: Stefan Hajnoczi, Cindy Lu, Michael S. Tsirkin


On 2020/9/21 9:04 PM, Laurent Vivier wrote:
> Add trace functionis in vhost-vdpa.c.
>
> All traces from this file can be enabled with '-trace vhost_vdpa*'.
>
> Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
> Signed-off-by: Laurent Vivier <lvivier@redhat.com>
> ---
>   hw/virtio/trace-events | 29 ++++++++++++++
>   hw/virtio/vhost-vdpa.c | 86 +++++++++++++++++++++++++++++++++++++++---
>   2 files changed, 110 insertions(+), 5 deletions(-)
>
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 845200bf109d..2b453f77a4e3 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -22,6 +22,35 @@ vhost_user_postcopy_waker(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
>   vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64
>   vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
>   
> +# vhost-vdpa.c
> +vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
> +vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
> +vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
> +vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p"
> +vhost_vdpa_cleanup(void *dev, void *vdpa) "dev: %p vdpa: %p"
> +vhost_vdpa_memslots_limit(void *dev, int ret) "dev: %p = 0x%x"
> +vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t padding) "dev: %p nregions: %"PRIu32" padding: 0x%"PRIx32
> +vhost_vdpa_dump_regions(void *dev, int i, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint64_t flags_padding) "dev: %p %d: guest_phys_addr: 0x%"PRIx64" memory_size: 0x%"PRIx64" userspace_addr: 0x%"PRIx64" flags_padding: 0x%"PRIx64
> +vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
> +vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p device_id %"PRIu32
> +vhost_vdpa_reset_device(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
> +vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx: %d vq idx: %d"
> +vhost_vdpa_set_vring_ready(void *dev) "dev: %p"
> +vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
> +vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32
> +vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32
> +vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d"
> +vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p"
> +vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64
> +vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
> +vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
> +vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
> +vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
> +vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"


It's better to add set/get_vring_addr() and dma_map()/dma_unmap().

Thanks


> +vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
> +vhost_vdpa_set_owner(void *dev) "dev: %p"
> +vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
> +
>   # virtio.c
>   virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
>   virtqueue_fill(void *vq, const void *elem, unsigned int len, unsigned int idx) "vq %p elem %p len %u idx %u"
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 4580f3efd8a2..dc987704dcd9 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -20,6 +20,8 @@
>   #include "hw/virtio/vhost-vdpa.h"
>   #include "qemu/main-loop.h"
>   #include "cpu.h"
> +#include "trace.h"
> +#include "qemu-common.h"
>   
>   static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
>   {
> @@ -114,6 +116,9 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
>               section->offset_within_region +
>               (iova - section->offset_within_address_space);
>   
> +    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
> +                                         vaddr, section->readonly);
> +
>       llsize = int128_sub(llend, int128_make64(iova));
>   
>       ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
> @@ -170,6 +175,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
>       llend = int128_add(llend, section->size);
>       llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
>   
> +    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));
> +
>       if (int128_ge(int128_make64(iova), llend)) {
>           return;
>       }
> @@ -210,6 +217,7 @@ static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
>   {
>       uint8_t s;
>   
> +    trace_vhost_vdpa_add_status(dev, status);
>       if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
>           return;
>       }
> @@ -224,6 +232,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque)
>       struct vhost_vdpa *v;
>       uint64_t features;
>       assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> +    trace_vhost_vdpa_init(dev, opaque);
>   
>       v = opaque;
>       dev->opaque =  opaque ;
> @@ -243,6 +252,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
>       struct vhost_vdpa *v;
>       assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
>       v = dev->opaque;
> +    trace_vhost_vdpa_cleanup(dev, v);
>       memory_listener_unregister(&v->listener);
>   
>       dev->opaque = NULL;
> @@ -251,13 +261,25 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
>   
>   static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
>   {
> +    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
>       return INT_MAX;
>   }
>   
>   static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
>                                       struct vhost_memory *mem)
>   {
> -
> +    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
> +    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
> +        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
> +        int i;
> +        for (i = 0; i < mem->nregions; i++) {
> +            trace_vhost_vdpa_dump_regions(dev, i,
> +                                          mem->regions[i].guest_phys_addr,
> +                                          mem->regions[i].memory_size,
> +                                          mem->regions[i].userspace_addr,
> +                                          mem->regions[i].flags_padding);
> +        }
> +    }
>       if (mem->padding) {
>           return -1;
>       }
> @@ -269,6 +291,7 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
>                                      uint64_t features)
>   {
>       int ret;
> +    trace_vhost_vdpa_set_features(dev, features);
>       ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
>       uint8_t status = 0;
>       if (ret) {
> @@ -283,26 +306,34 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
>   int vhost_vdpa_get_device_id(struct vhost_dev *dev,
>                                      uint32_t *device_id)
>   {
> -    return vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
> +    int ret;
> +    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
> +    trace_vhost_vdpa_get_device_id(dev, *device_id);
> +    return ret;
>   }
>   
>   static int vhost_vdpa_reset_device(struct vhost_dev *dev)
>   {
> +    int ret;
>       uint8_t status = 0;
>   
> -    return vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
> +    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
> +    trace_vhost_vdpa_reset_device(dev, status);
> +    return ret;
>   }
>   
>   static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
>   {
>       assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
>   
> +    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
>       return idx - dev->vq_index;
>   }
>   
>   static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
>   {
>       int i;
> +    trace_vhost_vdpa_set_vring_ready(dev);
>       for (i = 0; i < dev->nvqs; ++i) {
>           struct vhost_vring_state state = {
>               .index = dev->vq_index + i,
> @@ -313,6 +344,19 @@ static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
>       return 0;
>   }
>   
> +static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
> +                                   uint32_t config_len)
> +{
> +    int b, len;
> +    char line[QEMU_HEXDUMP_LINE_LEN];
> +
> +    for (b = 0; b < config_len; b += 16) {
> +        len = config_len - b;
> +        qemu_hexdump_line(line, b, config, len, false);
> +        trace_vhost_vdpa_dump_config(dev, line);
> +    }
> +}
> +
>   static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
>                                      uint32_t offset, uint32_t size,
>                                      uint32_t flags)
> @@ -320,6 +364,7 @@ static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
>       struct vhost_vdpa_config *config;
>       int ret;
>       unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
> +    trace_vhost_vdpa_set_config(dev, offset, size, flags);
>       config = g_malloc(size + config_size);
>       if (config == NULL) {
>           return -1;
> @@ -327,6 +372,10 @@ static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
>       config->off = offset;
>       config->len = size;
>       memcpy(config->buf, data, size);
> +    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
> +        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
> +        vhost_vdpa_dump_config(dev, data, size);
> +    }
>       ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
>       g_free(config);
>       return ret;
> @@ -339,6 +388,7 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
>       unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
>       int ret;
>   
> +    trace_vhost_vdpa_get_config(dev, config, config_len);
>       v_config = g_malloc(config_len + config_size);
>       if (v_config == NULL) {
>           return -1;
> @@ -348,12 +398,17 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
>       ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
>       memcpy(config, v_config->buf, config_len);
>       g_free(v_config);
> +    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
> +        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
> +        vhost_vdpa_dump_config(dev, config, config_len);
> +    }
>       return ret;
>    }
>   
>   static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
>   {
>       struct vhost_vdpa *v = dev->opaque;
> +    trace_vhost_vdpa_dev_start(dev, started);
>       if (started) {
>           uint8_t status = 0;
>           memory_listener_register(&v->listener, &address_space_memory);
> @@ -375,53 +430,72 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
>   static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
>                                        struct vhost_log *log)
>   {
> +    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
> +                                  log->log);
>       return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
>   }
>   
>   static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
>                                          struct vhost_vring_addr *addr)
>   {
> +    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
> +                                    addr->desc_user_addr, addr->used_user_addr,
> +                                    addr->avail_user_addr,
> +                                    addr->log_guest_addr);
>       return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
>   }
>   
>   static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
>                                         struct vhost_vring_state *ring)
>   {
> +    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
>       return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
>   }
>   
>   static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
>                                          struct vhost_vring_state *ring)
>   {
> +    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
>       return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
>   }
>   
>   static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
>                                          struct vhost_vring_state *ring)
>   {
> -    return vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
> +    int ret;
> +
> +    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
> +    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
> +    return ret;
>   }
>   
>   static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
>                                          struct vhost_vring_file *file)
>   {
> +    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
>       return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
>   }
>   
>   static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
>                                          struct vhost_vring_file *file)
>   {
> +    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
>       return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
>   }
>   
>   static int vhost_vdpa_get_features(struct vhost_dev *dev,
>                                        uint64_t *features)
>   {
> -    return vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
> +    int ret;
> +
> +    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
> +    trace_vhost_vdpa_get_features(dev, *features);
> +    return ret;
>   }
>   
>   static int vhost_vdpa_set_owner(struct vhost_dev *dev)
>   {
> +    trace_vhost_vdpa_set_owner(dev);
>       return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
>   }
>   
> @@ -432,6 +506,8 @@ static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
>       addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
>       addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
>       addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
> +    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
> +                                 addr->avail_user_addr, addr->used_user_addr);
>       return 0;
>   }
>   




* Re: [PATCH v2 2/2] vhost-vdpa: add trace-events
  2020-09-21 13:04 ` [PATCH v2 2/2] vhost-vdpa: add trace-events Laurent Vivier
  2020-09-22  2:09   ` Jason Wang
@ 2020-09-22  7:37   ` Philippe Mathieu-Daudé
  1 sibling, 0 replies; 7+ messages in thread
From: Philippe Mathieu-Daudé @ 2020-09-22  7:37 UTC (permalink / raw)
  To: Laurent Vivier, qemu-devel
  Cc: Jason Wang, Stefan Hajnoczi, Cindy Lu, Michael S. Tsirkin

On 9/21/20 3:04 PM, Laurent Vivier wrote:
> Add trace functionis in vhost-vdpa.c.

Typo "function"?

> 
> All traces from this file can be enabled with '-trace vhost_vdpa*'.
> 
> Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
> Signed-off-by: Laurent Vivier <lvivier@redhat.com>
> ---
>  hw/virtio/trace-events | 29 ++++++++++++++
>  hw/virtio/vhost-vdpa.c | 86 +++++++++++++++++++++++++++++++++++++++---
>  2 files changed, 110 insertions(+), 5 deletions(-)




* Re: [PATCH v2 2/2] vhost-vdpa: add trace-events
  2020-09-22  2:09   ` Jason Wang
@ 2020-09-24  8:42     ` Laurent Vivier
  2020-09-24  9:13       ` Jason Wang
  0 siblings, 1 reply; 7+ messages in thread
From: Laurent Vivier @ 2020-09-24  8:42 UTC (permalink / raw)
  To: Jason Wang, qemu-devel; +Cc: Stefan Hajnoczi, Cindy Lu, Michael S. Tsirkin

On 22/09/2020 04:09, Jason Wang wrote:
> 
> On 2020/9/21 9:04 PM, Laurent Vivier wrote:
>> Add trace functionis in vhost-vdpa.c.
>>
>> All traces from this file can be enabled with '-trace vhost_vdpa*'.
>>
>> Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
>> Signed-off-by: Laurent Vivier <lvivier@redhat.com>
>> ---
>>   hw/virtio/trace-events | 29 ++++++++++++++
>>   hw/virtio/vhost-vdpa.c | 86 +++++++++++++++++++++++++++++++++++++++---
>>   2 files changed, 110 insertions(+), 5 deletions(-)
>>
>> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
>> index 845200bf109d..2b453f77a4e3 100644
>> --- a/hw/virtio/trace-events
>> +++ b/hw/virtio/trace-events
>> @@ -22,6 +22,35 @@ vhost_user_postcopy_waker(const char *rb, uint64_t
>> rb_offset) "%s + 0x%"PRIx64
>>   vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64
>>   vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t
>> rb_offset) "%s + 0x%"PRIx64
>>   +# vhost-vdpa.c
>> +vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t
>> llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend
>> 0x%"PRIx64" vaddr: %p read-only: %d"
>> +vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t
>> llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
>> +vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status:
>> 0x%"PRIx8
>> +vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p"
>> +vhost_vdpa_cleanup(void *dev, void *vdpa) "dev: %p vdpa: %p"
>> +vhost_vdpa_memslots_limit(void *dev, int ret) "dev: %p = 0x%x"
>> +vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t
>> padding) "dev: %p nregions: %"PRIu32" padding: 0x%"PRIx32
>> +vhost_vdpa_dump_regions(void *dev, int i, uint64_t guest_phys_addr,
>> uint64_t memory_size, uint64_t userspace_addr, uint64_t flags_padding)
>> "dev: %p %d: guest_phys_addr: 0x%"PRIx64" memory_size: 0x%"PRIx64"
>> userspace_addr: 0x%"PRIx64" flags_padding: 0x%"PRIx64
>> +vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p
>> features: 0x%"PRIx64
>> +vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p
>> device_id %"PRIu32
>> +vhost_vdpa_reset_device(void *dev, uint8_t status) "dev: %p status:
>> 0x%"PRIx8
>> +vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx:
>> %d vq idx: %d"
>> +vhost_vdpa_set_vring_ready(void *dev) "dev: %p"
>> +vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
>> +vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size,
>> uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags:
>> 0x%"PRIx32
>> +vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len)
>> "dev: %p config: %p config_len: %"PRIu32
>> +vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d"
>> +vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long
>> size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size:
>> %llu refcnt: %d fd: %d log: %p"
>> +vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int
>> flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t
>> avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags:
>> 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64"
>> avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64
>> +vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int
>> num) "dev: %p index: %u num: %u"
>> +vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int
>> num) "dev: %p index: %u num: %u"
>> +vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int
>> num) "dev: %p index: %u num: %u"
>> +vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd)
>> "dev: %p index: %u fd: %d"
>> +vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd)
>> "dev: %p index: %u fd: %d"
> 
> 
> It's better to add set/get_vring_addr() and dma_map()/dma_unmap().

I'm adding dma_map()/dma_unmap().
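
As a rough sketch, the two new trace-events entries could mirror the
existing listener events (the exact names and signatures here are
illustrative and may change in the next version):

    vhost_vdpa_dma_map(void *vdpa, uint64_t iova, uint64_t size, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" size 0x%"PRIx64" vaddr: %p read-only: %d"
    vhost_vdpa_dma_unmap(void *vdpa, uint64_t iova, uint64_t size) "vdpa: %p iova 0x%"PRIx64" size 0x%"PRIx64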

set_vring_addr() is already in the list, and get_vring_addr() doesn't
exist. As I don't see the link with vhost_vdpa_set_vring_call(), I would
keep it.

Did I miss something?

Thanks,
Laurent




* Re: [PATCH v2 2/2] vhost-vdpa: add trace-events
  2020-09-24  8:42     ` Laurent Vivier
@ 2020-09-24  9:13       ` Jason Wang
  0 siblings, 0 replies; 7+ messages in thread
From: Jason Wang @ 2020-09-24  9:13 UTC (permalink / raw)
  To: Laurent Vivier, qemu-devel; +Cc: Cindy Lu, Stefan Hajnoczi, Michael S. Tsirkin


On 2020/9/24 4:42 PM, Laurent Vivier wrote:
> On 22/09/2020 04:09, Jason Wang wrote:
>> On 2020/9/21 9:04 PM, Laurent Vivier wrote:
>>> Add trace functionis in vhost-vdpa.c.
>>>
>>> All traces from this file can be enabled with '-trace vhost_vdpa*'.
>>>
>>> Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
>>> Signed-off-by: Laurent Vivier <lvivier@redhat.com>
>>> ---
>>>    hw/virtio/trace-events | 29 ++++++++++++++
>>>    hw/virtio/vhost-vdpa.c | 86 +++++++++++++++++++++++++++++++++++++++---
>>>    2 files changed, 110 insertions(+), 5 deletions(-)
>>>
>>> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
>>> index 845200bf109d..2b453f77a4e3 100644
>>> --- a/hw/virtio/trace-events
>>> +++ b/hw/virtio/trace-events
>>> @@ -22,6 +22,35 @@ vhost_user_postcopy_waker(const char *rb, uint64_t
>>> rb_offset) "%s + 0x%"PRIx64
>>>    vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64
>>>    vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t
>>> rb_offset) "%s + 0x%"PRIx64
>>>    +# vhost-vdpa.c
>>> +vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t
>>> llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend
>>> 0x%"PRIx64" vaddr: %p read-only: %d"
>>> +vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t
>>> llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
>>> +vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status:
>>> 0x%"PRIx8
>>> +vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p"
>>> +vhost_vdpa_cleanup(void *dev, void *vdpa) "dev: %p vdpa: %p"
>>> +vhost_vdpa_memslots_limit(void *dev, int ret) "dev: %p = 0x%x"
>>> +vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t
>>> padding) "dev: %p nregions: %"PRIu32" padding: 0x%"PRIx32
>>> +vhost_vdpa_dump_regions(void *dev, int i, uint64_t guest_phys_addr,
>>> uint64_t memory_size, uint64_t userspace_addr, uint64_t flags_padding)
>>> "dev: %p %d: guest_phys_addr: 0x%"PRIx64" memory_size: 0x%"PRIx64"
>>> userspace_addr: 0x%"PRIx64" flags_padding: 0x%"PRIx64
>>> +vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p
>>> features: 0x%"PRIx64
>>> +vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p
>>> device_id %"PRIu32
>>> +vhost_vdpa_reset_device(void *dev, uint8_t status) "dev: %p status:
>>> 0x%"PRIx8
>>> +vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx:
>>> %d vq idx: %d"
>>> +vhost_vdpa_set_vring_ready(void *dev) "dev: %p"
>>> +vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
>>> +vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size,
>>> uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags:
>>> 0x%"PRIx32
>>> +vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len)
>>> "dev: %p config: %p config_len: %"PRIu32
>>> +vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d"
>>> +vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long
>>> size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size:
>>> %llu refcnt: %d fd: %d log: %p"
>>> +vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int
>>> flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t
>>> avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags:
>>> 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64"
>>> avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64
>>> +vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int
>>> num) "dev: %p index: %u num: %u"
>>> +vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int
>>> num) "dev: %p index: %u num: %u"
>>> +vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int
>>> num) "dev: %p index: %u num: %u"
>>> +vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd)
>>> "dev: %p index: %u fd: %d"
>>> +vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd)
>>> "dev: %p index: %u fd: %d"
>>
>> It's better to add set/get_vring_addr() and dma_map()/dma_unmap().
> I'm adding dma_map()/dma_unmap().
>
> set_vring_addr() is already in the list, and get_vring_addr() doesn't
> exist.


You are right. We don't have get_vring_addr() actually.


> As I don't see the link with vhost_vdpa_set_vring_call() I would
> keep it.
>
> Did I miss something?


Nope, there's no link.

Please keep that.

Thanks


>
> Thanks,
> Laurent
>
>



