From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
	Peter Maydell <peter.maydell@linaro.org>,
	Jason Wang <jasowang@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>
Subject: [PULL v4 14/48] vhost-vdpa: add trace-events
Date: Tue, 29 Sep 2020 03:21:28 -0400
Message-ID: <20200929071948.281157-15-mst@redhat.com>
In-Reply-To: <20200929071948.281157-1-mst@redhat.com>

From: Laurent Vivier <lvivier@redhat.com>

Add trace events to vhost-vdpa.c.

All traces from this file can be enabled with '-trace vhost_vdpa*'.
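
For example, a hypothetical invocation for a guest using a vhost-vdpa
backed virtio-net device (the device path and netdev options below are
placeholders for whatever setup is actually in use) could enable all of
these trace points from the command line:

  qemu-system-x86_64 \
      -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
      -device virtio-net-pci,netdev=vdpa0 \
      -trace 'vhost_vdpa*'

Individual events can also be toggled at run time via the monitor's
'trace-event' command.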

Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Message-Id: <20200925091055.186023-3-lvivier@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/virtio/vhost-vdpa.c | 92 +++++++++++++++++++++++++++++++++++++++---
 hw/virtio/trace-events | 31 ++++++++++++++
 2 files changed, 118 insertions(+), 5 deletions(-)

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index dbf2643ff7..4f1039910a 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -20,6 +20,8 @@
 #include "hw/virtio/vhost-vdpa.h"
 #include "qemu/main-loop.h"
 #include "cpu.h"
+#include "trace.h"
+#include "qemu-common.h"
 
 static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
 {
@@ -48,6 +50,9 @@ static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
     msg.iotlb.type = VHOST_IOTLB_UPDATE;
 
+    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
+                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
+
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
             fd, errno, strerror(errno));
@@ -69,6 +74,9 @@ static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
     msg.iotlb.size = size;
     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
 
+    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
+                               msg.iotlb.size, msg.iotlb.type);
+
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
             fd, errno, strerror(errno));
@@ -154,6 +162,9 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
             section->offset_within_region +
             (iova - section->offset_within_address_space);
 
+    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
+                                         vaddr, section->readonly);
+
     llsize = int128_sub(llend, int128_make64(iova));
 
     ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
@@ -209,6 +220,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
     llend = int128_add(llend, section->size);
     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
 
+    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));
+
     if (int128_ge(int128_make64(iova), llend)) {
         return;
     }
@@ -249,6 +262,7 @@ static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
 {
     uint8_t s;
 
+    trace_vhost_vdpa_add_status(dev, status);
     if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
         return;
     }
@@ -263,6 +277,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque)
     struct vhost_vdpa *v;
     uint64_t features;
     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+    trace_vhost_vdpa_init(dev, opaque);
 
     v = opaque;
     v->dev = dev;
@@ -283,6 +298,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
     struct vhost_vdpa *v;
     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
     v = dev->opaque;
+    trace_vhost_vdpa_cleanup(dev, v);
     memory_listener_unregister(&v->listener);
 
     dev->opaque = NULL;
@@ -291,13 +307,25 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
 
 static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
 {
+    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
     return INT_MAX;
 }
 
 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                     struct vhost_memory *mem)
 {
-
+    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
+    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
+        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
+        int i;
+        for (i = 0; i < mem->nregions; i++) {
+            trace_vhost_vdpa_dump_regions(dev, i,
+                                          mem->regions[i].guest_phys_addr,
+                                          mem->regions[i].memory_size,
+                                          mem->regions[i].userspace_addr,
+                                          mem->regions[i].flags_padding);
+        }
+    }
     if (mem->padding) {
         return -1;
     }
@@ -309,6 +337,7 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                    uint64_t features)
 {
     int ret;
+    trace_vhost_vdpa_set_features(dev, features);
     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
     uint8_t status = 0;
     if (ret) {
@@ -345,26 +374,34 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
 int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
 {
-    return vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
+    int ret;
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
+    trace_vhost_vdpa_get_device_id(dev, *device_id);
+    return ret;
 }
 
 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
 {
+    int ret;
     uint8_t status = 0;
 
-    return vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
+    trace_vhost_vdpa_reset_device(dev, status);
+    return ret;
 }
 
 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
 {
     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
 
+    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
     return idx - dev->vq_index;
 }
 
 static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
 {
     int i;
+    trace_vhost_vdpa_set_vring_ready(dev);
     for (i = 0; i < dev->nvqs; ++i) {
         struct vhost_vring_state state = {
             .index = dev->vq_index + i,
@@ -375,6 +412,19 @@ static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
     return 0;
 }
 
+static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
+                                   uint32_t config_len)
+{
+    int b, len;
+    char line[QEMU_HEXDUMP_LINE_LEN];
+
+    for (b = 0; b < config_len; b += 16) {
+        len = config_len - b;
+        qemu_hexdump_line(line, b, config, len, false);
+        trace_vhost_vdpa_dump_config(dev, line);
+    }
+}
+
 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                    uint32_t offset, uint32_t size,
                                    uint32_t flags)
@@ -383,10 +433,15 @@ static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
     int ret;
     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
 
+    trace_vhost_vdpa_set_config(dev, offset, size, flags);
     config = g_malloc(size + config_size);
     config->off = offset;
     config->len = size;
     memcpy(config->buf, data, size);
+    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
+        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
+        vhost_vdpa_dump_config(dev, data, size);
+    }
     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
     g_free(config);
     return ret;
@@ -399,18 +454,24 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
     int ret;
 
+    trace_vhost_vdpa_get_config(dev, config, config_len);
     v_config = g_malloc(config_len + config_size);
     v_config->len = config_len;
     v_config->off = 0;
     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
     memcpy(config, v_config->buf, config_len);
     g_free(v_config);
+    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
+        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
+        vhost_vdpa_dump_config(dev, config, config_len);
+    }
     return ret;
  }
 
 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
 {
     struct vhost_vdpa *v = dev->opaque;
+    trace_vhost_vdpa_dev_start(dev, started);
     if (started) {
         uint8_t status = 0;
         memory_listener_register(&v->listener, &address_space_memory);
@@ -432,53 +493,72 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                      struct vhost_log *log)
 {
+    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
+                                  log->log);
     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
 }
 
 static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                        struct vhost_vring_addr *addr)
 {
+    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
+                                    addr->desc_user_addr, addr->used_user_addr,
+                                    addr->avail_user_addr,
+                                    addr->log_guest_addr);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
 }
 
 static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
 {
+    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
 }
 
 static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                        struct vhost_vring_state *ring)
 {
+    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
 }
 
 static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                        struct vhost_vring_state *ring)
 {
-    return vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
+    int ret;
+
+    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
+    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
+    return ret;
 }
 
 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
 {
+    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
 }
 
 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
 {
+    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
 }
 
 static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                      uint64_t *features)
 {
-    return vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
+    int ret;
+
+    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
+    trace_vhost_vdpa_get_features(dev, *features);
+    return ret;
 }
 
 static int vhost_vdpa_set_owner(struct vhost_dev *dev)
 {
+    trace_vhost_vdpa_set_owner(dev);
     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
 }
 
@@ -489,6 +569,8 @@ static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
+    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
+                                 addr->avail_user_addr, addr->used_user_addr);
     return 0;
 }
 
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 845200bf10..cf1e59de30 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -22,6 +22,37 @@ vhost_user_postcopy_waker(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
 vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64
 vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
 
+# vhost-vdpa.c
+vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
+vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
+vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
+vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
+vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
+vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p"
+vhost_vdpa_cleanup(void *dev, void *vdpa) "dev: %p vdpa: %p"
+vhost_vdpa_memslots_limit(void *dev, int ret) "dev: %p = 0x%x"
+vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t padding) "dev: %p nregions: %"PRIu32" padding: 0x%"PRIx32
+vhost_vdpa_dump_regions(void *dev, int i, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint64_t flags_padding) "dev: %p %d: guest_phys_addr: 0x%"PRIx64" memory_size: 0x%"PRIx64" userspace_addr: 0x%"PRIx64" flags_padding: 0x%"PRIx64
+vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
+vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p device_id %"PRIu32
+vhost_vdpa_reset_device(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
+vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx: %d vq idx: %d"
+vhost_vdpa_set_vring_ready(void *dev) "dev: %p"
+vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
+vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32
+vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32
+vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d"
+vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p"
+vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64
+vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
+vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
+vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
+vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
+vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
+vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
+vhost_vdpa_set_owner(void *dev) "dev: %p"
+vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
+
 # virtio.c
 virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
 virtqueue_fill(void *vq, const void *elem, unsigned int len, unsigned int idx) "vq %p elem %p len %u idx %u"
-- 
MST


