* [Qemu-devel] [PATCH 0/7] virtio: use MemoryRegionCache for descriptors and rings
From: Paolo Bonzini @ 2017-01-20 17:07 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst, stefanha

Patch posted during 2.8 hard freeze.  Quick measurements give a 10%
performance improvement on various virtio-blk benchmarks, but the machine
I used seems to love my patches particularly today!

Paolo

Paolo Bonzini (7):
  virtio: make virtio_should_notify static
  virtio: add virtio_*_phys_cached
  virtio: use address_space_map/unmap to access descriptors
  virtio: use MemoryRegionCache to access descriptors
  virtio: add MemoryListener to cache ring translations
  virtio: use VRingMemoryRegionCaches for descriptor ring
  virtio: use VRingMemoryRegionCaches for avail and used rings

 hw/net/virtio-net.c               |  14 +-
 hw/virtio/virtio.c                | 322 ++++++++++++++++++++++++++++++--------
 include/hw/virtio/virtio-access.h |  52 ++++++
 include/hw/virtio/virtio.h        |   2 +-
 4 files changed, 322 insertions(+), 68 deletions(-)
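
For readers new to the API, the core trick of the series is to translate
a guest-physical ring address once and then reuse that translation for
every subsequent access, instead of walking the memory map on each load
and store.  A minimal sketch of the lifecycle, using only calls that
appear in the patches below (it assumes QEMU's internal headers and a
valid AddressSpace; illustrative, not a standalone program):

    /* Sketch of the MemoryRegionCache lifecycle used throughout this
     * series.  Error handling is abbreviated. */
    static uint16_t read_ring_u16(AddressSpace *as, hwaddr ring_base,
                                  hwaddr off, hwaddr region_size)
    {
        MemoryRegionCache cache;
        int64_t len;
        uint16_t val = 0;

        /* Translate [ring_base, ring_base + region_size) once... */
        len = address_space_cache_init(&cache, as, ring_base,
                                       region_size, false);
        if (len >= (int64_t)region_size) {
            /* ...then each access is a cheap load at a cache offset. */
            val = lduw_le_phys_cached(&cache, off);
        }
        address_space_cache_destroy(&cache);
        return val;
    }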

-- 
2.9.3

* [Qemu-devel] [PATCH 1/7] virtio: make virtio_should_notify static
From: Paolo Bonzini @ 2017-01-20 17:07 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst, stefanha

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 hw/virtio/virtio.c         | 2 +-
 include/hw/virtio/virtio.h | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index cc17b97..b1c5563 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -1383,7 +1383,7 @@ static void virtio_set_isr(VirtIODevice *vdev, int value)
     }
 }
 
-bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
+static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
     uint16_t old, new;
     bool v;
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 6523bac..525da24 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -182,7 +182,6 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                                unsigned int *out_bytes,
                                unsigned max_in_bytes, unsigned max_out_bytes);
 
-bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq);
 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq);
 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
 
-- 
2.9.3

* [Qemu-devel] [PATCH 2/7] virtio: add virtio_*_phys_cached
From: Paolo Bonzini @ 2017-01-20 17:07 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst, stefanha

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 include/hw/virtio/virtio-access.h | 52 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h
index 91ae14d..2e92074 100644
--- a/include/hw/virtio/virtio-access.h
+++ b/include/hw/virtio/virtio-access.h
@@ -156,6 +156,58 @@ static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s)
 #endif
 }
 
+static inline uint16_t virtio_lduw_phys_cached(VirtIODevice *vdev,
+                                               MemoryRegionCache *cache,
+                                               hwaddr pa)
+{
+    if (virtio_access_is_big_endian(vdev)) {
+        return lduw_be_phys_cached(cache, pa);
+    }
+    return lduw_le_phys_cached(cache, pa);
+}
+
+static inline uint32_t virtio_ldl_phys_cached(VirtIODevice *vdev,
+                                              MemoryRegionCache *cache,
+                                              hwaddr pa)
+{
+    if (virtio_access_is_big_endian(vdev)) {
+        return ldl_be_phys_cached(cache, pa);
+    }
+    return ldl_le_phys_cached(cache, pa);
+}
+
+static inline uint64_t virtio_ldq_phys_cached(VirtIODevice *vdev,
+                                              MemoryRegionCache *cache,
+                                              hwaddr pa)
+{
+    if (virtio_access_is_big_endian(vdev)) {
+        return ldq_be_phys_cached(cache, pa);
+    }
+    return ldq_le_phys_cached(cache, pa);
+}
+
+static inline void virtio_stw_phys_cached(VirtIODevice *vdev,
+                                          MemoryRegionCache *cache,
+                                          hwaddr pa, uint16_t value)
+{
+    if (virtio_access_is_big_endian(vdev)) {
+        stw_be_phys_cached(cache, pa, value);
+    } else {
+        stw_le_phys_cached(cache, pa, value);
+    }
+}
+
+static inline void virtio_stl_phys_cached(VirtIODevice *vdev,
+                                          MemoryRegionCache *cache,
+                                          hwaddr pa, uint32_t value)
+{
+    if (virtio_access_is_big_endian(vdev)) {
+        stl_be_phys_cached(cache, pa, value);
+    } else {
+        stl_le_phys_cached(cache, pa, value);
+    }
+}
+
 static inline void virtio_tswap16s(VirtIODevice *vdev, uint16_t *s)
 {
     *s = virtio_tswap16(vdev, *s);
-- 
2.9.3

* [Qemu-devel] [PATCH 3/7] virtio: use address_space_map/unmap to access descriptors
From: Paolo Bonzini @ 2017-01-20 17:07 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst, stefanha

By itself this makes little difference, but it keeps the code change
smaller for the next patch, which introduces MemoryRegionCache: the
address_space_map/unmap pair has the same shape as MemoryRegionCache
init/destroy.
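
To make the similarity concrete, the two idioms share the same
init/access/destroy structure (a sketch with illustrative names, using
the same QEMU calls as the diff below):

    /* Idiom used by this patch: transient mapping. */
    static void access_via_map(AddressSpace *as, hwaddr addr, hwaddr size)
    {
        hwaddr len = size;
        void *ptr = address_space_map(as, addr, &len, false);  /* "init" */
        if (ptr && len >= size) {
            /* ... read descriptors straight out of ptr ... */
        }
        if (ptr) {
            address_space_unmap(as, ptr, len, false, 0);       /* "destroy" */
        }
    }

    /* Idiom used by the next patch: MemoryRegionCache. */
    static void access_via_cache(AddressSpace *as, hwaddr addr, hwaddr size)
    {
        MemoryRegionCache cache;
        int64_t len = address_space_cache_init(&cache, as, addr, size, false);
        if (len >= (int64_t)size) {
            /* ... address_space_read_cached(&cache, off, buf, n) ... */
        }
        address_space_cache_destroy(&cache);
    }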

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 hw/virtio/virtio.c | 76 ++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 57 insertions(+), 19 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index b1c5563..7a49f2e 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -120,10 +120,9 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
 }
 
 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
-                            hwaddr desc_pa, int i)
+                            uint8_t *desc_ptr, int i)
 {
-    address_space_read(vdev->dma_as, desc_pa + i * sizeof(VRingDesc),
-                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
+    memcpy(desc, desc_ptr + i * sizeof(VRingDesc), sizeof(VRingDesc));
     virtio_tswap64s(vdev, &desc->addr);
     virtio_tswap32s(vdev, &desc->len);
     virtio_tswap16s(vdev, &desc->flags);
@@ -408,7 +407,7 @@ enum {
 };
 
 static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
-                                    hwaddr desc_pa, unsigned int max,
+                                    void *desc_ptr, unsigned int max,
                                     unsigned int *next)
 {
     /* If this descriptor says it doesn't chain, we're done. */
@@ -426,7 +425,7 @@ static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
         return VIRTQUEUE_READ_DESC_ERROR;
     }
 
-    vring_desc_read(vdev, desc, desc_pa, *next);
+    vring_desc_read(vdev, desc, desc_ptr, *next);
     return VIRTQUEUE_READ_DESC_MORE;
 }
 
@@ -434,18 +433,19 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                                unsigned int *out_bytes,
                                unsigned max_in_bytes, unsigned max_out_bytes)
 {
+    VirtIODevice *vdev = vq->vdev;
     unsigned int idx;
     unsigned int total_bufs, in_total, out_total;
+    void *desc_ptr = NULL;
+    hwaddr len = 0;
     int rc;
 
     idx = vq->last_avail_idx;
 
     total_bufs = in_total = out_total = 0;
     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
-        VirtIODevice *vdev = vq->vdev;
         unsigned int max, num_bufs, indirect = 0;
         VRingDesc desc;
-        hwaddr desc_pa;
         unsigned int i;
 
         max = vq->vring.num;
@@ -455,10 +455,18 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
             goto err;
         }
 
-        desc_pa = vq->vring.desc;
-        vring_desc_read(vdev, &desc, desc_pa, i);
+        len = max * sizeof(VRingDesc);
+        desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
+        if (len < max * sizeof(VRingDesc)) {
+            virtio_error(vdev, "Cannot map descriptor ring");
+            goto err;
+        }
+
+        vring_desc_read(vdev, &desc, desc_ptr, i);
 
         if (desc.flags & VRING_DESC_F_INDIRECT) {
+            address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
+            len = desc.len;
             if (desc.len % sizeof(VRingDesc)) {
                 virtio_error(vdev, "Invalid size for indirect buffer table");
                 goto err;
@@ -471,11 +479,16 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
             }
 
             /* loop over the indirect descriptor table */
+            desc_ptr = address_space_map(vdev->dma_as, desc.addr, &len, false);
+            if (len < desc.len) {
+                virtio_error(vdev, "Cannot map indirect buffer");
+                goto err;
+            }
+
             indirect = 1;
             max = desc.len / sizeof(VRingDesc);
-            desc_pa = desc.addr;
             num_bufs = i = 0;
-            vring_desc_read(vdev, &desc, desc_pa, i);
+            vring_desc_read(vdev, &desc, desc_ptr, i);
         }
 
         do {
@@ -494,7 +507,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                 goto done;
             }
 
-            rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
+            rc = virtqueue_read_next_desc(vdev, &desc, desc_ptr, max, &i);
         } while (rc == VIRTQUEUE_READ_DESC_MORE);
 
         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
@@ -512,6 +525,9 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
     }
 
 done:
+    if (desc_ptr) {
+        address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
+    }
     if (in_bytes) {
         *in_bytes = in_total;
     }
@@ -651,9 +667,10 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu
 void *virtqueue_pop(VirtQueue *vq, size_t sz)
 {
     unsigned int i, head, max;
-    hwaddr desc_pa = vq->vring.desc;
+    void *desc_ptr = NULL;
+    hwaddr len;
     VirtIODevice *vdev = vq->vdev;
-    VirtQueueElement *elem;
+    VirtQueueElement *elem = NULL;
     unsigned out_num, in_num;
     hwaddr addr[VIRTQUEUE_MAX_SIZE];
     struct iovec iov[VIRTQUEUE_MAX_SIZE];
@@ -689,18 +706,33 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
     }
 
     i = head;
-    vring_desc_read(vdev, &desc, desc_pa, i);
+
+    len = max * sizeof(VRingDesc);
+    desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
+    if (len < max * sizeof(VRingDesc)) {
+        virtio_error(vdev, "Cannot map descriptor ring");
+        return NULL;
+    }
+
+    vring_desc_read(vdev, &desc, desc_ptr, i);
     if (desc.flags & VRING_DESC_F_INDIRECT) {
+        address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
         if (desc.len % sizeof(VRingDesc)) {
             virtio_error(vdev, "Invalid size for indirect buffer table");
             return NULL;
         }
 
         /* loop over the indirect descriptor table */
+        len = desc.len;
+        desc_ptr = address_space_map(vdev->dma_as, desc.addr, &len, false);
+        if (len < desc.len) {
+            virtio_error(vdev, "Cannot map indirect buffer");
+            return NULL;
+        }
+
         max = desc.len / sizeof(VRingDesc);
-        desc_pa = desc.addr;
         i = 0;
-        vring_desc_read(vdev, &desc, desc_pa, i);
+        vring_desc_read(vdev, &desc, desc_ptr, i);
     }
 
     /* Collect all the descriptors */
@@ -731,7 +763,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
             goto err_undo_map;
         }
 
-        rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
+        rc = virtqueue_read_next_desc(vdev, &desc, desc_ptr, max, &i);
     } while (rc == VIRTQUEUE_READ_DESC_MORE);
 
     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
@@ -753,11 +785,17 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
     vq->inuse++;
 
     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
+done:
+    if (desc_ptr) {
+        address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
+    }
+
     return elem;
 
 err_undo_map:
     virtqueue_undo_map_desc(out_num, in_num, iov);
-    return NULL;
+    elem = NULL;
+    goto done;
 }
 
 /* virtqueue_drop_all:
-- 
2.9.3

* [Qemu-devel] [PATCH 4/7] virtio: use MemoryRegionCache to access descriptors
From: Paolo Bonzini @ 2017-01-20 17:07 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst, stefanha

For now, the cache is created on every virtqueue_pop.  Later on,
direct descriptors will be able to reuse it.
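
In other words, each pop currently pays one cache setup and teardown
around its descriptor reads, roughly like this (a sketch with
illustrative names; the real code is in the diff below):

    /* Per-pop cache lifecycle, as of this patch. */
    static bool read_head_desc(VirtIODevice *vdev, VirtQueue *vq,
                               unsigned int head, VRingDesc *desc)
    {
        MemoryRegionCache desc_cache;
        int64_t size = vq->vring.num * sizeof(VRingDesc);
        int64_t len;

        /* Created on every pop (for now)... */
        len = address_space_cache_init(&desc_cache, vdev->dma_as,
                                       vq->vring.desc, size, false);
        if (len < size) {
            address_space_cache_destroy(&desc_cache);
            return false;
        }

        /* ...used for every descriptor read in the chain... */
        address_space_read_cached(&desc_cache, head * sizeof(VRingDesc),
                                  desc, sizeof(*desc));

        /* ...and torn down before returning.  The next patch lifts the
         * cache out of the fast path so this setup/teardown goes away. */
        address_space_cache_destroy(&desc_cache);
        return true;
    }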

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 hw/virtio/virtio.c | 91 ++++++++++++++++++++++++++++++------------------------
 1 file changed, 50 insertions(+), 41 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 7a49f2e..52673d1 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -120,9 +120,10 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
 }
 
 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
-                            uint8_t *desc_ptr, int i)
+                            MemoryRegionCache *cache, int i)
 {
-    memcpy(desc, desc_ptr + i * sizeof(VRingDesc), sizeof(VRingDesc));
+    address_space_read_cached(cache, i * sizeof(VRingDesc),
+                              desc, sizeof(VRingDesc));
     virtio_tswap64s(vdev, &desc->addr);
     virtio_tswap32s(vdev, &desc->len);
     virtio_tswap16s(vdev, &desc->flags);
@@ -407,7 +408,7 @@ enum {
 };
 
 static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
-                                    void *desc_ptr, unsigned int max,
+                                    MemoryRegionCache *desc_cache, unsigned int max,
                                     unsigned int *next)
 {
     /* If this descriptor says it doesn't chain, we're done. */
@@ -425,7 +426,7 @@ static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
         return VIRTQUEUE_READ_DESC_ERROR;
     }
 
-    vring_desc_read(vdev, desc, desc_ptr, *next);
+    vring_desc_read(vdev, desc, desc_cache, *next);
     return VIRTQUEUE_READ_DESC_MORE;
 }
 
@@ -434,39 +435,40 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                                unsigned max_in_bytes, unsigned max_out_bytes)
 {
     VirtIODevice *vdev = vq->vdev;
-    unsigned int idx;
+    unsigned int max, idx;
     unsigned int total_bufs, in_total, out_total;
-    void *desc_ptr = NULL;
-    hwaddr len = 0;
+    MemoryRegionCache *desc_cache = NULL;
+    MemoryRegionCache vring_desc_cache;
+    MemoryRegionCache indirect_desc_cache;
+    int64_t len = 0;
     int rc;
 
     idx = vq->last_avail_idx;
+    max = vq->vring.num;
+    len = address_space_cache_init(&vring_desc_cache, vdev->dma_as,
+                                   vq->vring.desc, max * sizeof(VRingDesc),
+                                   false);
+    if (len < max * sizeof(VRingDesc)) {
+        virtio_error(vdev, "Cannot map descriptor ring");
+        goto err;
+    }
 
     total_bufs = in_total = out_total = 0;
     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
-        unsigned int max, num_bufs, indirect = 0;
+        unsigned int num_bufs;
         VRingDesc desc;
         unsigned int i;
 
-        max = vq->vring.num;
         num_bufs = total_bufs;
 
         if (!virtqueue_get_head(vq, idx++, &i)) {
             goto err;
         }
 
-        len = max * sizeof(VRingDesc);
-        desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
-        if (len < max * sizeof(VRingDesc)) {
-            virtio_error(vdev, "Cannot map descriptor ring");
-            goto err;
-        }
-
-        vring_desc_read(vdev, &desc, desc_ptr, i);
+        desc_cache = &vring_desc_cache;
+        vring_desc_read(vdev, &desc, desc_cache, i);
 
         if (desc.flags & VRING_DESC_F_INDIRECT) {
-            address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
-            len = desc.len;
             if (desc.len % sizeof(VRingDesc)) {
                 virtio_error(vdev, "Invalid size for indirect buffer table");
                 goto err;
@@ -479,16 +481,18 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
             }
 
             /* loop over the indirect descriptor table */
-            desc_ptr = address_space_map(vdev->dma_as, desc.addr, &len, false);
+            len = address_space_cache_init(&indirect_desc_cache,
+                                           vdev->dma_as,
+                                           desc.addr, desc.len, false);
+            desc_cache = &indirect_desc_cache;
             if (len < desc.len) {
                 virtio_error(vdev, "Cannot map indirect buffer");
                 goto err;
             }
 
-            indirect = 1;
             max = desc.len / sizeof(VRingDesc);
             num_bufs = i = 0;
-            vring_desc_read(vdev, &desc, desc_ptr, i);
+            vring_desc_read(vdev, &desc, desc_cache, i);
         }
 
         do {
@@ -507,17 +511,19 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                 goto done;
             }
 
-            rc = virtqueue_read_next_desc(vdev, &desc, desc_ptr, max, &i);
+            rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
         } while (rc == VIRTQUEUE_READ_DESC_MORE);
 
         if (rc == VIRTQUEUE_READ_DESC_ERROR) {
             goto err;
         }
 
-        if (!indirect)
-            total_bufs = num_bufs;
-        else
+        if (desc_cache == &indirect_desc_cache) {
+            address_space_cache_destroy(&indirect_desc_cache);
             total_bufs++;
+        } else {
+            total_bufs = num_bufs;
+        }
     }
 
     if (rc < 0) {
@@ -525,9 +531,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
     }
 
 done:
-    if (desc_ptr) {
-        address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
-    }
+    address_space_cache_destroy(&vring_desc_cache);
     if (in_bytes) {
         *in_bytes = in_total;
     }
@@ -667,8 +671,10 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu
 void *virtqueue_pop(VirtQueue *vq, size_t sz)
 {
     unsigned int i, head, max;
-    void *desc_ptr = NULL;
-    hwaddr len;
+    MemoryRegionCache *desc_cache = NULL;
+    MemoryRegionCache indirect_desc_cache;
+    MemoryRegionCache vring_desc_cache;
+    int64_t len;
     VirtIODevice *vdev = vq->vdev;
     VirtQueueElement *elem = NULL;
     unsigned out_num, in_num;
@@ -707,24 +713,26 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
 
     i = head;
 
-    len = max * sizeof(VRingDesc);
-    desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
+    len = address_space_cache_init(&vring_desc_cache, vdev->dma_as,
+                                   vq->vring.desc, max * sizeof(VRingDesc),
+                                   false);
+    desc_cache = &vring_desc_cache;
     if (len < max * sizeof(VRingDesc)) {
         virtio_error(vdev, "Cannot map descriptor ring");
         return NULL;
     }
 
-    vring_desc_read(vdev, &desc, desc_ptr, i);
+    vring_desc_read(vdev, &desc, desc_cache, i);
     if (desc.flags & VRING_DESC_F_INDIRECT) {
-        address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
         if (desc.len % sizeof(VRingDesc)) {
             virtio_error(vdev, "Invalid size for indirect buffer table");
             return NULL;
         }
 
         /* loop over the indirect descriptor table */
-        len = desc.len;
-        desc_ptr = address_space_map(vdev->dma_as, desc.addr, &len, false);
+        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
+                                       desc.addr, desc.len, false);
+        desc_cache = &indirect_desc_cache;
         if (len < desc.len) {
             virtio_error(vdev, "Cannot map indirect buffer");
             return NULL;
@@ -732,7 +740,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
 
         max = desc.len / sizeof(VRingDesc);
         i = 0;
-        vring_desc_read(vdev, &desc, desc_ptr, i);
+        vring_desc_read(vdev, &desc, desc_cache, i);
     }
 
     /* Collect all the descriptors */
@@ -763,7 +771,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
             goto err_undo_map;
         }
 
-        rc = virtqueue_read_next_desc(vdev, &desc, desc_ptr, max, &i);
+        rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
     } while (rc == VIRTQUEUE_READ_DESC_MORE);
 
     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
@@ -786,9 +794,10 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
 
     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
 done:
-    if (desc_ptr) {
-        address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
+    if (desc_cache == &indirect_desc_cache) {
+        address_space_cache_destroy(&indirect_desc_cache);
     }
+    address_space_cache_destroy(&vring_desc_cache);
 
     return elem;
 
-- 
2.9.3

* [Qemu-devel] [PATCH 5/7] virtio: add MemoryListener to cache ring translations
From: Paolo Bonzini @ 2017-01-20 17:07 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst, stefanha

The cached translations are RCU-protected to allow efficient use
when processing virtqueues.
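
The pattern, extracted as a sketch (it mirrors virtio_init_region_cache
in the diff below; the reader side appears in the next two patches):

    /* Updater: build a new cache set, publish the pointer, and reclaim
     * the old set only after all current readers have finished. */
    static void publish_caches(VRing *vring, VRingMemoryRegionCaches *new)
    {
        VRingMemoryRegionCaches *old = vring->caches;

        atomic_rcu_set(&vring->caches, new);              /* publish */
        if (old) {
            call_rcu(old, virtio_free_region_cache, rcu); /* deferred free */
        }
    }

    /* Reader: pin a consistent snapshot for the duration of the access. */
    static int64_t desc_cache_len(VRing *vring)
    {
        VRingMemoryRegionCaches *caches;
        int64_t len;

        rcu_read_lock();
        caches = atomic_rcu_read(&vring->caches);
        len = caches->desc.len;
        rcu_read_unlock();
        return len;
    }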

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 hw/virtio/virtio.c         | 91 ++++++++++++++++++++++++++++++++++++++++++++--
 include/hw/virtio/virtio.h |  1 +
 2 files changed, 89 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 52673d1..848c30f 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -60,6 +60,13 @@ typedef struct VRingUsed
     VRingUsedElem ring[0];
 } VRingUsed;
 
+typedef struct VRingMemoryRegionCaches {
+    struct rcu_head rcu;
+    MemoryRegionCache desc;
+    MemoryRegionCache avail;
+    MemoryRegionCache used;
+} VRingMemoryRegionCaches;
+
 typedef struct VRing
 {
     unsigned int num;
@@ -68,6 +75,7 @@ typedef struct VRing
     hwaddr desc;
     hwaddr avail;
     hwaddr used;
+    VRingMemoryRegionCaches *caches;
 } VRing;
 
 struct VirtQueue
@@ -104,6 +112,46 @@ struct VirtQueue
     QLIST_ENTRY(VirtQueue) node;
 };
 
+static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
+{
+    address_space_cache_destroy(&caches->desc);
+    address_space_cache_destroy(&caches->avail);
+    address_space_cache_destroy(&caches->used);
+    g_free(caches);
+}
+
+static void virtio_init_region_cache(VirtIODevice *vdev, int n)
+{
+    VirtQueue *vq = &vdev->vq[n];
+    VRingMemoryRegionCaches *old = vq->vring.caches;
+    VRingMemoryRegionCaches *new = g_new0(VRingMemoryRegionCaches, 1);
+    hwaddr addr, size;
+    int event_size;
+
+    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+    addr = vq->vring.desc;
+    if (!addr) {
+        return;
+    }
+    size = virtio_queue_get_desc_size(vdev, n);
+    address_space_cache_init(&new->desc, vdev->dma_as,
+                             addr, size, false);
+
+    size = virtio_queue_get_used_size(vdev, n) + event_size;
+    address_space_cache_init(&new->used, vdev->dma_as,
+                             vq->vring.used, size, true);
+
+    size = virtio_queue_get_avail_size(vdev, n) + event_size;
+    address_space_cache_init(&new->avail, vdev->dma_as,
+                             vq->vring.avail, size, false);
+
+    atomic_rcu_set(&vq->vring.caches, new);
+    if (old) {
+        call_rcu(old, virtio_free_region_cache, rcu);
+    }
+}
+
 /* virt queue functions */
 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
 {
@@ -117,6 +165,7 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
     vring->used = vring_align(vring->avail +
                               offsetof(VRingAvail, ring[vring->num]),
                               vring->align);
+    virtio_init_region_cache(vdev, n);
 }
 
 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
@@ -1266,6 +1315,7 @@ void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
     vdev->vq[n].vring.desc = desc;
     vdev->vq[n].vring.avail = avail;
     vdev->vq[n].vring.used = used;
+    virtio_init_region_cache(vdev, n);
 }
 
 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
@@ -1977,9 +2027,6 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
 void virtio_cleanup(VirtIODevice *vdev)
 {
     qemu_del_vm_change_state_handler(vdev->vmstate);
-    g_free(vdev->config);
-    g_free(vdev->vq);
-    g_free(vdev->vector_queues);
 }
 
 static void virtio_vmstate_change(void *opaque, int running, RunState state)
@@ -2240,6 +2287,19 @@ void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
     }
 }
 
+static void virtio_memory_listener_commit(MemoryListener *listener)
+{
+    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
+    int i;
+
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+        if (vdev->vq[i].vring.num == 0) {
+            break;
+        }
+        virtio_init_region_cache(vdev, i);
+    }
+}
+
 static void virtio_device_realize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -2262,6 +2322,9 @@ static void virtio_device_realize(DeviceState *dev, Error **errp)
         error_propagate(errp, err);
         return;
     }
+
+    vdev->listener.commit = virtio_memory_listener_commit;
+    memory_listener_register(&vdev->listener, vdev->dma_as);
 }
 
 static void virtio_device_unrealize(DeviceState *dev, Error **errp)
@@ -2284,6 +2347,27 @@ static void virtio_device_unrealize(DeviceState *dev, Error **errp)
     vdev->bus_name = NULL;
 }
 
+static void virtio_device_instance_finalize(Object *obj)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(obj);
+    int i;
+
+    memory_listener_unregister(&vdev->listener);
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+        VRingMemoryRegionCaches *caches;
+        if (vdev->vq[i].vring.num == 0) {
+            break;
+        }
+        caches = atomic_read(&vdev->vq[i].vring.caches);
+        atomic_set(&vdev->vq[i].vring.caches, NULL);
+        virtio_free_region_cache(caches);
+    }
+
+    g_free(vdev->config);
+    g_free(vdev->vq);
+    g_free(vdev->vector_queues);
+}
+
 static Property virtio_properties[] = {
     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
     DEFINE_PROP_END_OF_LIST(),
@@ -2410,6 +2494,7 @@ static const TypeInfo virtio_device_info = {
     .parent = TYPE_DEVICE,
     .instance_size = sizeof(VirtIODevice),
     .class_init = virtio_device_class_init,
+    .instance_finalize = virtio_device_instance_finalize,
     .abstract = true,
     .class_size = sizeof(VirtioDeviceClass),
 };
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 525da24..f1b2673 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -85,6 +85,7 @@ struct VirtIODevice
     uint32_t generation;
     int nvectors;
     VirtQueue *vq;
+    MemoryListener listener;
     uint16_t device_id;
     bool vm_running;
     bool broken; /* device in invalid state, needs reset */
-- 
2.9.3

* [Qemu-devel] [PATCH 6/7] virtio: use VRingMemoryRegionCaches for descriptor ring
From: Paolo Bonzini @ 2017-01-20 17:07 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst, stefanha

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 hw/virtio/virtio.c | 34 ++++++++++++++++------------------
 1 file changed, 16 insertions(+), 18 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 848c30f..668a97f 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -487,17 +487,16 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
     unsigned int max, idx;
     unsigned int total_bufs, in_total, out_total;
     MemoryRegionCache *desc_cache = NULL;
-    MemoryRegionCache vring_desc_cache;
     MemoryRegionCache indirect_desc_cache;
+    VRingMemoryRegionCaches *caches;
     int64_t len = 0;
     int rc;
 
+    rcu_read_lock();
     idx = vq->last_avail_idx;
     max = vq->vring.num;
-    len = address_space_cache_init(&vring_desc_cache, vdev->dma_as,
-                                   vq->vring.desc, max * sizeof(VRingDesc),
-                                   false);
-    if (len < max * sizeof(VRingDesc)) {
+    caches = atomic_rcu_read(&vq->vring.caches);
+    if (caches->desc.len < max * sizeof(VRingDesc)) {
         virtio_error(vdev, "Cannot map descriptor ring");
         goto err;
     }
@@ -514,7 +513,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
             goto err;
         }
 
-        desc_cache = &vring_desc_cache;
+        desc_cache = &caches->desc;
         vring_desc_read(vdev, &desc, desc_cache, i);
 
         if (desc.flags & VRING_DESC_F_INDIRECT) {
@@ -580,13 +579,13 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
     }
 
 done:
-    address_space_cache_destroy(&vring_desc_cache);
     if (in_bytes) {
         *in_bytes = in_total;
     }
     if (out_bytes) {
         *out_bytes = out_total;
     }
+    rcu_read_unlock();
     return;
 
 err:
@@ -722,7 +721,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
     unsigned int i, head, max;
     MemoryRegionCache *desc_cache = NULL;
     MemoryRegionCache indirect_desc_cache;
-    MemoryRegionCache vring_desc_cache;
+    VRingMemoryRegionCaches *caches;
     int64_t len;
     VirtIODevice *vdev = vq->vdev;
     VirtQueueElement *elem = NULL;
@@ -762,15 +761,14 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
 
     i = head;
 
-    len = address_space_cache_init(&vring_desc_cache, vdev->dma_as,
-                                   vq->vring.desc, max * sizeof(VRingDesc),
-                                   false);
-    desc_cache = &vring_desc_cache;
-    if (len < max * sizeof(VRingDesc)) {
+    rcu_read_lock();
+    caches = atomic_rcu_read(&vq->vring.caches);
+    if (caches->desc.len < max * sizeof(VRingDesc)) {
         virtio_error(vdev, "Cannot map descriptor ring");
         return NULL;
     }
 
+    desc_cache = &caches->desc;
     vring_desc_read(vdev, &desc, desc_cache, i);
     if (desc.flags & VRING_DESC_F_INDIRECT) {
         if (desc.len % sizeof(VRingDesc)) {
@@ -846,7 +844,7 @@ done:
     if (desc_cache == &indirect_desc_cache) {
         address_space_cache_destroy(&indirect_desc_cache);
     }
-    address_space_cache_destroy(&vring_desc_cache);
+    rcu_read_unlock();
 
     return elem;
 
-- 
2.9.3

* [Qemu-devel] [PATCH 7/7] virtio: use VRingMemoryRegionCaches for avail and used rings
From: Paolo Bonzini @ 2017-01-20 17:07 UTC (permalink / raw)
  To: qemu-devel; +Cc: mst, stefanha

The virtio-net change is necessary because virtio-net calls
virtqueue_fill and virtqueue_flush directly instead of the more
convenient virtqueue_push, so it must enter the RCU read-side critical
section itself.
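
Restating the locking contract that the hunks below establish
(simplified fragments copied from the diff):

    /* virtqueue_push() takes the RCU read lock itself... */
    void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                        unsigned int len)
    {
        rcu_read_lock();
        virtqueue_fill(vq, elem, len, 0);   /* requires rcu_read_lock */
        virtqueue_flush(vq, 1);             /* requires rcu_read_lock */
        rcu_read_unlock();
    }

    /* ...so a direct user of fill/flush, such as virtio-net, has to
     * provide the bracket on its own: */
    rcu_read_lock();
    r = virtio_net_receive_rcu(nc, buf, size);  /* may fill/flush */
    rcu_read_unlock();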

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 hw/net/virtio-net.c |  14 +++++-
 hw/virtio/virtio.c  | 138 +++++++++++++++++++++++++++++++++++++---------------
 2 files changed, 112 insertions(+), 40 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 7b3ad4a..6f0e397 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1130,7 +1130,8 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
     return 0;
 }
 
-static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+                                      size_t size)
 {
     VirtIONet *n = qemu_get_nic_opaque(nc);
     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
@@ -1233,6 +1234,17 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
     return size;
 }
 
+static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
+                                  size_t size)
+{
+    ssize_t r;
+
+    rcu_read_lock();
+    r = virtio_net_receive_rcu(nc, buf, size);
+    rcu_read_unlock();
+    return r;
+}
+
 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
 
 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 668a97f..fbee9b2 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -168,6 +168,7 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
     virtio_init_region_cache(vdev, n);
 }
 
+/* Called within rcu_read_lock().  */
 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                             MemoryRegionCache *cache, int i)
 {
@@ -179,88 +180,110 @@ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
     virtio_tswap16s(vdev, &desc->next);
 }
 
+/* Called within rcu_read_lock().  */
 static inline uint16_t vring_avail_flags(VirtQueue *vq)
 {
-    hwaddr pa;
-    pa = vq->vring.avail + offsetof(VRingAvail, flags);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingAvail, flags);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }
 
+/* Called within rcu_read_lock().  */
 static inline uint16_t vring_avail_idx(VirtQueue *vq)
 {
-    hwaddr pa;
-    pa = vq->vring.avail + offsetof(VRingAvail, idx);
-    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingAvail, idx);
+    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
     return vq->shadow_avail_idx;
 }
 
+/* Called within rcu_read_lock().  */
 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
 {
-    hwaddr pa;
-    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingAvail, ring[i]);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }
 
+/* Called within rcu_read_lock().  */
 static inline uint16_t vring_get_used_event(VirtQueue *vq)
 {
     return vring_avail_ring(vq, vq->vring.num);
 }
 
+/* Called within rcu_read_lock().  */
 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                     int i)
 {
-    hwaddr pa;
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingUsed, ring[i]);
     virtio_tswap32s(vq->vdev, &uelem->id);
     virtio_tswap32s(vq->vdev, &uelem->len);
-    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
-    address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED,
-                       (void *)uelem, sizeof(VRingUsedElem));
+    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
+    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
 }
 
+/* Called within rcu_read_lock().  */
 static uint16_t vring_used_idx(VirtQueue *vq)
 {
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, idx);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = vq->vring.used + offsetof(VRingUsed, idx);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
 }
 
+/* Called within rcu_read_lock().  */
 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
 {
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, idx);
-    virtio_stw_phys(vq->vdev, pa, val);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingUsed, idx);
+    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
     vq->used_idx = val;
 }
 
+/* Called within rcu_read_lock().  */
 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
 {
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
     VirtIODevice *vdev = vq->vdev;
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, flags);
-    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
+    hwaddr pa = offsetof(VRingUsed, flags);
+    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 }
 
+/* Called within rcu_read_lock().  */
 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
 {
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
     VirtIODevice *vdev = vq->vdev;
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, flags);
-    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
+    hwaddr pa = offsetof(VRingUsed, flags);
+    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 }
 
+/* Called within rcu_read_lock().  */
 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
 {
+    VRingMemoryRegionCaches *caches;
     hwaddr pa;
     if (!vq->notification) {
         return;
     }
-    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
-    virtio_stw_phys(vq->vdev, pa, val);
+
+    caches = atomic_rcu_read(&vq->vring.caches);
+    pa = offsetof(VRingUsed, ring[vq->vring.num]);
+    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
 }
 
 void virtio_queue_set_notification(VirtQueue *vq, int enable)
 {
     vq->notification = enable;
+
+    rcu_read_lock();
     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vring_avail_idx(vq));
     } else if (enable) {
@@ -272,6 +295,7 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable)
         /* Expose avail event/used flags before caller checks the avail idx. */
         smp_mb();
     }
+    rcu_read_unlock();
 }
 
 int virtio_queue_ready(VirtQueue *vq)
@@ -280,8 +304,9 @@ int virtio_queue_ready(VirtQueue *vq)
 }
 
 /* Fetch avail_idx from VQ memory only when we really need to know if
- * guest has added some buffers. */
-int virtio_queue_empty(VirtQueue *vq)
+ * guest has added some buffers.
+ * Called within rcu_read_lock().  */
+static int virtio_queue_empty_rcu(VirtQueue *vq)
 {
     if (vq->shadow_avail_idx != vq->last_avail_idx) {
         return 0;
@@ -290,6 +315,20 @@ int virtio_queue_empty(VirtQueue *vq)
     return vring_avail_idx(vq) == vq->last_avail_idx;
 }
 
+int virtio_queue_empty(VirtQueue *vq)
+{
+    bool empty;
+
+    if (vq->shadow_avail_idx != vq->last_avail_idx) {
+        return 0;
+    }
+
+    rcu_read_lock();
+    empty = vring_avail_idx(vq) == vq->last_avail_idx;
+    rcu_read_unlock();
+    return empty;
+}
+
 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                                unsigned int len)
 {
@@ -368,6 +407,7 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
     return true;
 }
 
+/* Called within rcu_read_lock().  */
 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len, unsigned int idx)
 {
@@ -388,6 +428,7 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
     vring_used_write(vq, &uelem, idx);
 }
 
+/* Called within rcu_read_lock().  */
 void virtqueue_flush(VirtQueue *vq, unsigned int count)
 {
     uint16_t old, new;
@@ -411,10 +452,13 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
 {
+    rcu_read_lock();
     virtqueue_fill(vq, elem, len, 0);
     virtqueue_flush(vq, 1);
+    rcu_read_unlock();
 }
 
+/* Called within rcu_read_lock().  */
 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
 {
     uint16_t num_heads = vring_avail_idx(vq) - idx;
@@ -434,6 +478,7 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
     return num_heads;
 }
 
+/* Called within rcu_read_lock().  */
 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                                unsigned int *head)
 {
@@ -734,8 +779,9 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
     if (unlikely(vdev->broken)) {
         return NULL;
     }
-    if (virtio_queue_empty(vq)) {
-        return NULL;
+    rcu_read_lock();
+    if (virtio_queue_empty_rcu(vq)) {
+        goto out_rcu;
     }
     /* Needed after virtio_queue_empty(), see comment in
      * virtqueue_num_heads(). */
@@ -748,11 +794,11 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
 
     if (vq->inuse >= vq->vring.num) {
         virtio_error(vdev, "Virtqueue size exceeded");
-        return NULL;
+        goto out_rcu;
     }
 
     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
-        return NULL;
+        goto out_rcu;
     }
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
@@ -765,7 +811,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
     caches = atomic_rcu_read(&vq->vring.caches);
     if (caches->desc.len < max * sizeof(VRingDesc)) {
         virtio_error(vdev, "Cannot map descriptor ring");
-        return NULL;
+        goto out_rcu;
     }
 
     desc_cache = &caches->desc;
@@ -773,7 +819,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
     if (desc.flags & VRING_DESC_F_INDIRECT) {
         if (desc.len % sizeof(VRingDesc)) {
             virtio_error(vdev, "Invalid size for indirect buffer table");
-            return NULL;
+            goto out_rcu;
         }
 
         /* loop over the indirect descriptor table */
@@ -782,7 +828,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
         desc_cache = &indirect_desc_cache;
         if (len < desc.len) {
             virtio_error(vdev, "Cannot map indirect buffer");
-            return NULL;
+            goto out_rcu;
         }
 
         max = desc.len / sizeof(VRingDesc);
@@ -844,12 +890,13 @@ done:
     if (desc_cache == &indirect_desc_cache) {
         address_space_cache_destroy(&indirect_desc_cache);
     }
-    rcu_read_unlock();
 
+    rcu_read_unlock();
     return elem;
 
 err_undo_map:
     virtqueue_undo_map_desc(out_num, in_num, iov);
+out_rcu:
     elem = NULL;
     goto done;
 }
@@ -1478,6 +1525,7 @@ static void virtio_set_isr(VirtIODevice *vdev, int value)
     }
 }
 
+/* Called within rcu_read_lock().  */
 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
     uint16_t old, new;
@@ -1503,7 +1551,12 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
 
 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
 {
-    if (!virtio_should_notify(vdev, vq)) {
+    bool should_notify;
+    rcu_read_lock();
+    should_notify = virtio_should_notify(vdev, vq);
+    rcu_read_unlock();
+
+    if (!should_notify) {
         return;
     }
 
@@ -1530,7 +1583,12 @@ void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
 
 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
-    if (!virtio_should_notify(vdev, vq)) {
+    bool should_notify;
+    rcu_read_lock();
+    should_notify = virtio_should_notify(vdev, vq);
+    rcu_read_unlock();
+
+    if (!should_notify) {
         return;
     }
 
@@ -1984,6 +2042,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
         }
     }
 
+    rcu_read_lock();
     for (i = 0; i < num; i++) {
         if (vdev->vq[i].vring.desc) {
             uint16_t nheads;
@@ -2018,6 +2077,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
             }
         }
     }
+    rcu_read_unlock();
 
     return 0;
 }
-- 
2.9.3

* Re: [Qemu-devel] [PATCH 0/7] virtio: use MemoryRegionCache for descriptors and rings
From: Michael S. Tsirkin @ 2017-01-20 17:54 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel, stefanha

On Fri, Jan 20, 2017 at 06:07:50PM +0100, Paolo Bonzini wrote:
> Patch posted during 2.8 hard freeze.  Quick measurements give a 10%
> performance improvement on various virtio-blk benchmarks, but the machine
> I used seems to love my patches particularly today!
> 
> Paolo

Will review, thanks!


> Paolo Bonzini (7):
>   virtio: make virtio_should_notify static
>   virtio: add virtio_*_phys_cached
>   virtio: use address_space_map/unmap to access descriptors
>   virtio: use MemoryRegionCache to access descriptors
>   virtio: add MemoryListener to cache ring translations
>   virtio: use VRingMemoryRegionCaches for descriptor ring
>   virtio: use VRingMemoryRegionCaches for avail and used rings
> 
>  hw/net/virtio-net.c               |  14 +-
>  hw/virtio/virtio.c                | 322 ++++++++++++++++++++++++++++++--------
>  include/hw/virtio/virtio-access.h |  52 ++++++
>  include/hw/virtio/virtio.h        |   2 +-
>  4 files changed, 322 insertions(+), 68 deletions(-)
> 
> -- 
> 2.9.3

* Re: [Qemu-devel] [PATCH 3/7] virtio: use address_space_map/unmap to access descriptors
From: Stefan Hajnoczi @ 2017-01-24 12:30 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel, stefanha, mst

On Fri, Jan 20, 2017 at 06:07:53PM +0100, Paolo Bonzini wrote:
> @@ -455,10 +455,18 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
>              goto err;
>          }
>  
> -        desc_pa = vq->vring.desc;
> -        vring_desc_read(vdev, &desc, desc_pa, i);
> +        len = max * sizeof(VRingDesc);
> +        desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
> +        if (len < max * sizeof(VRingDesc)) {
> +            virtio_error(vdev, "Cannot map descriptor ring");
> +            goto err;
> +        }
> +
> +        vring_desc_read(vdev, &desc, desc_ptr, i);
>  
>          if (desc.flags & VRING_DESC_F_INDIRECT) {
> +            address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);

Missing "dest_ptr = NULL" to prevent double unmap if the next goto err
is taken.
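
Something like this (sketch, untested):

    if (desc.flags & VRING_DESC_F_INDIRECT) {
        address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
        desc_ptr = NULL;   /* "goto err" reaches done:, which unmaps
                              only a non-NULL desc_ptr */
        len = desc.len;
        /* ... rest of the hunk unchanged ... */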

> @@ -689,18 +706,33 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
>      }
>  
>      i = head;
> -    vring_desc_read(vdev, &desc, desc_pa, i);
> +
> +    len = max * sizeof(VRingDesc);
> +    desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
> +    if (len < max * sizeof(VRingDesc)) {
> +        virtio_error(vdev, "Cannot map descriptor ring");
> +        return NULL;

desc_ptr still needs to be unmapped here if it is non-NULL.  The same
applies to the other early returns further down in virtqueue_pop().
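
One shape that covers both comments is to funnel the failures through
the function's existing cleanup label instead of returning (sketch,
untested; elem starts out NULL):

    if (len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto done;      /* done: unmaps desc_ptr if non-NULL and
                           returns elem, which is still NULL here */
    }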

* Re: [Qemu-devel] [PATCH 4/7] virtio: use MemoryRegionCache to access descriptors
From: Stefan Hajnoczi @ 2017-01-24 12:32 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel, stefanha, mst

On Fri, Jan 20, 2017 at 06:07:54PM +0100, Paolo Bonzini wrote:
> For now, the cache is created on every virtqueue_pop.  Later on,
> direct descriptors will be able to reuse it.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  hw/virtio/virtio.c | 91 ++++++++++++++++++++++++++++++------------------------
>  1 file changed, 50 insertions(+), 41 deletions(-)

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

* Re: [Qemu-devel] [PATCH 5/7] virtio: add MemoryListener to cache ring translations
From: Stefan Hajnoczi @ 2017-01-24 12:37 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel, stefanha, mst

On Fri, Jan 20, 2017 at 06:07:55PM +0100, Paolo Bonzini wrote:
> +static void virtio_init_region_cache(VirtIODevice *vdev, int n)
> +{
> +    VirtQueue *vq = &vdev->vq[n];
> +    VRingMemoryRegionCaches *old = vq->vring.caches;
> +    VRingMemoryRegionCaches *new = g_new0(VRingMemoryRegionCaches, 1);
> +    hwaddr addr, size;
> +    int event_size;
> +
> +    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
> +
> +    addr = vq->vring.desc;
> +    if (!addr) {
> +        return;

new is leaked.
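
i.e. the early return needs to release the fresh allocation (sketch):

    addr = vq->vring.desc;
    if (!addr) {
        g_free(new);    /* otherwise leaked on every call */
        return;
    }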

* Re: [Qemu-devel] [PATCH 6/7] virtio: use VRingMemoryRegionCaches for descriptor ring
From: Stefan Hajnoczi @ 2017-01-24 12:38 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel, stefanha, mst

On Fri, Jan 20, 2017 at 06:07:56PM +0100, Paolo Bonzini wrote:
> @@ -762,15 +761,14 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
>  
>      i = head;
>  
> -    len = address_space_cache_init(&vring_desc_cache, vdev->dma_as,
> -                                   vq->vring.desc, max * sizeof(VRingDesc),
> -                                   false);
> -    desc_cache = &vring_desc_cache;
> -    if (len < max * sizeof(VRingDesc)) {
> +    rcu_read_lock();
> +    caches = atomic_rcu_read(&vq->vring.caches);
> +    if (caches->desc.len < max * sizeof(VRingDesc)) {
>          virtio_error(vdev, "Cannot map descriptor ring");
>          return NULL;

Missing rcu_read_unlock() in all return cases.
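
A sketch of the shape that fixes it (the final patch of this series
adds exactly this kind of out_rcu label to virtqueue_pop):

    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
        goto out_rcu;               /* instead of "return NULL" */
    }
    /* ... */
    out_rcu:
        elem = NULL;
        goto done;                  /* done: drops the RCU read lock */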

* Re: [Qemu-devel] [PATCH 7/7] virtio: use VRingMemoryRegionCaches for avail and used rings
From: Stefan Hajnoczi @ 2017-01-24 12:42 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel, stefanha, mst

On Fri, Jan 20, 2017 at 06:07:57PM +0100, Paolo Bonzini wrote:
> The virtio-net change is necessary because it uses virtqueue_fill
> and virtqueue_flush instead of the more convenient virtqueue_push.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  hw/net/virtio-net.c |  14 +++++-
>  hw/virtio/virtio.c  | 138 +++++++++++++++++++++++++++++++++++++---------------
>  2 files changed, 112 insertions(+), 40 deletions(-)

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

* Re: [Qemu-devel] [PATCH 1/7] virtio: make virtio_should_notify static
From: Stefan Hajnoczi @ 2017-01-24 12:42 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel, stefanha, mst

On Fri, Jan 20, 2017 at 06:07:51PM +0100, Paolo Bonzini wrote:
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  hw/virtio/virtio.c         | 2 +-
>  include/hw/virtio/virtio.h | 1 -
>  2 files changed, 1 insertion(+), 2 deletions(-)

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

* Re: [Qemu-devel] [PATCH 2/7] virtio: add virtio_*_phys_cached
From: Stefan Hajnoczi @ 2017-01-24 12:43 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel, stefanha, mst

On Fri, Jan 20, 2017 at 06:07:52PM +0100, Paolo Bonzini wrote:
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  include/hw/virtio/virtio-access.h | 52 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 52 insertions(+)

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

* Re: [Qemu-devel] [PATCH 3/7] virtio: use address_space_map/unmap to access descriptors
From: Paolo Bonzini @ 2017-01-24 16:06 UTC (permalink / raw)
  To: Stefan Hajnoczi; +Cc: qemu-devel, stefanha, mst



On 24/01/2017 13:30, Stefan Hajnoczi wrote:
> On Fri, Jan 20, 2017 at 06:07:53PM +0100, Paolo Bonzini wrote:
>> @@ -455,10 +455,18 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
>>              goto err;
>>          }
>>  
>> -        desc_pa = vq->vring.desc;
>> -        vring_desc_read(vdev, &desc, desc_pa, i);
>> +        len = max * sizeof(VRingDesc);
>> +        desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
>> +        if (len < max * sizeof(VRingDesc)) {
>> +            virtio_error(vdev, "Cannot map descriptor ring");
>> +            goto err;
>> +        }
>> +
>> +        vring_desc_read(vdev, &desc, desc_ptr, i);
>>  
>>          if (desc.flags & VRING_DESC_F_INDIRECT) {
>> +            address_space_unmap(vdev->dma_as, desc_ptr, len, false, 0);
> 
> Missing "dest_ptr = NULL" to prevent double unmap if the next goto err
> is taken.
> 
>> @@ -689,18 +706,33 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
>>      }
>>  
>>      i = head;
>> -    vring_desc_read(vdev, &desc, desc_pa, i);
>> +
>> +    len = max * sizeof(VRingDesc);
>> +    desc_ptr = address_space_map(vdev->dma_as, vq->vring.desc, &len, false);
>> +    if (len < max * sizeof(VRingDesc)) {
>> +        virtio_error(vdev, "Cannot map descriptor ring");
>> +        return NULL;
> 
> desc_ptr still needs to be unmapped if non-NULL.  The same applies
> below in virtqueue_pop().
> 

I'll redo this patch to look a lot more like 4/7.

Paolo

* Re: [Qemu-devel] [PATCH 0/7] virtio: use MemoryRegionCache for descriptors and rings
From: Michael S. Tsirkin @ 2017-01-24 16:37 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: qemu-devel, stefanha

On Fri, Jan 20, 2017 at 06:07:50PM +0100, Paolo Bonzini wrote:
> Patch posted during 2.8 hard freeze.  Quick measurements give a 10%
> performance improvement on various virtio-blk benchmarks, but the machine
> I used seems to love my patches particularly today!
> 
> Paolo

Looks nice to me. I'll apply patch 1 as you are
still tweaking the rest of the series.

> Paolo Bonzini (7):
>   virtio: make virtio_should_notify static
>   virtio: add virtio_*_phys_cached
>   virtio: use address_space_map/unmap to access descriptors
>   virtio: use MemoryRegionCache to access descriptors
>   virtio: add MemoryListener to cache ring translations
>   virtio: use VRingMemoryRegionCaches for descriptor ring
>   virtio: use VRingMemoryRegionCaches for avail and used rings
> 
>  hw/net/virtio-net.c               |  14 +-
>  hw/virtio/virtio.c                | 322 ++++++++++++++++++++++++++++++--------
>  include/hw/virtio/virtio-access.h |  52 ++++++
>  include/hw/virtio/virtio.h        |   2 +-
>  4 files changed, 322 insertions(+), 68 deletions(-)
> 
> -- 
> 2.9.3
