All of lore.kernel.org
 help / color / mirror / Atom feed
From: Alex Williamson <alex.williamson@hp.com>
To: kvm@vger.kernel.org
Cc: markmc@redhat.com
Subject: [PATCH] kvm:virtio-net: Run TX from the I/O thread
Date: Wed, 21 Jan 2009 16:08:42 -0700	[thread overview]
Message-ID: <20090121230642.10404.65372.stgit@kvm.aw> (raw)

This is an attempt to improve the latency of virtio-net while not hurting
throughput.  I wanted to try moving packet TX into a different thread
so we can quickly return to the guest after it kicks us to send packets
out.  I also switched the order of when the tx_timer comes into play, so
we can get an initial burst of packets out, then wait for the timer to
fire and notify us if there's more to do.  Here's what it does for me
(average of 5 runs each, testing to a remote system on a 1Gb network):

netperf TCP_STREAM: 939.22Mb/s -> 935.24Mb/s =  99.58%
netperf TCP_RR:      2028.72/s -> 3927.99/s  = 193.62%
tbench:              92.99MB/s -> 99.97MB/s  = 107.51%

I'd be interested to hear if it helps or hurts anyone else.  Thanks,

Alex


Signed-off-by: Alex Williamson <alex.williamson@hp.com>
---

 qemu/hw/virtio-net.c |   57 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 57 insertions(+), 0 deletions(-)

diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index 8f3c41d..26508bd 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -17,6 +17,8 @@
 #include "virtio-net.h"
 #ifdef USE_KVM
 #include "qemu-kvm.h"
+#include "compatfd.h"
+#include "qemu-char.h"
 #endif
 
 #define TAP_VNET_HDR
@@ -49,6 +51,9 @@ typedef struct VirtIONet
         int enabled;
         uint32_t *vlans;
     } vlan_table;
+#ifdef USE_KVM
+    int fds[2];
+#endif
 } VirtIONet;
 
 /* TODO
@@ -570,10 +575,50 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
     }
 }
 
+#ifdef USE_KVM
+static void virtio_net_do_tx(void *opaque)
+{
+    VirtIONet *n = opaque;
+
+    virtio_net_flush_tx(n, n->tx_vq);
+    qemu_mod_timer(n->tx_timer,
+                   qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
+}
+
+static void virtio_net_notify_tx(int fd)
+{
+    uint64_t value = 1;
+    char buffer[8];
+    size_t offset = 0;
+
+    memcpy(buffer, &value, sizeof(value));
+
+    while (offset < 8) {
+        ssize_t len;
+
+        len = write(fd, buffer + offset, 8 - offset);
+        if (len == -1 && errno == EINTR)
+            continue;
+
+        if (len <= 0)
+            break;
+
+        offset += len;
+    }
+
+    if (offset != 8)
+        fprintf(stderr, "virtio-net: failed to notify tx thread\n");
+}
+#endif
+
 static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = to_virtio_net(vdev);
 
+#ifdef USE_KVM
+    virtio_queue_set_notification(n->tx_vq, 0);
+    virtio_net_notify_tx(n->fds[0]);
+#else
     if (n->tx_timer_active) {
         virtio_queue_set_notification(vq, 1);
         qemu_del_timer(n->tx_timer);
@@ -585,6 +630,7 @@ static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq)
         n->tx_timer_active = 1;
         virtio_queue_set_notification(vq, 0);
     }
+#endif
 }
 
 static void virtio_net_tx_timer(void *opaque)
@@ -598,7 +644,9 @@ static void virtio_net_tx_timer(void *opaque)
         return;
 
     virtio_queue_set_notification(n->tx_vq, 1);
+#ifndef USE_KVM
     virtio_net_flush_tx(n, n->tx_vq);
+#endif
 }
 
 /*
@@ -712,6 +760,15 @@ PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn)
                                  virtio_net_receive, virtio_net_can_receive, n);
     n->vc->link_status_changed = virtio_net_set_link_status;
 
+#ifdef USE_KVM
+    if (qemu_eventfd(n->fds) == -1) {
+        fprintf(stderr, "failed to setup virtio-net eventfd\n");
+        return NULL;
+    }
+
+    qemu_set_fd_handler2(n->fds[0], NULL, virtio_net_do_tx, NULL, (void *)n);
+#endif
+
     qemu_format_nic_info_str(n->vc, n->mac);
 
     n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);


             reply	other threads:[~2009-01-21 23:11 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-01-21 23:08 Alex Williamson [this message]
2009-01-22 12:12 ` [PATCH] kvm:virtio-net: Run TX from the I/O thread Avi Kivity
2009-01-22 12:48   ` Mark McLoughlin
2009-01-22 18:36     ` Alex Williamson
2009-01-22 12:47 ` Mark McLoughlin
2009-01-22 13:41   ` Avi Kivity
2009-01-22 18:43     ` Anthony Liguori
2009-01-22 13:42   ` Avi Kivity

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20090121230642.10404.65372.stgit@kvm.aw \
    --to=alex.williamson@hp.com \
    --cc=kvm@vger.kernel.org \
    --cc=markmc@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.