From: Jason Wang <jasowang@redhat.com>
To: aliguori@us.ibm.com, mst@redhat.com, qemu-devel@nongnu.org,
	blauwirbel@gmail.com, shajnocz@redhat.com
Cc: krkumar2@in.ibm.com, kvm@vger.kernel.org, mprivozn@redhat.com,
	rusty@rustcorp.com.au, gaowanlong@cn.fujitsu.com,
	jwhan@filewood.snu.ac.kr, shiyer@redhat.com,
	Jason Wang <jasowang@redhat.com>
Subject: [PATCH V4 16/22] vhost: multiqueue support
Date: Wed, 30 Jan 2013 19:12:35 +0800
Message-ID: <1359544361-5089-17-git-send-email-jasowang@redhat.com>
In-Reply-To: <1359544361-5089-1-git-send-email-jasowang@redhat.com>

This patch lets vhost support multiqueue. The idea is simple: launch
multiple vhost threads and have each thread process a subset of the
device's virtqueues. After this change, each emulated device can have
multiple vhost threads as its backend.
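
For instance (the queue counts here are illustrative, not mandated by
this patch), a virtio-net device with two queue pairs exposes four
virtqueues and is backed by two vhost_net devices:

    vhost_net #0: vq_index = 0, nvqs = 2  ->  virtqueues 0 (rx0) and 1 (tx0)
    vhost_net #1: vq_index = 2, nvqs = 2  ->  virtqueues 2 (rx1) and 3 (tx1)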

To do this, a virtqueue index (vq_index) is introduced to record the
first virtqueue that will be handled by a given vhost_net device. Based
on this index and nvqs, vhost can calculate the relative index of each
virtqueue when setting up the vhost_net device.
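
Concretely, each vhost device translates a device-global virtqueue index
into an index local to itself before issuing vhost ioctls, as in the
vhost_virtqueue_start() hunk below:

    /* idx is the device-global virtqueue index */
    int vhost_vq_index = idx - dev->vq_index;
    /* e.g. for a vhost_net device with vq_index = 2: idx 2 -> 0, idx 3 -> 1 */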

Since we may have many vhost/vhost_net devices behind a single
virtio-net device, the setup of guest notifiers is moved out of the
starting/stopping of an individual vhost thread. vhost_net_{start|stop}()
are renamed to vhost_net_{start|stop}_one(), and new
vhost_net_{start|stop}() functions are introduced to configure the guest
notifiers and start/stop all vhost/vhost_net devices.
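
The resulting start path is roughly the following (a simplified sketch
of the vhost_net_start() added below; error handling and the unwinding
path are omitted):

    for (i = 0; i < total_queues; i++) {
        vhost_net_start_one(tap_get_vhost_net(ncs[i].peer), dev, i * 2);
    }
    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
                                          total_queues * 2, true);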

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/vhost.c      |   82 +++++++++++++++++++++++-----------------------------
 hw/vhost.h      |    2 +
 hw/vhost_net.c  |   86 +++++++++++++++++++++++++++++++++++++++++++++++++-----
 hw/vhost_net.h  |    4 +-
 hw/virtio-net.c |    4 +-
 5 files changed, 120 insertions(+), 58 deletions(-)

diff --git a/hw/vhost.c b/hw/vhost.c
index cee8aad..38257b9 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -619,14 +619,17 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
 {
     hwaddr s, l, a;
     int r;
+    int vhost_vq_index = idx - dev->vq_index;
     struct vhost_vring_file file = {
-        .index = idx,
+        .index = vhost_vq_index
     };
     struct vhost_vring_state state = {
-        .index = idx,
+        .index = vhost_vq_index
     };
     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
 
+    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
+
     vq->num = state.num = virtio_queue_get_num(vdev, idx);
     r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
     if (r) {
@@ -669,11 +672,12 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
         goto fail_alloc_ring;
     }
 
-    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
+    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
     if (r < 0) {
         r = -errno;
         goto fail_alloc;
     }
+
     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
     r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
     if (r) {
@@ -709,9 +713,10 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                     unsigned idx)
 {
     struct vhost_vring_state state = {
-        .index = idx,
+        .index = idx - dev->vq_index
     };
     int r;
+    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
     r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
     if (r < 0) {
         fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
@@ -867,7 +872,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     }
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             true);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
             goto fail_vq;
@@ -877,7 +884,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     return 0;
 fail_vq:
     while (--i >= 0) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             false);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
             fflush(stderr);
@@ -898,7 +907,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     int i, r;
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             false);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
             fflush(stderr);
@@ -912,8 +923,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
  */
 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
 {
-    struct vhost_virtqueue *vq = hdev->vqs + n;
+    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
     assert(hdev->started);
+    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
     return event_notifier_test_and_clear(&vq->masked_notifier);
 }
 
@@ -922,15 +934,16 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
 {
     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
-    int r;
+    int r, index = n - hdev->vq_index;
 
     assert(hdev->started);
+    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
 
     struct vhost_vring_file file = {
-        .index = n,
+        .index = index
     };
     if (mask) {
-        file.fd = event_notifier_get_fd(&hdev->vqs[n].masked_notifier);
+        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
     } else {
         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
     }
@@ -945,20 +958,6 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 
     hdev->started = true;
 
-    if (!vdev->binding->set_guest_notifiers) {
-        fprintf(stderr, "binding does not support guest notifiers\n");
-        r = -ENOSYS;
-        goto fail;
-    }
-
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
-                                           hdev->nvqs,
-                                           true);
-    if (r < 0) {
-        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
-        goto fail_notifiers;
-    }
-
     r = vhost_dev_set_features(hdev, hdev->log_enabled);
     if (r < 0) {
         goto fail_features;
@@ -970,9 +969,9 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
     }
     for (i = 0; i < hdev->nvqs; ++i) {
         r = vhost_virtqueue_start(hdev,
-                                 vdev,
-                                 hdev->vqs + i,
-                                 i);
+                                  vdev,
+                                  hdev->vqs + i,
+                                  hdev->vq_index + i);
         if (r < 0) {
             goto fail_vq;
         }
@@ -995,15 +994,13 @@ fail_log:
 fail_vq:
     while (--i >= 0) {
         vhost_virtqueue_stop(hdev,
-                                vdev,
-                                hdev->vqs + i,
-                                i);
+                             vdev,
+                             hdev->vqs + i,
+                             hdev->vq_index + i);
     }
+    i = hdev->nvqs;
 fail_mem:
 fail_features:
-    vdev->binding->set_guest_notifiers(vdev->binding_opaque, hdev->nvqs, false);
-fail_notifiers:
-fail:
 
     hdev->started = false;
     return r;
@@ -1012,29 +1009,22 @@ fail:
 /* Host notifiers must be enabled at this point. */
 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
-    int i, r;
+    int i;
 
     for (i = 0; i < hdev->nvqs; ++i) {
         vhost_virtqueue_stop(hdev,
-                                vdev,
-                                hdev->vqs + i,
-                                i);
+                             vdev,
+                             hdev->vqs + i,
+                             hdev->vq_index + i);
     }
     for (i = 0; i < hdev->n_mem_sections; ++i) {
         vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                 0, (hwaddr)~0x0ull);
     }
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
-                                           hdev->nvqs,
-                                           false);
-    if (r < 0) {
-        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
-        fflush(stderr);
-    }
-    assert (r >= 0);
 
     hdev->started = false;
     g_free(hdev->log);
     hdev->log = NULL;
     hdev->log_size = 0;
 }
+
diff --git a/hw/vhost.h b/hw/vhost.h
index 44c61a5..f062d48 100644
--- a/hw/vhost.h
+++ b/hw/vhost.h
@@ -35,6 +35,8 @@ struct vhost_dev {
     MemoryRegionSection *mem_sections;
     struct vhost_virtqueue *vqs;
     int nvqs;
+    /* the first virtqueue which would be used by this vhost dev */
+    int vq_index;
     unsigned long long features;
     unsigned long long acked_features;
     unsigned long long backend_features;
diff --git a/hw/vhost_net.c b/hw/vhost_net.c
index d3a04ca..8693ac2 100644
--- a/hw/vhost_net.c
+++ b/hw/vhost_net.c
@@ -140,12 +140,21 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
     return vhost_dev_query(&net->dev, dev);
 }
 
-int vhost_net_start(struct vhost_net *net,
-                    VirtIODevice *dev)
+static int vhost_net_start_one(struct vhost_net *net,
+                               VirtIODevice *dev,
+                               int vq_index)
 {
     struct vhost_vring_file file = { };
     int r;
 
+    if (net->dev.started) {
+        return 0;
+    }
+
+    net->dev.nvqs = 2;
+    net->dev.vqs = net->vqs;
+    net->dev.vq_index = vq_index;
+
     r = vhost_dev_enable_notifiers(&net->dev, dev);
     if (r < 0) {
         goto fail_notifiers;
@@ -181,11 +190,15 @@ fail_notifiers:
     return r;
 }
 
-void vhost_net_stop(struct vhost_net *net,
-                    VirtIODevice *dev)
+static void vhost_net_stop_one(struct vhost_net *net,
+                               VirtIODevice *dev)
 {
     struct vhost_vring_file file = { .fd = -1 };
 
+    if (!net->dev.started) {
+        return;
+    }
+
     for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
         int r = ioctl(net->dev.control, VHOST_NET_SET_BACKEND, &file);
         assert(r >= 0);
@@ -195,6 +208,61 @@ void vhost_net_stop(struct vhost_net *net,
     vhost_dev_disable_notifiers(&net->dev, dev);
 }
 
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+                    int total_queues)
+{
+    int r, i = 0;
+
+    if (!dev->binding->set_guest_notifiers) {
+        error_report("binding does not support guest notifiers\n");
+        r = -ENOSYS;
+        goto err;
+    }
+
+    for (i = 0; i < total_queues; i++) {
+        r = vhost_net_start_one(tap_get_vhost_net(ncs[i].peer), dev, i * 2);
+
+        if (r < 0) {
+            goto err;
+        }
+    }
+
+    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
+                                          total_queues * 2,
+                                          true);
+    if (r < 0) {
+        error_report("Error binding guest notifier: %d\n", -r);
+        goto err;
+    }
+
+    return 0;
+
+err:
+    while (--i >= 0) {
+        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
+    }
+    return r;
+}
+
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+                    int total_queues)
+{
+    int i, r;
+
+    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
+                                          total_queues * 2,
+                                          false);
+    if (r < 0) {
+        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
+        fflush(stderr);
+    }
+    assert(r >= 0);
+
+    for (i = 0; i < total_queues; i++) {
+        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
+    }
+}
+
 void vhost_net_cleanup(struct vhost_net *net)
 {
     vhost_dev_cleanup(&net->dev);
@@ -224,13 +292,15 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
     return false;
 }
 
-int vhost_net_start(struct vhost_net *net,
-		    VirtIODevice *dev)
+int vhost_net_start(VirtIODevice *dev,
+                    NetClientState *ncs,
+                    int total_queues)
 {
     return -ENOSYS;
 }
-void vhost_net_stop(struct vhost_net *net,
-		    VirtIODevice *dev)
+void vhost_net_stop(VirtIODevice *dev,
+                    NetClientState *ncs,
+                    int total_queues)
 {
 }
 
diff --git a/hw/vhost_net.h b/hw/vhost_net.h
index 88912b8..2d936bb 100644
--- a/hw/vhost_net.h
+++ b/hw/vhost_net.h
@@ -9,8 +9,8 @@ typedef struct vhost_net VHostNetState;
 VHostNetState *vhost_net_init(NetClientState *backend, int devfd, bool force);
 
 bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
-int vhost_net_start(VHostNetState *net, VirtIODevice *dev);
-void vhost_net_stop(VHostNetState *net, VirtIODevice *dev);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
 
 void vhost_net_cleanup(VHostNetState *net);
 
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 1a3fc74..d30cc31 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -129,14 +129,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
             return;
         }
         n->vhost_started = 1;
-        r = vhost_net_start(tap_get_vhost_net(nc->peer), &n->vdev);
+        r = vhost_net_start(&n->vdev, nc, 1);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
             n->vhost_started = 0;
         }
     } else {
-        vhost_net_stop(tap_get_vhost_net(nc->peer), &n->vdev);
+        vhost_net_stop(&n->vdev, nc, 1);
         n->vhost_started = 0;
     }
 }
-- 
1.7.1



Thread overview: 32+ messages
2013-01-30 11:12 [PATCH V4 00/22] Multiqueue virtio-net Jason Wang
2013-01-30 11:12 ` [PATCH V4 01/22] net: tap: using bool instead of bitfield Jason Wang
2013-01-30 11:12 ` [PATCH V4 02/22] net: tap: use abort() instead of assert(0) Jason Wang
2013-01-30 11:12 ` [PATCH V4 03/22] net: introduce qemu_get_queue() Jason Wang
2013-01-30 11:12 ` [PATCH V4 04/22] net: introduce qemu_get_nic() Jason Wang
2013-01-30 11:12 ` [PATCH V4 05/22] net: intorduce qemu_del_nic() Jason Wang
2013-01-30 11:12 ` [PATCH V4 06/22] net: introduce qemu_find_net_clients_except() Jason Wang
2013-01-30 11:12 ` [PATCH V4 07/22] net: introduce qemu_net_client_setup() Jason Wang
2013-01-30 11:12 ` [PATCH V4 08/22] net: introduce NetClientState destructor Jason Wang
2013-01-30 11:12 ` [PATCH V4 09/22] net: multiqueue support Jason Wang
2013-01-30 11:12 ` [PATCH V4 10/22] tap: import linux multiqueue constants Jason Wang
2013-01-30 11:12 ` [PATCH V4 11/22] tap: factor out common tap initialization Jason Wang
2013-01-30 11:12 ` [PATCH V4 12/22] tap: add Linux multiqueue support Jason Wang
2013-01-30 11:12 ` [PATCH V4 13/22] tap: support enabling or disabling a queue Jason Wang
2013-01-30 11:12 ` [PATCH V4 14/22] tap: introduce a helper to get the name of an interface Jason Wang
2013-01-30 11:12 ` [PATCH V4 15/22] tap: multiqueue support Jason Wang
2013-01-30 11:12 ` Jason Wang [this message]
2013-01-30 11:12 ` [PATCH V4 17/22] virtio: introduce virtio_del_queue() Jason Wang
2013-01-30 11:12 ` [PATCH V4 18/22] virtio: add a queue_index to VirtQueue Jason Wang
2013-01-30 11:12 ` [PATCH V4 19/22] virtio-net: separate virtqueue from VirtIONet Jason Wang
2013-01-30 11:12 ` [PATCH V4 20/22] virtio-net: multiqueue support Jason Wang
2013-01-30 11:12 ` [PATCH V4 21/22] virtio-net: migration support for multiqueue Jason Wang
2013-01-30 11:12 ` [PATCH V4 22/22] virtio-net: compat multiqueue support Jason Wang
     [not found] ` <5109669F.5010405@redhat.com>
2013-01-31  7:00   ` [Qemu-devel] [PATCH V4 00/22] Multiqueue virtio-net Jason Wang
2013-01-31 13:44     ` Eric Blake
2013-01-31 13:58       ` [Qemu-devel] " Michael S. Tsirkin
2013-01-31 15:18         ` Eric Blake
2013-01-31 15:04       ` [Qemu-devel] " Jason Wang
2013-01-31 14:21 ` Michael S. Tsirkin
2013-01-31 14:36   ` Michael S. Tsirkin
2013-01-31 15:05     ` Jason Wang
2013-02-04 22:53 ` Anthony Liguori
