From: Jason Wang <jasowang@redhat.com>
To: mst@redhat.com, qemu-devel@nongnu.org, aliguori@us.ibm.com,
	shajnocz@redhat.com
Cc: krkumar2@in.ibm.com, kvm@vger.kernel.org, mprivozn@redhat.com,
	rusty@rustcorp.com.au, jwhan@filewood.snu.ac.kr,
	shiyer@redhat.com, gaowanlong@cn.fujitsu.com,
	Jason Wang <jasowang@redhat.com>
Subject: [PATCH V3 18/20] virtio-net: multiqueue support
Date: Tue, 29 Jan 2013 21:51:30 +0800
Message-ID: <1359467492-31704-19-git-send-email-jasowang@redhat.com>
In-Reply-To: <1359467492-31704-1-git-send-email-jasowang@redhat.com>

This patch implements both userspace and vhost support for multiqueue
virtio-net (VIRTIO_NET_F_MQ). This is done by introducing an array of
VirtIONetQueue structures in VirtIONet.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/virtio-net.c |  305 +++++++++++++++++++++++++++++++++++++++++++------------
 hw/virtio-net.h |   28 +++++-
 2 files changed, 266 insertions(+), 67 deletions(-)
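
For reference, a rough usage sketch for enabling multiqueue from the command
line (assuming the tap "queues=" parameter and per-queue vhost support added
earlier in this series, plus the "mq" device property added below; the exact
syntax is illustrative only):

    qemu-system-x86_64 ... \
        -netdev tap,id=hn0,queues=4,vhost=on \
        -device virtio-net-pci,netdev=hn0,mq=on,vectors=10

With MSI-X, "vectors" is usually sized around 2 * queues + 2 so that each
TX/RX virtqueue plus the control queue and the config change interrupt can
get a dedicated vector. The guest then selects the number of active queue
pairs via VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET (e.g. "ethtool -L eth0 combined 4"
with a Linux guest driver that supports it).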

diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 8a75b52..2ae5aad 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -44,7 +44,7 @@ typedef struct VirtIONet
     VirtIODevice vdev;
     uint8_t mac[ETH_ALEN];
     uint16_t status;
-    VirtIONetQueue vq;
+    VirtIONetQueue vqs[MAX_QUEUE_NUM];
     VirtQueue *ctrl_vq;
     NICState *nic;
     uint32_t tx_timeout;
@@ -70,14 +70,23 @@ typedef struct VirtIONet
     } mac_table;
     uint32_t *vlans;
     DeviceState *qdev;
+    int multiqueue;
+    uint16_t max_queues;
+    uint16_t curr_queues;
 } VirtIONet;
 
-static VirtIONetQueue *virtio_net_get_queue(NetClientState *nc)
+static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
 {
     VirtIONet *n = qemu_get_nic_opaque(nc);
 
-    return &n->vq;
+    return &n->vqs[nc->queue_index];
 }
+
+static int vq2q(int queue_index)
+{
+    return queue_index / 2;
+}
+
 /* TODO
  * - we could suppress RX interrupt if we were so inclined.
  */
@@ -93,6 +102,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
     struct virtio_net_config netcfg;
 
     stw_p(&netcfg.status, n->status);
+    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
     memcpy(netcfg.mac, n->mac, ETH_ALEN);
     memcpy(config, &netcfg, sizeof(netcfg));
 }
@@ -119,6 +129,7 @@ static bool virtio_net_started(VirtIONet *n, uint8_t status)
 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
 {
     NetClientState *nc = qemu_get_queue(n->nic);
+    int queues = n->multiqueue ? n->max_queues : 1;
 
     if (!nc->peer) {
         return;
@@ -130,6 +141,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
     if (!tap_get_vhost_net(nc->peer)) {
         return;
     }
+
     if (!!n->vhost_started == virtio_net_started(n, status) &&
                               !nc->peer->link_down) {
         return;
@@ -140,16 +152,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
             return;
         }
         n->vhost_started = 1;
-        r = vhost_net_start(&n->vdev, nc, 1);
+        r = vhost_net_start(&n->vdev, n->nic->ncs, queues);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
             n->vhost_started = 0;
-        } else {
-            n->vhost_started = 1;
         }
     } else {
-        vhost_net_stop(&n->vdev, nc, 1);
+        vhost_net_stop(&n->vdev, n->nic->ncs, queues);
         n->vhost_started = 0;
     }
 }
@@ -157,26 +167,38 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
 {
     VirtIONet *n = to_virtio_net(vdev);
-    VirtIONetQueue *q = &n->vq;
+    VirtIONetQueue *q;
+    int i;
+    uint8_t queue_status;
 
     virtio_net_vhost_status(n, status);
 
-    if (!q->tx_waiting) {
-        return;
-    }
+    for (i = 0; i < n->max_queues; i++) {
+        q = &n->vqs[i];
 
-    if (virtio_net_started(n, status) && !n->vhost_started) {
-        if (q->tx_timer) {
-            qemu_mod_timer(q->tx_timer,
-                           qemu_get_clock_ns(vm_clock) + n->tx_timeout);
+        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
+            queue_status = 0;
         } else {
-            qemu_bh_schedule(q->tx_bh);
+            queue_status = status;
         }
-    } else {
-        if (q->tx_timer) {
-            qemu_del_timer(q->tx_timer);
+
+        if (!q->tx_waiting) {
+            continue;
+        }
+
+        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
+            if (q->tx_timer) {
+                qemu_mod_timer(q->tx_timer,
+                               qemu_get_clock_ns(vm_clock) + n->tx_timeout);
+            } else {
+                qemu_bh_schedule(q->tx_bh);
+            }
         } else {
-            qemu_bh_cancel(q->tx_bh);
+            if (q->tx_timer) {
+                qemu_del_timer(q->tx_timer);
+            } else {
+                qemu_bh_cancel(q->tx_bh);
+            }
         }
     }
 }
@@ -208,6 +230,8 @@ static void virtio_net_reset(VirtIODevice *vdev)
     n->nomulti = 0;
     n->nouni = 0;
     n->nobcast = 0;
+    /* multiqueue is disabled by default */
+    n->curr_queues = 1;
 
     /* Flush any MAC and VLAN filter table state */
     n->mac_table.in_use = 0;
@@ -249,18 +273,72 @@ static int peer_has_ufo(VirtIONet *n)
 
 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
 {
+    int i;
+    NetClientState *nc;
+
     n->mergeable_rx_bufs = mergeable_rx_bufs;
 
     n->guest_hdr_len = n->mergeable_rx_bufs ?
         sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
 
-    if (peer_has_vnet_hdr(n) &&
-        tap_has_vnet_hdr_len(qemu_get_queue(n->nic)->peer, n->guest_hdr_len)) {
-        tap_set_vnet_hdr_len(qemu_get_queue(n->nic)->peer, n->guest_hdr_len);
-        n->host_hdr_len = n->guest_hdr_len;
+    for (i = 0; i < n->max_queues; i++) {
+        nc = qemu_get_subqueue(n->nic, i);
+
+        if (peer_has_vnet_hdr(n) &&
+            tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
+            tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
+            n->host_hdr_len = n->guest_hdr_len;
+        }
+    }
+}
+
+static int peer_attach(VirtIONet *n, int index)
+{
+    NetClientState *nc = qemu_get_subqueue(n->nic, index);
+    int ret;
+
+    if (!nc->peer) {
+        ret = -1;
+    } else if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
+        ret = -1;
+    } else {
+        ret = tap_enable(nc->peer);
+    }
+
+    return ret;
+}
+
+static int peer_detach(VirtIONet *n, int index)
+{
+    NetClientState *nc = qemu_get_subqueue(n->nic, index);
+    int ret;
+
+    if (!nc->peer) {
+        ret = -1;
+    } else if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
+        ret = -1;
+    } else {
+        ret = tap_disable(nc->peer);
+    }
+
+    return ret;
+}
+
+static void virtio_net_set_queues(VirtIONet *n)
+{
+    int i;
+
+    for (i = 0; i < n->max_queues; i++) {
+        if (i < n->curr_queues) {
+            assert(!peer_attach(n, i));
+        } else {
+            assert(!peer_detach(n, i));
+        }
     }
 }
 
+static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl);
+
 static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
 {
     VirtIONet *n = to_virtio_net(vdev);
@@ -312,25 +390,33 @@ static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
 static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
 {
     VirtIONet *n = to_virtio_net(vdev);
-    NetClientState *nc = qemu_get_queue(n->nic);
+    int i;
+
+    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)),
+                              !!(features & (1 << VIRTIO_NET_F_CTRL_VQ)));
 
     virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));
 
     if (n->has_vnet_hdr) {
-        tap_set_offload(nc->peer,
+        tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
                         (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                         (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                         (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                         (features >> VIRTIO_NET_F_GUEST_ECN)  & 1,
                         (features >> VIRTIO_NET_F_GUEST_UFO)  & 1);
     }
-    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
-        return;
-    }
-    if (!tap_get_vhost_net(nc->peer)) {
-        return;
+
+    for (i = 0; i < n->max_queues; i++) {
+        NetClientState *nc = qemu_get_subqueue(n->nic, i);
+
+        if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
+            continue;
+        }
+        if (!tap_get_vhost_net(nc->peer)) {
+            continue;
+        }
+        vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
     }
-    vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
 }
 
 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
@@ -440,6 +526,38 @@ static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
     return VIRTIO_NET_OK;
 }
 
+static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
+                                VirtQueueElement *elem)
+{
+    struct virtio_net_ctrl_mq s;
+
+    if (elem->out_num != 2 ||
+        elem->out_sg[1].iov_len != sizeof(struct virtio_net_ctrl_mq)) {
+        error_report("virtio-net ctrl invalid steering command");
+        return VIRTIO_NET_ERR;
+    }
+
+    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+        return VIRTIO_NET_ERR;
+    }
+
+    memcpy(&s, elem->out_sg[1].iov_base, sizeof(struct virtio_net_ctrl_mq));
+
+    if (s.virtqueue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+        s.virtqueue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
+        s.virtqueue_pairs > n->max_queues ||
+        !n->multiqueue) {
+        return VIRTIO_NET_ERR;
+    }
+
+    n->curr_queues = s.virtqueue_pairs;
+    /* stop the backend before changing the number of queues to avoid handling a
+     * disabled queue */
+    virtio_net_set_status(&n->vdev, n->vdev.status);
+    virtio_net_set_queues(n);
+
+    return VIRTIO_NET_OK;
+}
 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = to_virtio_net(vdev);
@@ -468,6 +586,9 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
             status = virtio_net_handle_mac(n, ctrl.cmd, &elem);
         else if (ctrl.class == VIRTIO_NET_CTRL_VLAN)
             status = virtio_net_handle_vlan_table(n, ctrl.cmd, &elem);
+        else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
+            status = virtio_net_handle_mq(n, ctrl.cmd, &elem);
+        }
 
         stb_p(elem.in_sg[elem.in_num - 1].iov_base, status);
 
@@ -481,19 +602,24 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = to_virtio_net(vdev);
+    int queue_index = vq2q(virtio_get_queue_index(vq));
 
-    qemu_flush_queued_packets(qemu_get_queue(n->nic));
+    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
 }
 
 static int virtio_net_can_receive(NetClientState *nc)
 {
     VirtIONet *n = qemu_get_nic_opaque(nc);
-    VirtIONetQueue *q = virtio_net_get_queue(nc);
+    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
 
     if (!n->vdev.vm_running) {
         return 0;
     }
 
+    if (nc->queue_index >= n->curr_queues) {
+        return 0;
+    }
+
     if (!virtio_queue_ready(q->rx_vq) ||
         !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
         return 0;
@@ -624,13 +750,13 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
 {
     VirtIONet *n = qemu_get_nic_opaque(nc);
-    VirtIONetQueue *q = virtio_net_get_queue(nc);
+    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
     struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
     struct virtio_net_hdr_mrg_rxbuf mhdr;
     unsigned mhdr_cnt = 0;
     size_t offset, i, guest_offset;
 
-    if (!virtio_net_can_receive(qemu_get_queue(n->nic))) {
+    if (!virtio_net_can_receive(nc)) {
         return -1;
     }
 
@@ -725,7 +851,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
 {
     VirtIONet *n = qemu_get_nic_opaque(nc);
-    VirtIONetQueue *q = virtio_net_get_queue(nc);
+    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
 
     virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
     virtio_notify(&n->vdev, q->tx_vq);
@@ -742,6 +868,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
     VirtIONet *n = q->n;
     VirtQueueElement elem;
     int32_t num_packets = 0;
+    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
     if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
         return num_packets;
     }
@@ -783,8 +910,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
 
         len = n->guest_hdr_len;
 
-        ret = qemu_sendv_packet_async(qemu_get_queue(n->nic), out_sg, out_num,
-                                      virtio_net_tx_complete);
+        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
+                                      out_sg, out_num, virtio_net_tx_complete);
         if (ret == 0) {
             virtio_queue_set_notification(q->tx_vq, 0);
             q->async_tx.elem = elem;
@@ -807,7 +934,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = to_virtio_net(vdev);
-    VirtIONetQueue *q = &n->vq;
+    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
 
     /* This happens when device was stopped but VCPU wasn't. */
     if (!n->vdev.vm_running) {
@@ -831,7 +958,7 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = to_virtio_net(vdev);
-    VirtIONetQueue *q = &n->vq;
+    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
 
     if (unlikely(q->tx_waiting)) {
         return;
@@ -899,10 +1026,46 @@ static void virtio_net_tx_bh(void *opaque)
     }
 }
 
+static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl)
+{
+    VirtIODevice *vdev = &n->vdev;
+    int i, max = multiqueue ? n->max_queues : 1;
+
+    n->multiqueue = multiqueue;
+
+    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
+        virtio_del_queue(vdev, i);
+    }
+
+    for (i = 1; i < max; i++) {
+        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+        if (n->vqs[i].tx_timer) {
+            n->vqs[i].tx_vq =
+                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+            n->vqs[i].tx_timer = qemu_new_timer_ns(vm_clock,
+                                                   virtio_net_tx_timer,
+                                                   &n->vqs[i]);
+        } else {
+            n->vqs[i].tx_vq =
+                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
+        }
+
+        n->vqs[i].tx_waiting = 0;
+        n->vqs[i].n = n;
+    }
+
+    if (ctrl) {
+        n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+    }
+
+    virtio_net_set_queues(n);
+}
+
 static void virtio_net_save(QEMUFile *f, void *opaque)
 {
     VirtIONet *n = opaque;
-    VirtIONetQueue *q = &n->vq;
+    VirtIONetQueue *q = &n->vqs[0];
 
     /* At this point, backend must be stopped, otherwise
      * it might keep writing to memory. */
@@ -931,9 +1094,8 @@ static void virtio_net_save(QEMUFile *f, void *opaque)
 static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
 {
     VirtIONet *n = opaque;
-    VirtIONetQueue *q = &n->vq;
-    int i;
-    int ret;
+    VirtIONetQueue *q = &n->vqs[0];
+    int ret, i;
 
     if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
         return -EINVAL;
@@ -1048,7 +1210,7 @@ static NetClientInfo net_virtio_info = {
 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
 {
     VirtIONet *n = to_virtio_net(vdev);
-    NetClientState *nc = qemu_get_queue(n->nic);
+    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
     assert(n->vhost_started);
     return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
 }
@@ -1057,7 +1219,7 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                            bool mask)
 {
     VirtIONet *n = to_virtio_net(vdev);
-    NetClientState *nc = qemu_get_queue(n->nic);
+    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
     assert(n->vhost_started);
     vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                              vdev, idx, mask);
@@ -1067,6 +1229,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                               virtio_net_conf *net)
 {
     VirtIONet *n;
+    int i;
 
     n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
                                         sizeof(struct virtio_net_config),
@@ -1081,8 +1244,11 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
     n->vdev.set_status = virtio_net_set_status;
     n->vdev.guest_notifier_mask = virtio_net_guest_notifier_mask;
     n->vdev.guest_notifier_pending = virtio_net_guest_notifier_pending;
-    n->vq.rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
-    n->vq.n = n;
+    n->vqs[0].rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
+    n->max_queues = conf->queues;
+    n->curr_queues = 1;
+    n->vqs[0].n = n;
+    n->tx_timeout = net->txtimer;
 
     if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
         error_report("virtio-net: "
@@ -1092,14 +1258,14 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
     }
 
     if (net->tx && !strcmp(net->tx, "timer")) {
-        n->vq.tx_vq = virtio_add_queue(&n->vdev, 256,
-                                       virtio_net_handle_tx_timer);
-        n->vq.tx_timer = qemu_new_timer_ns(vm_clock,
-                                           virtio_net_tx_timer, &n->vq);
-        n->tx_timeout = net->txtimer;
+        n->vqs[0].tx_vq = virtio_add_queue(&n->vdev, 256,
+                                           virtio_net_handle_tx_timer);
+        n->vqs[0].tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer,
+                                               &n->vqs[0]);
     } else {
-        n->vq.tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx_bh);
-        n->vq.tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vq);
+        n->vqs[0].tx_vq = virtio_add_queue(&n->vdev, 256,
+                                           virtio_net_handle_tx_bh);
+        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
     }
     n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
     qemu_macaddr_default_if_unset(&conf->macaddr);
@@ -1109,7 +1275,9 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
     n->nic = qemu_new_nic(&net_virtio_info, conf, object_get_typename(OBJECT(dev)), dev->id, n);
     peer_test_vnet_hdr(n);
     if (peer_has_vnet_hdr(n)) {
-        tap_using_vnet_hdr(qemu_get_queue(n->nic)->peer, 1);
+        for (i = 0; i < n->max_queues; i++) {
+            tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, 1);
+        }
         n->host_hdr_len = sizeof(struct virtio_net_hdr);
     } else {
         n->host_hdr_len = 0;
@@ -1117,7 +1285,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
 
     qemu_format_nic_info_str(qemu_get_queue(n->nic), conf->macaddr.a);
 
-    n->vq.tx_waiting = 0;
+    n->vqs[0].tx_waiting = 0;
     n->tx_burst = net->txburst;
     virtio_net_set_mrg_rx_bufs(n, 0);
     n->promisc = 1; /* for compatibility */
@@ -1138,23 +1306,28 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
 void virtio_net_exit(VirtIODevice *vdev)
 {
     VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
-    VirtIONetQueue *q = &n->vq;
+    int i;
 
     /* This will stop vhost backend if appropriate. */
     virtio_net_set_status(vdev, 0);
 
-    qemu_purge_queued_packets(qemu_get_queue(n->nic));
-
     unregister_savevm(n->qdev, "virtio-net", n);
 
     g_free(n->mac_table.macs);
     g_free(n->vlans);
 
-    if (q->tx_timer) {
-        qemu_del_timer(q->tx_timer);
-        qemu_free_timer(q->tx_timer);
-    } else {
-        qemu_bh_delete(q->tx_bh);
+    for (i = 0; i < n->max_queues; i++) {
+        VirtIONetQueue *q = &n->vqs[i];
+        NetClientState *nc = qemu_get_subqueue(n->nic, i);
+
+        qemu_purge_queued_packets(nc);
+
+        if (q->tx_timer) {
+            qemu_del_timer(q->tx_timer);
+            qemu_free_timer(q->tx_timer);
+        } else {
+            qemu_bh_delete(q->tx_bh);
+        }
     }
 
     qemu_del_nic(n->nic);
diff --git a/hw/virtio-net.h b/hw/virtio-net.h
index d46fb98..d4fba23 100644
--- a/hw/virtio-net.h
+++ b/hw/virtio-net.h
@@ -43,6 +43,8 @@
 #define VIRTIO_NET_F_CTRL_RX    18      /* Control channel RX mode support */
 #define VIRTIO_NET_F_CTRL_VLAN  19      /* Control channel VLAN filtering */
 #define VIRTIO_NET_F_CTRL_RX_EXTRA 20   /* Extra RX mode control support */
+#define VIRTIO_NET_F_MQ         22      /* Device supports Receive Flow
+                                         * Steering */
 
 #define VIRTIO_NET_S_LINK_UP    1       /* Link is up */
 
@@ -71,6 +73,8 @@ struct virtio_net_config
     uint8_t mac[ETH_ALEN];
     /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
     uint16_t status;
+    /* Max virtqueue pairs supported by the device */
+    uint16_t max_virtqueue_pairs;
 } QEMU_PACKED;
 
 /*
@@ -140,6 +144,26 @@ struct virtio_net_ctrl_mac {
  #define VIRTIO_NET_CTRL_VLAN_ADD             0
  #define VIRTIO_NET_CTRL_VLAN_DEL             1
 
+/*
+ * Control Multiqueue
+ *
+ * The command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET
+ * enables multiqueue, specifying the number of the transmit and
+ * receive queues that will be used. After the command is consumed and acked by
+ * the device, the device will not steer new packets on receive virtqueues
+ * other than specified nor read from transmit virtqueues other than specified.
+ * Accordingly, the driver should not transmit new packets on virtqueues other
+ * than specified.
+ */
+struct virtio_net_ctrl_mq {
+    uint16_t virtqueue_pairs;
+};
+
+#define VIRTIO_NET_CTRL_MQ   4
+ #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET        0
+ #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN        1
+ #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX        0x8000
+
 #define DEFINE_VIRTIO_NET_FEATURES(_state, _field) \
         DEFINE_VIRTIO_COMMON_FEATURES(_state, _field), \
         DEFINE_PROP_BIT("csum", _state, _field, VIRTIO_NET_F_CSUM, true), \
@@ -158,5 +182,7 @@ struct virtio_net_ctrl_mac {
         DEFINE_PROP_BIT("ctrl_vq", _state, _field, VIRTIO_NET_F_CTRL_VQ, true), \
         DEFINE_PROP_BIT("ctrl_rx", _state, _field, VIRTIO_NET_F_CTRL_RX, true), \
         DEFINE_PROP_BIT("ctrl_vlan", _state, _field, VIRTIO_NET_F_CTRL_VLAN, true), \
-        DEFINE_PROP_BIT("ctrl_rx_extra", _state, _field, VIRTIO_NET_F_CTRL_RX_EXTRA, true)
+        DEFINE_PROP_BIT("ctrl_rx_extra", _state, _field, \
+                        VIRTIO_NET_F_CTRL_RX_EXTRA, true),              \
+        DEFINE_PROP_BIT("mq", _state, _field, VIRTIO_NET_F_MQ, true)
 #endif
-- 
1.7.1

