All of lore.kernel.org
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH v2] vhost-user: add multi queue support
@ 2015-01-24 12:22 Nikolay Nikolaev
  2015-01-26 18:51 ` Eric Blake
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Nikolay Nikolaev @ 2015-01-24 12:22 UTC (permalink / raw)
  To: thomas.long, snabb-devel, eblake, qemu-devel, mst; +Cc: tech, n.nikolaev

Vhost-user will implement the multiqueueu support in a similar way to what
vhost already has - a separate thread for each queue.

To enable the multiqueue funcionality - a new command line parameter
"queues" is introduced for the vhost-user netdev.

Changes since v1:
 - use s->nc.info_str when bringing up/down the backend

Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
---
 docs/specs/vhost-user.txt |    5 +++++
 hw/virtio/vhost-user.c    |    6 +++++-
 net/vhost-user.c          |   39 +++++++++++++++++++++++++--------------
 qapi-schema.json          |    6 +++++-
 qemu-options.hx           |    5 +++--
 5 files changed, 43 insertions(+), 18 deletions(-)

diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
index 650bb18..d7b208c 100644
--- a/docs/specs/vhost-user.txt
+++ b/docs/specs/vhost-user.txt
@@ -127,6 +127,11 @@ in the ancillary data:
 If Master is unable to send the full message or receives a wrong reply it will
 close the connection. An optional reconnection mechanism can be implemented.
 
+Multi queue suport
+---------------------
+The protocol supports multiple queues by setting all index fields in the sent
+messages to a properly calculated value.
+
 Message types
 -------------
 
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index aefe0bb..83ebcaa 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -253,17 +253,20 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_NUM:
     case VHOST_SET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         break;
 
     case VHOST_GET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         need_reply = 1;
         break;
 
     case VHOST_SET_VRING_ADDR:
         memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
+        msg.addr.index += dev->vq_index;
         msg.size = sizeof(m.addr);
         break;
 
@@ -271,7 +274,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_CALL:
     case VHOST_SET_VRING_ERR:
         file = arg;
-        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
+        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
         msg.size = sizeof(m.u64);
         if (ioeventfd_enabled() && file->fd > 0) {
             fds[fd_num++] = file->fd;
@@ -313,6 +316,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
                 error_report("Received bad msg size.\n");
                 return -1;
             }
+            msg.state.index -= dev->vq_index;
             memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
             break;
         default:
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 24e050c..a0b4af2 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -122,37 +122,39 @@ static void net_vhost_user_event(void *opaque, int event)
     case CHR_EVENT_OPENED:
         vhost_user_start(s);
         net_vhost_link_down(s, false);
-        error_report("chardev \"%s\" went up\n", s->chr->label);
+        error_report("chardev \"%s\" went up\n", s->nc.info_str);
         break;
     case CHR_EVENT_CLOSED:
         net_vhost_link_down(s, true);
         vhost_user_stop(s);
-        error_report("chardev \"%s\" went down\n", s->chr->label);
+        error_report("chardev \"%s\" went down\n", s->nc.info_str);
         break;
     }
 }
 
 static int net_vhost_user_init(NetClientState *peer, const char *device,
                                const char *name, CharDriverState *chr,
-                               bool vhostforce)
+                               bool vhostforce, uint32_t queues)
 {
     NetClientState *nc;
     VhostUserState *s;
+    int i;
 
-    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+    for (i = 0; i < queues; i++) {
+        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
 
-    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
-             chr->label);
+        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
+                 i, chr->label);
 
-    s = DO_UPCAST(VhostUserState, nc, nc);
+        s = DO_UPCAST(VhostUserState, nc, nc);
 
-    /* We don't provide a receive callback */
-    s->nc.receive_disabled = 1;
-    s->chr = chr;
-    s->vhostforce = vhostforce;
-
-    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+        /* We don't provide a receive callback */
+        s->nc.receive_disabled = 1;
+        s->chr = chr;
+        s->vhostforce = vhostforce;
 
+        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+    }
     return 0;
 }
 
@@ -228,6 +230,7 @@ static int net_vhost_check_net(QemuOpts *opts, void *opaque)
 int net_init_vhost_user(const NetClientOptions *opts, const char *name,
                         NetClientState *peer)
 {
+    uint32_t queues;
     const NetdevVhostUserOptions *vhost_user_opts;
     CharDriverState *chr;
     bool vhostforce;
@@ -254,5 +257,13 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
         vhostforce = false;
     }
 
-    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce);
+    /* number of queues for multiqueue */
+    if (vhost_user_opts->has_queues) {
+        queues = vhost_user_opts->queues;
+    } else {
+        queues = 1;
+    }
+
+    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce,
+                               queues);
 }
diff --git a/qapi-schema.json b/qapi-schema.json
index e16f8eb..c2cead0 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -2287,12 +2287,16 @@
 #
 # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
 #
+# @queues: #optional number of queues to be created for multiqueue vhost-user
+#          (Since 2.3)
+#
 # Since 2.1
 ##
 { 'type': 'NetdevVhostUserOptions',
   'data': {
     'chardev':        'str',
-    '*vhostforce':    'bool' } }
+    '*vhostforce':    'bool',
+    '*queues':        'uint32' } }
 
 ##
 # @NetClientOptions
diff --git a/qemu-options.hx b/qemu-options.hx
index 85ca3ad..b5fa61f 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1894,13 +1894,14 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
 netdev.  @code{-net} and @code{-device} with parameter @option{vlan} create the
 required hub automatically.
 
-@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
+@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
 
 Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
 be a unix domain socket backed one. The vhost-user uses a specifically defined
 protocol to pass vhost ioctl replacement messages to an application on the other
 end of the socket. On non-MSIX guests, the feature can be forced with
-@var{vhostforce}.
+@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
+be created for multiqueue vhost-user.
 
 Example:
 @example

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [Qemu-devel] [PATCH v2] vhost-user: add multi queue support
  2015-01-24 12:22 [Qemu-devel] [PATCH v2] vhost-user: add multi queue support Nikolay Nikolaev
@ 2015-01-26 18:51 ` Eric Blake
  2015-04-06 15:07 ` Michael S. Tsirkin
  2015-05-21 14:20 ` [Qemu-devel] [PATCH v3] " Ouyang Changchun
  2 siblings, 0 replies; 7+ messages in thread
From: Eric Blake @ 2015-01-26 18:51 UTC (permalink / raw)
  To: Nikolay Nikolaev, thomas.long, snabb-devel, qemu-devel, mst; +Cc: tech

[-- Attachment #1: Type: text/plain, Size: 2280 bytes --]

On 01/24/2015 05:22 AM, Nikolay Nikolaev wrote:
> Vhost-user will implement the multiqueueu support in a similar way to what

s/multiqueueu/multiqueue/

Also, you probably want to be consistent - is it two words (subject
line) or one word (this sentence)?  I could go either way.

> vhost already has - a separate thread for each queue.
> 
> To enable the multiqueue funcionality - a new command line parameter

s/funcionality/functionality/

> "queues" is introduced for the vhost-user netdev.
> 
> Changes since v1:
>  - use s->nc.info_str when bringing up/down the backend

This paragraph about patch versioning belongs...

> 
> Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
> ---

...here, after the --- separator.  It is useful to reviewers, but makes
no sense in the final qemu.git repository (a year from now, we only care
about the one version that got committed, not how many tries it took to
get to that one version).

>  docs/specs/vhost-user.txt |    5 +++++
>  hw/virtio/vhost-user.c    |    6 +++++-
>  net/vhost-user.c          |   39 +++++++++++++++++++++++++--------------
>  qapi-schema.json          |    6 +++++-
>  qemu-options.hx           |    5 +++--
>  5 files changed, 43 insertions(+), 18 deletions(-)
> 
> diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> index 650bb18..d7b208c 100644
> --- a/docs/specs/vhost-user.txt
> +++ b/docs/specs/vhost-user.txt
> @@ -127,6 +127,11 @@ in the ancillary data:
>  If Master is unable to send the full message or receives a wrong reply it will
>  close the connection. An optional reconnection mechanism can be implemented.
>  
> +Multi queue suport

s/suport/support/

> +---------------------

Make the length of this separator match the text it is underlining (too
long, even after the typo fix)

> +++ b/qapi-schema.json
> @@ -2287,12 +2287,16 @@
>  #
>  # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
>  #
> +# @queues: #optional number of queues to be created for multiqueue vhost-user
> +#          (Since 2.3)

Might be worth mentioning that the default is 1.

-- 
Eric Blake   eblake redhat com    +1-919-301-3266
Libvirt virtualization library http://libvirt.org


[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 604 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [Qemu-devel] [PATCH v2] vhost-user: add multi queue support
  2015-01-24 12:22 [Qemu-devel] [PATCH v2] vhost-user: add multi queue support Nikolay Nikolaev
  2015-01-26 18:51 ` Eric Blake
@ 2015-04-06 15:07 ` Michael S. Tsirkin
  2015-04-08 15:18   ` [Qemu-devel] [snabb-devel] " Nikolay Nikolaev
  2015-04-09  2:18   ` Ouyang, Changchun
  2015-05-21 14:20 ` [Qemu-devel] [PATCH v3] " Ouyang Changchun
  2 siblings, 2 replies; 7+ messages in thread
From: Michael S. Tsirkin @ 2015-04-06 15:07 UTC (permalink / raw)
  To: Nikolay Nikolaev; +Cc: snabb-devel, thomas.long, qemu-devel, tech

On Sat, Jan 24, 2015 at 02:22:29PM +0200, Nikolay Nikolaev wrote:
> Vhost-user will implement the multiqueueu support in a similar way to what

multiqueue

> vhost already has - a separate thread for each queue.
> 
> To enable the multiqueue funcionality - a new command line parameter
> "queues" is introduced for the vhost-user netdev.
> 
> Changes since v1:
>  - use s->nc.info_str when bringing up/down the backend
> 
> Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
> ---
>  docs/specs/vhost-user.txt |    5 +++++
>  hw/virtio/vhost-user.c    |    6 +++++-
>  net/vhost-user.c          |   39 +++++++++++++++++++++++++--------------
>  qapi-schema.json          |    6 +++++-
>  qemu-options.hx           |    5 +++--
>  5 files changed, 43 insertions(+), 18 deletions(-)
> 
> diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> index 650bb18..d7b208c 100644
> --- a/docs/specs/vhost-user.txt
> +++ b/docs/specs/vhost-user.txt

I've been thinking that the protocol might be a useful addition
to the virtio spec. For this, as a minimum you would have to submit this
document as a comment to virtio TC with a proposal to include it in the
virtio spec.
See
https://www.oasis-open.org/committees/comments/index.php?wg_abbrev=virtio

Can you do this?

We can take it from there, though I would encourage your
company to join as a contributor.


> @@ -127,6 +127,11 @@ in the ancillary data:
>  If Master is unable to send the full message or receives a wrong reply it will
>  close the connection. An optional reconnection mechanism can be implemented.
>  
> +Multi queue suport
> +---------------------
> +The protocol supports multiple queues by setting all index fields in the sent
> +messages to a properly calculated value.
> +

Something that's not clear from this document is what happens with control VQ.
Can you clarify please?


>  Message types
>  -------------
>  
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index aefe0bb..83ebcaa 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -253,17 +253,20 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
>      case VHOST_SET_VRING_NUM:
>      case VHOST_SET_VRING_BASE:
>          memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> +        msg.state.index += dev->vq_index;
>          msg.size = sizeof(m.state);
>          break;
>  
>      case VHOST_GET_VRING_BASE:
>          memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> +        msg.state.index += dev->vq_index;
>          msg.size = sizeof(m.state);
>          need_reply = 1;
>          break;
>  
>      case VHOST_SET_VRING_ADDR:
>          memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
> +        msg.addr.index += dev->vq_index;
>          msg.size = sizeof(m.addr);
>          break;
>  
> @@ -271,7 +274,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
>      case VHOST_SET_VRING_CALL:
>      case VHOST_SET_VRING_ERR:
>          file = arg;
> -        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
> +        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
>          msg.size = sizeof(m.u64);
>          if (ioeventfd_enabled() && file->fd > 0) {
>              fds[fd_num++] = file->fd;
> @@ -313,6 +316,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
>                  error_report("Received bad msg size.\n");
>                  return -1;
>              }
> +            msg.state.index -= dev->vq_index;
>              memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
>              break;
>          default:
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 24e050c..a0b4af2 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -122,37 +122,39 @@ static void net_vhost_user_event(void *opaque, int event)
>      case CHR_EVENT_OPENED:
>          vhost_user_start(s);
>          net_vhost_link_down(s, false);
> -        error_report("chardev \"%s\" went up\n", s->chr->label);
> +        error_report("chardev \"%s\" went up\n", s->nc.info_str);
>          break;
>      case CHR_EVENT_CLOSED:
>          net_vhost_link_down(s, true);
>          vhost_user_stop(s);
> -        error_report("chardev \"%s\" went down\n", s->chr->label);
> +        error_report("chardev \"%s\" went down\n", s->nc.info_str);
>          break;
>      }
>  }
>  
>  static int net_vhost_user_init(NetClientState *peer, const char *device,
>                                 const char *name, CharDriverState *chr,
> -                               bool vhostforce)
> +                               bool vhostforce, uint32_t queues)
>  {
>      NetClientState *nc;
>      VhostUserState *s;
> +    int i;
>  
> -    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> +    for (i = 0; i < queues; i++) {
> +        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
>  
> -    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
> -             chr->label);
> +        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
> +                 i, chr->label);
>  
> -    s = DO_UPCAST(VhostUserState, nc, nc);
> +        s = DO_UPCAST(VhostUserState, nc, nc);
>  
> -    /* We don't provide a receive callback */
> -    s->nc.receive_disabled = 1;
> -    s->chr = chr;
> -    s->vhostforce = vhostforce;
> -
> -    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
> +        /* We don't provide a receive callback */
> +        s->nc.receive_disabled = 1;
> +        s->chr = chr;
> +        s->vhostforce = vhostforce;
>  
> +        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
> +    }
>      return 0;
>  }
>  
> @@ -228,6 +230,7 @@ static int net_vhost_check_net(QemuOpts *opts, void *opaque)
>  int net_init_vhost_user(const NetClientOptions *opts, const char *name,
>                          NetClientState *peer)
>  {
> +    uint32_t queues;
>      const NetdevVhostUserOptions *vhost_user_opts;
>      CharDriverState *chr;
>      bool vhostforce;
> @@ -254,5 +257,13 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
>          vhostforce = false;
>      }
>  
> -    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce);
> +    /* number of queues for multiqueue */
> +    if (vhost_user_opts->has_queues) {
> +        queues = vhost_user_opts->queues;
> +    } else {
> +        queues = 1;
> +    }
> +
> +    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce,
> +                               queues);
>  }
> diff --git a/qapi-schema.json b/qapi-schema.json
> index e16f8eb..c2cead0 100644
> --- a/qapi-schema.json
> +++ b/qapi-schema.json
> @@ -2287,12 +2287,16 @@
>  #
>  # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
>  #
> +# @queues: #optional number of queues to be created for multiqueue vhost-user
> +#          (Since 2.3)
> +#
>  # Since 2.1
>  ##
>  { 'type': 'NetdevVhostUserOptions',
>    'data': {
>      'chardev':        'str',
> -    '*vhostforce':    'bool' } }
> +    '*vhostforce':    'bool',
> +    '*queues':        'uint32' } }
>  
>  ##
>  # @NetClientOptions
> diff --git a/qemu-options.hx b/qemu-options.hx
> index 85ca3ad..b5fa61f 100644
> --- a/qemu-options.hx
> +++ b/qemu-options.hx
> @@ -1894,13 +1894,14 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
>  netdev.  @code{-net} and @code{-device} with parameter @option{vlan} create the
>  required hub automatically.
>  
> -@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
> +@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
>  
>  Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
>  be a unix domain socket backed one. The vhost-user uses a specifically defined
>  protocol to pass vhost ioctl replacement messages to an application on the other
>  end of the socket. On non-MSIX guests, the feature can be forced with
> -@var{vhostforce}.
> +@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
> +be created for multiqueue vhost-user.
>  
>  Example:
>  @example

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [Qemu-devel] [snabb-devel] Re: [PATCH v2] vhost-user: add multi queue support
  2015-04-06 15:07 ` Michael S. Tsirkin
@ 2015-04-08 15:18   ` Nikolay Nikolaev
  2015-04-09  2:18   ` Ouyang, Changchun
  1 sibling, 0 replies; 7+ messages in thread
From: Nikolay Nikolaev @ 2015-04-08 15:18 UTC (permalink / raw)
  To: snabb-devel; +Cc: VirtualOpenSystems Technical Team, Long, Thomas, qemu-devel

On Mon, Apr 6, 2015 at 6:07 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Sat, Jan 24, 2015 at 02:22:29PM +0200, Nikolay Nikolaev wrote:
> > Vhost-user will implement the multiqueueu support in a similar way to what
>
> multiqueue
>
> > vhost already has - a separate thread for each queue.
> >
> > To enable the multiqueue funcionality - a new command line parameter
> > "queues" is introduced for the vhost-user netdev.
> >
> > Changes since v1:
> >  - use s->nc.info_str when bringing up/down the backend
> >
> > Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
> > ---
> >  docs/specs/vhost-user.txt |    5 +++++
> >  hw/virtio/vhost-user.c    |    6 +++++-
> >  net/vhost-user.c          |   39 +++++++++++++++++++++++++--------------
> >  qapi-schema.json          |    6 +++++-
> >  qemu-options.hx           |    5 +++--
> >  5 files changed, 43 insertions(+), 18 deletions(-)
> >
> > diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> > index 650bb18..d7b208c 100644
> > --- a/docs/specs/vhost-user.txt
> > +++ b/docs/specs/vhost-user.txt
>
> I've been thinking that the protocol might be a useful addition
> to the virtio spec. For this, as a minimum you would have to submit this
> document as a comment to virtio TC with a proposal to include it in the
> virtio spec.
> See
> https://www.oasis-open.org/committees/comments/index.php?wg_abbrev=virtio
>
> Can you do this?

This is actually a great idea. I'll post the document to the comments list.
Do you prefer the upstream version or this one with the MQ additions?

I believe we have to go for the feature negotiation you're proposing
and then add MQ,
reconnections etc. as negotiable features on top. So for the first
version in virtio spec
we can go with this v1.0 of the vhost-user protocol.

>
> We can take it from there, though I would encourage your
> company to join as a contributor.

We'll refrain from joining the TC for the time being, but we may
consider it in the future.

>
>
> > @@ -127,6 +127,11 @@ in the ancillary data:
> >  If Master is unable to send the full message or receives a wrong reply it will
> >  close the connection. An optional reconnection mechanism can be implemented.
> >
> > +Multi queue suport
> > +---------------------
> > +The protocol supports multiple queues by setting all index fields in the sent
> > +messages to a properly calculated value.
> > +
>
> Something that's not clear from this document is what happens with control VQ.
> Can you clarify please?

Well, the control VQ is a property of the virtio net device. The
vhost-user protocol operates
at the generic virtio level. It does not define specifics for the
different device types.

It is true that the current implementation handles the control VQ in
QEMU itself. This is
because vhost-user was modeled over the 'tap' backend which exposes
only the Rx/Tx queues to the
vhost-net kernel module.

I believe this doc should stay as generic as possible, but still we
can add a note about the
current implementation. Something like:

NOTE: the current vhost-user implementation has no virtio device
specifics. In that sense
the net device handles the control queue in QEMU, and thus it is not
exposed to the slave.

regards,
Nikolay Nikolaev

>
>
> >  Message types
> >  -------------
> >
> > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> > index aefe0bb..83ebcaa 100644
> > --- a/hw/virtio/vhost-user.c
> > +++ b/hw/virtio/vhost-user.c
> > @@ -253,17 +253,20 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> >      case VHOST_SET_VRING_NUM:
> >      case VHOST_SET_VRING_BASE:
> >          memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> > +        msg.state.index += dev->vq_index;
> >          msg.size = sizeof(m.state);
> >          break;
> >
> >      case VHOST_GET_VRING_BASE:
> >          memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> > +        msg.state.index += dev->vq_index;
> >          msg.size = sizeof(m.state);
> >          need_reply = 1;
> >          break;
> >
> >      case VHOST_SET_VRING_ADDR:
> >          memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
> > +        msg.addr.index += dev->vq_index;
> >          msg.size = sizeof(m.addr);
> >          break;
> >
> > @@ -271,7 +274,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> >      case VHOST_SET_VRING_CALL:
> >      case VHOST_SET_VRING_ERR:
> >          file = arg;
> > -        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
> > +        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
> >          msg.size = sizeof(m.u64);
> >          if (ioeventfd_enabled() && file->fd > 0) {
> >              fds[fd_num++] = file->fd;
> > @@ -313,6 +316,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> >                  error_report("Received bad msg size.\n");
> >                  return -1;
> >              }
> > +            msg.state.index -= dev->vq_index;
> >              memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
> >              break;
> >          default:
> > diff --git a/net/vhost-user.c b/net/vhost-user.c
> > index 24e050c..a0b4af2 100644
> > --- a/net/vhost-user.c
> > +++ b/net/vhost-user.c
> > @@ -122,37 +122,39 @@ static void net_vhost_user_event(void *opaque, int event)
> >      case CHR_EVENT_OPENED:
> >          vhost_user_start(s);
> >          net_vhost_link_down(s, false);
> > -        error_report("chardev \"%s\" went up\n", s->chr->label);
> > +        error_report("chardev \"%s\" went up\n", s->nc.info_str);
> >          break;
> >      case CHR_EVENT_CLOSED:
> >          net_vhost_link_down(s, true);
> >          vhost_user_stop(s);
> > -        error_report("chardev \"%s\" went down\n", s->chr->label);
> > +        error_report("chardev \"%s\" went down\n", s->nc.info_str);
> >          break;
> >      }
> >  }
> >
> >  static int net_vhost_user_init(NetClientState *peer, const char *device,
> >                                 const char *name, CharDriverState *chr,
> > -                               bool vhostforce)
> > +                               bool vhostforce, uint32_t queues)
> >  {
> >      NetClientState *nc;
> >      VhostUserState *s;
> > +    int i;
> >
> > -    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> > +    for (i = 0; i < queues; i++) {
> > +        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> >
> > -    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
> > -             chr->label);
> > +        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
> > +                 i, chr->label);
> >
> > -    s = DO_UPCAST(VhostUserState, nc, nc);
> > +        s = DO_UPCAST(VhostUserState, nc, nc);
> >
> > -    /* We don't provide a receive callback */
> > -    s->nc.receive_disabled = 1;
> > -    s->chr = chr;
> > -    s->vhostforce = vhostforce;
> > -
> > -    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
> > +        /* We don't provide a receive callback */
> > +        s->nc.receive_disabled = 1;
> > +        s->chr = chr;
> > +        s->vhostforce = vhostforce;
> >
> > +        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
> > +    }
> >      return 0;
> >  }
> >
> > @@ -228,6 +230,7 @@ static int net_vhost_check_net(QemuOpts *opts, void *opaque)
> >  int net_init_vhost_user(const NetClientOptions *opts, const char *name,
> >                          NetClientState *peer)
> >  {
> > +    uint32_t queues;
> >      const NetdevVhostUserOptions *vhost_user_opts;
> >      CharDriverState *chr;
> >      bool vhostforce;
> > @@ -254,5 +257,13 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
> >          vhostforce = false;
> >      }
> >
> > -    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce);
> > +    /* number of queues for multiqueue */
> > +    if (vhost_user_opts->has_queues) {
> > +        queues = vhost_user_opts->queues;
> > +    } else {
> > +        queues = 1;
> > +    }
> > +
> > +    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce,
> > +                               queues);
> >  }
> > diff --git a/qapi-schema.json b/qapi-schema.json
> > index e16f8eb..c2cead0 100644
> > --- a/qapi-schema.json
> > +++ b/qapi-schema.json
> > @@ -2287,12 +2287,16 @@
> >  #
> >  # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
> >  #
> > +# @queues: #optional number of queues to be created for multiqueue vhost-user
> > +#          (Since 2.3)
> > +#
> >  # Since 2.1
> >  ##
> >  { 'type': 'NetdevVhostUserOptions',
> >    'data': {
> >      'chardev':        'str',
> > -    '*vhostforce':    'bool' } }
> > +    '*vhostforce':    'bool',
> > +    '*queues':        'uint32' } }
> >
> >  ##
> >  # @NetClientOptions
> > diff --git a/qemu-options.hx b/qemu-options.hx
> > index 85ca3ad..b5fa61f 100644
> > --- a/qemu-options.hx
> > +++ b/qemu-options.hx
> > @@ -1894,13 +1894,14 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
> >  netdev.  @code{-net} and @code{-device} with parameter @option{vlan} create the
> >  required hub automatically.
> >
> > -@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
> > +@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
> >
> >  Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
> >  be a unix domain socket backed one. The vhost-user uses a specifically defined
> >  protocol to pass vhost ioctl replacement messages to an application on the other
> >  end of the socket. On non-MSIX guests, the feature can be forced with
> > -@var{vhostforce}.
> > +@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
> > +be created for multiqueue vhost-user.
> >
> >  Example:
> >  @example
>
> --
> You received this message because you are subscribed to the Google Groups "Snabb Switch development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to snabb-devel+unsubscribe@googlegroups.com.
> To post to this group, send an email to snabb-devel@googlegroups.com.
> Visit this group at http://groups.google.com/group/snabb-devel.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [Qemu-devel] [snabb-devel] Re: [PATCH v2] vhost-user: add multi queue support
  2015-04-06 15:07 ` Michael S. Tsirkin
  2015-04-08 15:18   ` [Qemu-devel] [snabb-devel] " Nikolay Nikolaev
@ 2015-04-09  2:18   ` Ouyang, Changchun
  1 sibling, 0 replies; 7+ messages in thread
From: Ouyang, Changchun @ 2015-04-09  2:18 UTC (permalink / raw)
  To: snabb-devel, Nikolay Nikolaev
  Cc: tech, Long, Thomas, qemu-devel, Ouyang, Changchun

Hi guys,

> -----Original Message-----
> From: snabb-devel@googlegroups.com [mailto:snabb-
> devel@googlegroups.com] On Behalf Of Michael S. Tsirkin
> Sent: Monday, April 6, 2015 11:07 PM
> To: Nikolay Nikolaev
> Cc: Long, Thomas; snabb-devel@googlegroups.com; eblake@redhat.com;
> qemu-devel@nongnu.org; tech@virtualopensystems.com
> Subject: [snabb-devel] Re: [PATCH v2] vhost-user: add multi queue support
> 
> On Sat, Jan 24, 2015 at 02:22:29PM +0200, Nikolay Nikolaev wrote:
> > Vhost-user will implement the multiqueueu support in a similar way to
> > what
> 
> multiqueue
> 
> > vhost already has - a separate thread for each queue.
> >
> > To enable the multiqueue funcionality - a new command line parameter
> > "queues" is introduced for the vhost-user netdev.
> >
> > Changes since v1:
> >  - use s->nc.info_str when bringing up/down the backend
> >
> > Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
> > ---
> >  docs/specs/vhost-user.txt |    5 +++++
> >  hw/virtio/vhost-user.c    |    6 +++++-
> >  net/vhost-user.c          |   39 +++++++++++++++++++++++++--------------
> >  qapi-schema.json          |    6 +++++-
> >  qemu-options.hx           |    5 +++--
> >  5 files changed, 43 insertions(+), 18 deletions(-)
> >
> > diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> > index 650bb18..d7b208c 100644
> > --- a/docs/specs/vhost-user.txt
> > +++ b/docs/specs/vhost-user.txt
> 
> I've been thinking that the protocol might be a useful addition to the virtio
> spec. For this, as a minimum you would have to submit this document as a
> comment to virtio TC with a proposal to include it in the virtio spec.
> See
> https://www.oasis-
> open.org/committees/comments/index.php?wg_abbrev=virtio
> 
> Can you do this?
> 
> We can take it from there, though I would encourage your company to join
> as a contributor.
> 
> 
> > @@ -127,6 +127,11 @@ in the ancillary data:
> >  If Master is unable to send the full message or receives a wrong
> > reply it will  close the connection. An optional reconnection mechanism can
> be implemented.
> >
> > +Multi queue suport
> > +---------------------
> > +The protocol supports multiple queues by setting all index fields in
> > +the sent messages to a properly calculated value.
> > +
> 
> Something that's not clear from this document is what happens with control
> VQ.
> Can you clarify please?
> 
> 
> >  Message types
> >  -------------
> >
> > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index
> > aefe0bb..83ebcaa 100644
> > --- a/hw/virtio/vhost-user.c
> > +++ b/hw/virtio/vhost-user.c
> > @@ -253,17 +253,20 @@ static int vhost_user_call(struct vhost_dev *dev,
> unsigned long int request,
> >      case VHOST_SET_VRING_NUM:
> >      case VHOST_SET_VRING_BASE:
> >          memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> > +        msg.state.index += dev->vq_index;
> >          msg.size = sizeof(m.state);
> >          break;
> >
> >      case VHOST_GET_VRING_BASE:
> >          memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> > +        msg.state.index += dev->vq_index;
> >          msg.size = sizeof(m.state);
> >          need_reply = 1;
> >          break;
> >
> >      case VHOST_SET_VRING_ADDR:
> >          memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
> > +        msg.addr.index += dev->vq_index;
> >          msg.size = sizeof(m.addr);
> >          break;
> >
> > @@ -271,7 +274,7 @@ static int vhost_user_call(struct vhost_dev *dev,
> unsigned long int request,
> >      case VHOST_SET_VRING_CALL:
> >      case VHOST_SET_VRING_ERR:
> >          file = arg;
> > -        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
> > +        msg.u64 = (file->index + dev->vq_index) &
> > + VHOST_USER_VRING_IDX_MASK;

I identified one vq_index issue here in the case of VHOST_SET_VRING_CALL:
the vq_index is not initialized before it is used here, so it could be a random value.
This leads to an error in vhost, because the random value is passed down and vhost uses it to set the vring call.
 
I have a quick fix for this, code changes as the following:
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 4e3a061..2fbdb93 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -157,6 +157,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)

     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
+    net->dev.vq_index = net->nc->queue_index;

     r = vhost_dev_init(&net->dev, options->opaque,
                        options->backend_type, options->force);
diff --git a/net/vhost-user.c b/net/vhost-user.c
index a0b4af2..b27190f 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -152,6 +152,7 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
         s->nc.receive_disabled = 1;
         s->chr = chr;
         s->vhostforce = vhostforce;
+        s->nc.queue_index = i;

         qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
     }

Would you guys have a look at this issue?
I also hit other issues when trying to run with 2 virtio ports, each one with 2 queues,
but I have not root-caused them yet and need to investigate further.
So I think there may be more issues in QEMU's vhost-user multiple queue enabling.

> >          msg.size = sizeof(m.u64);
> >          if (ioeventfd_enabled() && file->fd > 0) {
> >              fds[fd_num++] = file->fd; @@ -313,6 +316,7 @@ static int
> > vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> >                  error_report("Received bad msg size.\n");
> >                  return -1;
> >              }
> > +            msg.state.index -= dev->vq_index;
> >              memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
> >              break;
> >          default:
> > diff --git a/net/vhost-user.c b/net/vhost-user.c index
> > 24e050c..a0b4af2 100644
> > --- a/net/vhost-user.c
> > +++ b/net/vhost-user.c
> > @@ -122,37 +122,39 @@ static void net_vhost_user_event(void *opaque,
> int event)
> >      case CHR_EVENT_OPENED:
> >          vhost_user_start(s);
> >          net_vhost_link_down(s, false);
> > -        error_report("chardev \"%s\" went up\n", s->chr->label);
> > +        error_report("chardev \"%s\" went up\n", s->nc.info_str);
> >          break;
> >      case CHR_EVENT_CLOSED:
> >          net_vhost_link_down(s, true);
> >          vhost_user_stop(s);
> > -        error_report("chardev \"%s\" went down\n", s->chr->label);
> > +        error_report("chardev \"%s\" went down\n", s->nc.info_str);
> >          break;
> >      }
> >  }
> >
> >  static int net_vhost_user_init(NetClientState *peer, const char *device,
> >                                 const char *name, CharDriverState *chr,
> > -                               bool vhostforce)
> > +                               bool vhostforce, uint32_t queues)
> >  {
> >      NetClientState *nc;
> >      VhostUserState *s;
> > +    int i;
> >
> > -    nc = qemu_new_net_client(&net_vhost_user_info, peer, device,
> name);
> > +    for (i = 0; i < queues; i++) {
> > +        nc = qemu_new_net_client(&net_vhost_user_info, peer, device,
> > + name);
> >
> > -    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
> > -             chr->label);
> > +        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
> > +                 i, chr->label);
> >
> > -    s = DO_UPCAST(VhostUserState, nc, nc);
> > +        s = DO_UPCAST(VhostUserState, nc, nc);
> >
> > -    /* We don't provide a receive callback */
> > -    s->nc.receive_disabled = 1;
> > -    s->chr = chr;
> > -    s->vhostforce = vhostforce;
> > -
> > -    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event,
> s);
> > +        /* We don't provide a receive callback */
> > +        s->nc.receive_disabled = 1;
> > +        s->chr = chr;
> > +        s->vhostforce = vhostforce;
> >
> > +        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event,
> s);
> > +    }
> >      return 0;
> >  }
> >
> > @@ -228,6 +230,7 @@ static int net_vhost_check_net(QemuOpts *opts,
> > void *opaque)  int net_init_vhost_user(const NetClientOptions *opts,
> const char *name,
> >                          NetClientState *peer)  {
> > +    uint32_t queues;
> >      const NetdevVhostUserOptions *vhost_user_opts;
> >      CharDriverState *chr;
> >      bool vhostforce;
> > @@ -254,5 +257,13 @@ int net_init_vhost_user(const NetClientOptions
> *opts, const char *name,
> >          vhostforce = false;
> >      }
> >
> > -    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce);
> > +    /* number of queues for multiqueue */
> > +    if (vhost_user_opts->has_queues) {
> > +        queues = vhost_user_opts->queues;
> > +    } else {
> > +        queues = 1;
> > +    }
> > +
> > +    return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce,
> > +                               queues);
> >  }
> > diff --git a/qapi-schema.json b/qapi-schema.json index
> > e16f8eb..c2cead0 100644
> > --- a/qapi-schema.json
> > +++ b/qapi-schema.json
> > @@ -2287,12 +2287,16 @@
> >  #
> >  # @vhostforce: #optional vhost on for non-MSIX virtio guests (default:
> false).
> >  #
> > +# @queues: #optional number of queues to be created for multiqueue
> vhost-user
> > +#          (Since 2.3)
> > +#
> >  # Since 2.1
> >  ##
> >  { 'type': 'NetdevVhostUserOptions',
> >    'data': {
> >      'chardev':        'str',
> > -    '*vhostforce':    'bool' } }
> > +    '*vhostforce':    'bool',
> > +    '*queues':        'uint32' } }
> >
> >  ##
> >  # @NetClientOptions
> > diff --git a/qemu-options.hx b/qemu-options.hx index 85ca3ad..b5fa61f
> > 100644
> > --- a/qemu-options.hx
> > +++ b/qemu-options.hx
> > @@ -1894,13 +1894,14 @@ The hubport netdev lets you connect a NIC to a
> > QEMU "vlan" instead of a single  netdev.  @code{-net} and
> > @code{-device} with parameter @option{vlan} create the  required hub
> automatically.
> >
> > -@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
> > +@item -netdev
> > +vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
> >
> >  Establish a vhost-user netdev, backed by a chardev @var{id}. The
> > chardev should  be a unix domain socket backed one. The vhost-user
> > uses a specifically defined  protocol to pass vhost ioctl replacement
> > messages to an application on the other  end of the socket. On
> > non-MSIX guests, the feature can be forced with -@var{vhostforce}.
> > +@var{vhostforce}. Use 'queues=@var{n}' to specify the number of
> > +queues to be created for multiqueue vhost-user.
> >
> >  Example:
> >  @example

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [Qemu-devel] [PATCH v3] vhost-user: add multi queue support
  2015-01-24 12:22 [Qemu-devel] [PATCH v2] vhost-user: add multi queue support Nikolay Nikolaev
  2015-01-26 18:51 ` Eric Blake
  2015-04-06 15:07 ` Michael S. Tsirkin
@ 2015-05-21 14:20 ` Ouyang Changchun
  2015-05-21 19:03   ` Eric Blake
  2 siblings, 1 reply; 7+ messages in thread
From: Ouyang Changchun @ 2015-05-21 14:20 UTC (permalink / raw)
  To: qemu-devel, mst
  Cc: luke, snabb-devel, thomas.long, n.nikolaev, changchun.ouyang

From: Changchun Ouyang <changchun.ouyang@intel.com>

Based on patch by Nikolay Nikolaev:
Vhost-user will implement the multi queue support in a similar way
to what vhost already has - a separate thread for each queue.
To enable the multi queue funcionality - a new command line parameter
"queues" is introduced for the vhost-user netdev.

Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
---
Changes since v2:
 - fix vq index issue for set_vring_call
   In the case of VHOST_SET_VRING_CALL, the vq_index is not initialized before it is used,
   thus it could be a random value. The random value leads to a crash in vhost after being
   passed down, as vhost uses it as an array index.
 - fix the typo in the doc and description
 - address vq index for reset_owner

Changes since v1:
 - use s->nc.info_str when bringing up/down the backend

 docs/specs/vhost-user.txt |  5 +++++
 hw/net/vhost_net.c        |  3 ++-
 hw/virtio/vhost-user.c    | 11 ++++++++++-
 net/vhost-user.c          | 37 ++++++++++++++++++++++++-------------
 qapi-schema.json          |  6 +++++-
 qemu-options.hx           |  5 +++--
 6 files changed, 49 insertions(+), 18 deletions(-)

diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
index 650bb18..2c8e934 100644
--- a/docs/specs/vhost-user.txt
+++ b/docs/specs/vhost-user.txt
@@ -127,6 +127,11 @@ in the ancillary data:
 If Master is unable to send the full message or receives a wrong reply it will
 close the connection. An optional reconnection mechanism can be implemented.
 
+Multi queue support
+-------------------
+The protocol supports multiple queues by setting all index fields in the sent
+messages to a properly calculated value.
+
 Message types
 -------------
 
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 47f8b89..426b23e 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -157,6 +157,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
 
     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
+    net->dev.vq_index = net->nc->queue_index;
 
     r = vhost_dev_init(&net->dev, options->opaque,
                        options->backend_type, options->force);
@@ -267,7 +268,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
             const VhostOps *vhost_ops = net->dev.vhost_ops;
             int r = vhost_ops->vhost_call(&net->dev, VHOST_RESET_OWNER,
-                                          NULL);
+                                          &file);
             assert(r >= 0);
         }
     }
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index e7ab829..d6f2163 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -210,7 +210,12 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
         break;
 
     case VHOST_SET_OWNER:
+        break;
+
     case VHOST_RESET_OWNER:
+        memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
+        msg.size = sizeof(m.state);
         break;
 
     case VHOST_SET_MEM_TABLE:
@@ -253,17 +258,20 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_NUM:
     case VHOST_SET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         break;
 
     case VHOST_GET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         need_reply = 1;
         break;
 
     case VHOST_SET_VRING_ADDR:
         memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
+        msg.addr.index += dev->vq_index;
         msg.size = sizeof(m.addr);
         break;
 
@@ -271,7 +279,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_CALL:
     case VHOST_SET_VRING_ERR:
         file = arg;
-        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
+        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
         msg.size = sizeof(m.u64);
         if (ioeventfd_enabled() && file->fd > 0) {
             fds[fd_num++] = file->fd;
@@ -313,6 +321,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
                 error_report("Received bad msg size.");
                 return -1;
             }
+            msg.state.index -= dev->vq_index;
             memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
             break;
         default:
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 1d86a2b..41c8a27 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -121,35 +121,39 @@ static void net_vhost_user_event(void *opaque, int event)
     case CHR_EVENT_OPENED:
         vhost_user_start(s);
         net_vhost_link_down(s, false);
-        error_report("chardev \"%s\" went up", s->chr->label);
+        error_report("chardev \"%s\" went up\n", s->nc.info_str);
         break;
     case CHR_EVENT_CLOSED:
         net_vhost_link_down(s, true);
         vhost_user_stop(s);
-        error_report("chardev \"%s\" went down", s->chr->label);
+        error_report("chardev \"%s\" went down\n", s->nc.info_str);
         break;
     }
 }
 
 static int net_vhost_user_init(NetClientState *peer, const char *device,
-                               const char *name, CharDriverState *chr)
+                               const char *name, CharDriverState *chr,
+                               uint32_t queues)
 {
     NetClientState *nc;
     VhostUserState *s;
+    int i;
 
-    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+    for (i = 0; i < queues; i++) {
+        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
 
-    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
-             chr->label);
+        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
+                 i, chr->label);
 
-    s = DO_UPCAST(VhostUserState, nc, nc);
+        s = DO_UPCAST(VhostUserState, nc, nc);
 
-    /* We don't provide a receive callback */
-    s->nc.receive_disabled = 1;
-    s->chr = chr;
-
-    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+        /* We don't provide a receive callback */
+        s->nc.receive_disabled = 1;
+        s->chr = chr;
+        s->nc.queue_index = i;
 
+        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+    }
     return 0;
 }
 
@@ -225,6 +229,7 @@ static int net_vhost_check_net(QemuOpts *opts, void *opaque)
 int net_init_vhost_user(const NetClientOptions *opts, const char *name,
                         NetClientState *peer)
 {
+    uint32_t queues;
     const NetdevVhostUserOptions *vhost_user_opts;
     CharDriverState *chr;
 
@@ -243,6 +248,12 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
         return -1;
     }
 
+    /* number of queues for multiqueue */
+    if (vhost_user_opts->has_queues) {
+        queues = vhost_user_opts->queues;
+    } else {
+        queues = 1;
+    }
 
-    return net_vhost_user_init(peer, "vhost_user", name, chr);
+    return net_vhost_user_init(peer, "vhost_user", name, chr, queues);
 }
diff --git a/qapi-schema.json b/qapi-schema.json
index f97ffa1..53c7839 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -2444,12 +2444,16 @@
 #
 # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
 #
+# @queues: #optional number of queues to be created for multiqueue vhost-user (default: 1).
+#          (Since 2.4)
+#
 # Since 2.1
 ##
 { 'struct': 'NetdevVhostUserOptions',
   'data': {
     'chardev':        'str',
-    '*vhostforce':    'bool' } }
+    '*vhostforce':    'bool',
+    '*queues':        'uint32' } }
 
 ##
 # @NetClientOptions
diff --git a/qemu-options.hx b/qemu-options.hx
index ec356f6..dad035e 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1942,13 +1942,14 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
 netdev.  @code{-net} and @code{-device} with parameter @option{vlan} create the
 required hub automatically.
 
-@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
+@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
 
 Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
 be a unix domain socket backed one. The vhost-user uses a specifically defined
 protocol to pass vhost ioctl replacement messages to an application on the other
 end of the socket. On non-MSIX guests, the feature can be forced with
-@var{vhostforce}.
+@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
+be created for multiqueue vhost-user.
 
 Example:
 @example
-- 
1.8.4.2

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [Qemu-devel] [PATCH v3] vhost-user: add multi queue support
  2015-05-21 14:20 ` [Qemu-devel] [PATCH v3] " Ouyang Changchun
@ 2015-05-21 19:03   ` Eric Blake
  0 siblings, 0 replies; 7+ messages in thread
From: Eric Blake @ 2015-05-21 19:03 UTC (permalink / raw)
  To: Ouyang Changchun, qemu-devel, mst
  Cc: luke, snabb-devel, thomas.long, n.nikolaev

[-- Attachment #1: Type: text/plain, Size: 1786 bytes --]

On 05/21/2015 08:20 AM, Ouyang Changchun wrote:
> From: Changchun Ouyang <changchun.ouyang@intel.com>
> 

It's better to post a v3 patch as a new top-level thread, rather than
in-reply-to an existing thread.

> Based on patch by Nikolay Nikolaev:
> Vhost-user will implement the multi queue support in a similar way
> to what vhost already has - a separate thread for each queue.
> To enable the multi queue funcionality - a new command line parameter

s/funcionality/functionality/

> "queues" is introduced for the vhost-user netdev.
> 
> Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
> Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
> ---
> Changes since v2:
>  - fix vq index issue for set_vring_call
>    When it is the case of VHOST_SET_VRING_CALL, The vq_index is not initialized before it is used,
>    thus it could be a random value. The random value lead to crash in vhost after passing down
>    to vhost, as vhost use this random value to index an array index.
>  - fix the typo in the doc and description
>  - address vq index for reset_owner

> +++ b/qapi-schema.json
> @@ -2444,12 +2444,16 @@
>  #
>  # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
>  #
> +# @queues: #optional number of queues to be created for multiqueue vhost-user (default: 1).

Long line; please wrap to fit under 80 columns.

> +#          (Since 2.4)
> +#
>  # Since 2.1
>  ##
>  { 'struct': 'NetdevVhostUserOptions',
>    'data': {
>      'chardev':        'str',
> -    '*vhostforce':    'bool' } }
> +    '*vhostforce':    'bool',
> +    '*queues':        'uint32' } }

-- 
Eric Blake   eblake redhat com    +1-919-301-3266
Libvirt virtualization library http://libvirt.org


[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 604 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2015-05-21 19:03 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-01-24 12:22 [Qemu-devel] [PATCH v2] vhost-user: add multi queue support Nikolay Nikolaev
2015-01-26 18:51 ` Eric Blake
2015-04-06 15:07 ` Michael S. Tsirkin
2015-04-08 15:18   ` [Qemu-devel] [snabb-devel] " Nikolay Nikolaev
2015-04-09  2:18   ` Ouyang, Changchun
2015-05-21 14:20 ` [Qemu-devel] [PATCH v3] " Ouyang Changchun
2015-05-21 19:03   ` Eric Blake

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.