All of lore.kernel.org
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH v3 0/2] Add live migration for vhost user
@ 2015-06-10 13:43 Thibaut Collet
  2015-06-10 13:43 ` [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration Thibaut Collet
  2015-06-10 13:43 ` [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest Thibaut Collet
  0 siblings, 2 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-10 13:43 UTC (permalink / raw)
  To: qemu-devel; +Cc: jasowang, stefanha, mst

This patch is an update of [PATCH v2] net: Add support of
VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user

v2->v3
Define a receive function for vhost-user to manage the RARP packet sent after a
live migration.
If the guest supports VIRTIO_NET_F_GUEST_ANNOUNCE the packet is discarded (GARP
request is managed by the virtio-net NIC).
Otherwise the RARP packet is transmitted to the vhost client/backend that is
responsible to send the RARP

Thibaut Collet (2):
  vhost user: add support of live migration
  vhost user: Add RARP injection for legacy guest

 docs/specs/vhost-user.txt   |   16 ++++++++++++++++
 hw/net/vhost_net.c          |   23 +++++++++++++++++++++++
 hw/virtio/vhost-user.c      |   11 ++++++++++-
 include/net/vhost_net.h     |    1 +
 linux-headers/linux/vhost.h |    9 +++++++++
 net/vhost-user.c            |   21 +++++++++++++++++++--
 6 files changed, 78 insertions(+), 3 deletions(-)

-- 
1.7.10.4

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration
  2015-06-10 13:43 [Qemu-devel] [PATCH v3 0/2] Add live migration for vhost user Thibaut Collet
@ 2015-06-10 13:43 ` Thibaut Collet
  2015-06-10 13:52   ` Michael S. Tsirkin
  2015-06-23  6:01   ` Michael S. Tsirkin
  2015-06-10 13:43 ` [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest Thibaut Collet
  1 sibling, 2 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-10 13:43 UTC (permalink / raw)
  To: qemu-devel; +Cc: jasowang, stefanha, mst

Some vhost client/backend are able to support live migration.
To provide this service the following features must be added:
1. GARP from guest after live migration:
   the VIRTIO_NET_F_GUEST_ANNOUNCE capability is added to vhost-net when the
   netdev backend is vhost-user, to let the virtio-net NIC manage GARP after
   migration.
2. RARP on vhost-user:
   After a live migration RARP packets are automatically sent on all interfaces. A
   receive callback is added to vhost-user to catch this packet. If guest
   supports VIRTIO_NET_F_GUEST_ANNOUNCE this packet is discarded to avoid
   message duplication (already done by virtio-net NIC). For legacy guest
   without VIRTIO_NET_F_GUEST_ANNOUNCE a warning message is displayed to
   alert the user. RARP must be sent to the vhost client/backend.

Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
---
 hw/net/vhost_net.c      |   15 +++++++++++++++
 include/net/vhost_net.h |    1 +
 net/vhost-user.c        |   21 +++++++++++++++++++--
 3 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 426b23e..4a42325 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
     VIRTIO_NET_F_CTRL_MAC_ADDR,
     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
 
+    VIRTIO_NET_F_GUEST_ANNOUNCE,
+
     VIRTIO_NET_F_MQ,
 
     VHOST_INVALID_FEATURE_BIT
@@ -365,6 +367,15 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
     assert(r >= 0);
 }
 
+void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf, size_t size)
+{
+    if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE)) == 0) {
+        fprintf(stderr,
+                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE support. RARP must be sent by vhost-user backend\n");
+        fflush(stderr);
+    }
+}
+
 void vhost_net_cleanup(struct vhost_net *net)
 {
     vhost_dev_cleanup(&net->dev);
@@ -427,6 +438,10 @@ void vhost_net_stop(VirtIODevice *dev,
 {
 }
 
+void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf, size_t size);
+{
+}
+
 void vhost_net_cleanup(struct vhost_net *net)
 {
 }
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index b1c18a3..e82a0f9 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -20,6 +20,7 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
 int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
 void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
 
+void vhost_net_inject_rarp(VHostNetState *net, const uint8_t *buf, size_t size);
 void vhost_net_cleanup(VHostNetState *net);
 
 unsigned vhost_net_get_features(VHostNetState *net, unsigned features);
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 8d26728..69c2313 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -66,6 +66,24 @@ static void vhost_user_stop(VhostUserState *s)
     s->vhost_net = 0;
 }
 
+static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t *buf,
+                                  size_t size)
+{
+    if (size == 60) {
+        /* Assume it is a RARP request sent automatically after a
+         * live migration */
+        /* The RARP must be sent if guest does not support
+         * VIRTIO_NET_F_GUEST_ANNOUNCE */
+        VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
+
+        vhost_net_inject_rarp(s->vhost_net, buf, size);
+    } else {
+        fprintf(stderr,"Vhost user receives unexpected packets\n");
+        fflush(stderr);
+    }
+    return size;
+}
+
 static void vhost_user_cleanup(NetClientState *nc)
 {
     VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
@@ -91,6 +109,7 @@ static bool vhost_user_has_ufo(NetClientState *nc)
 static NetClientInfo net_vhost_user_info = {
         .type = NET_CLIENT_OPTIONS_KIND_VHOST_USER,
         .size = sizeof(VhostUserState),
+        .receive = vhost_user_receive,
         .cleanup = vhost_user_cleanup,
         .has_vnet_hdr = vhost_user_has_vnet_hdr,
         .has_ufo = vhost_user_has_ufo,
@@ -147,8 +166,6 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
 
         s = DO_UPCAST(VhostUserState, nc, nc);
 
-        /* We don't provide a receive callback */
-        s->nc.receive_disabled = 1;
         s->chr = chr;
         s->nc.queue_index = i;
 
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-10 13:43 [Qemu-devel] [PATCH v3 0/2] Add live migration for vhost user Thibaut Collet
  2015-06-10 13:43 ` [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration Thibaut Collet
@ 2015-06-10 13:43 ` Thibaut Collet
  2015-06-10 15:34   ` Michael S. Tsirkin
  1 sibling, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-10 13:43 UTC (permalink / raw)
  To: qemu-devel; +Cc: jasowang, stefanha, mst

In case of live migration with legacy guest (without VIRTIO_NET_F_GUEST_ANNOUNCE)
a message is added between QEMU and the vhost client/backend.
This message provides the RARP content, prepared by QEMU, to the vhost
client/backend.
The vhost client/backend is responsible to send the RARP.

Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
---
 docs/specs/vhost-user.txt   |   16 ++++++++++++++++
 hw/net/vhost_net.c          |    8 ++++++++
 hw/virtio/vhost-user.c      |   11 ++++++++++-
 linux-headers/linux/vhost.h |    9 +++++++++
 4 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
index 2c8e934..ef5d475 100644
--- a/docs/specs/vhost-user.txt
+++ b/docs/specs/vhost-user.txt
@@ -97,6 +97,7 @@ typedef struct VhostUserMsg {
         uint64_t u64;
         struct vhost_vring_state state;
         struct vhost_vring_addr addr;
+        struct vhost_inject_rarp rarp;
         VhostUserMemory memory;
     };
 } QEMU_PACKED VhostUserMsg;
@@ -132,6 +133,12 @@ Multi queue support
 The protocol supports multiple queues by setting all index fields in the sent
 messages to a properly calculated value.
 
+Live migration support
+----------------------
+The protocol supports live migration. GARP from the migrated guest is done
+through the VIRTIO_NET_F_GUEST_ANNOUNCE mechanism for guest that supports it or
+through RARP.
+
 Message types
 -------------
 
@@ -269,3 +276,12 @@ Message types
       Bits (0-7) of the payload contain the vring index. Bit 8 is the
       invalid FD flag. This flag is set when there is no file descriptor
       in the ancillary data.
+
+ * VHOST_USER_NET_INJECT_RARP
+
+      Id: 15
+      Master payload: rarp content
+
+      Provide the RARP message to send to the guest after a live migration. This
+      message is sent only for guest that does not support
+      VIRTIO_NET_F_GUEST_ANNOUNCE.
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 4a42325..f66d48d 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -369,10 +369,18 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
 
 void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf, size_t size)
 {
+    struct vhost_inject_rarp inject_rarp;
+    memcpy(&inject_rarp.rarp, buf, size);
+
     if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE)) == 0) {
+        const VhostOps *vhost_ops = net->dev.vhost_ops;
+        int r;
+
         fprintf(stderr,
                 "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE support. RARP must be sent by vhost-user backend\n");
         fflush(stderr);
+        r = vhost_ops->vhost_call(&net->dev, VHOST_NET_INJECT_RARP, &inject_rarp);
+        assert(r >= 0);
     }
 }
 
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index d6f2163..2e752ab 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_SET_VRING_KICK = 12,
     VHOST_USER_SET_VRING_CALL = 13,
     VHOST_USER_SET_VRING_ERR = 14,
+    VHOST_USER_NET_INJECT_RARP = 15,
     VHOST_USER_MAX
 } VhostUserRequest;
 
@@ -70,6 +71,7 @@ typedef struct VhostUserMsg {
         uint64_t u64;
         struct vhost_vring_state state;
         struct vhost_vring_addr addr;
+        struct vhost_inject_rarp rarp;
         VhostUserMemory memory;
     };
 } QEMU_PACKED VhostUserMsg;
@@ -104,7 +106,8 @@ static unsigned long int ioctl_to_vhost_user_request[VHOST_USER_MAX] = {
     VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
     VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
     VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
-    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
+    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
+    VHOST_NET_INJECT_RARP   /* VHOST_USER_NET_INJECT_RARP */
 };
 
 static VhostUserRequest vhost_user_request_translate(unsigned long int request)
@@ -287,6 +290,12 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
             msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
         }
         break;
+
+    case VHOST_NET_INJECT_RARP:
+        memcpy(&msg.rarp, arg, sizeof(struct vhost_inject_rarp));
+        msg.size = sizeof(struct vhost_inject_rarp);
+        break;
+
     default:
         error_report("vhost-user trying to send unhandled ioctl");
         return -1;
diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
index c656f61..1920134 100644
--- a/linux-headers/linux/vhost.h
+++ b/linux-headers/linux/vhost.h
@@ -63,6 +63,10 @@ struct vhost_memory {
 	struct vhost_memory_region regions[0];
 };
 
+struct vhost_inject_rarp {
+	__u8 rarp[60];
+};
+
 /* ioctls */
 
 #define VHOST_VIRTIO 0xAF
@@ -121,6 +125,11 @@ struct vhost_memory {
  * device.  This can be used to stop the ring (e.g. for migration). */
 #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
 
+/* Inject a RARP in case of live migration for guest that does not support
+ * VIRTIO_NET_F_GUEST_ANNOUNCE */
+#define VHOST_NET_INJECT_RARP _IOW(VHOST_VIRTIO, 0x31, struct vhost_inject_rarp)
+
+
 /* Feature bits */
 /* Log all write descriptors. Can be changed while device is active. */
 #define VHOST_F_LOG_ALL 26
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration
  2015-06-10 13:43 ` [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration Thibaut Collet
@ 2015-06-10 13:52   ` Michael S. Tsirkin
  2015-06-10 14:22     ` Thibaut Collet
  2015-06-10 15:34     ` Stefan Hajnoczi
  2015-06-23  6:01   ` Michael S. Tsirkin
  1 sibling, 2 replies; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-10 13:52 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: jasowang, qemu-devel, stefanha

On Wed, Jun 10, 2015 at 03:43:02PM +0200, Thibaut Collet wrote:
> Some vhost client/backend are able to support live migration.
> To provide this service the following features must be added:
> 1. GARP from guest after live migration:
>    the VIRTIO_NET_F_GUEST_ANNOUNCE capability is added to vhost-net when netdev
>    backend is vhost-user to let virtio-net NIC manages GARP after migration.
> 2. RARP on vhost-user:
>    After a live migration RARP are automatically sent to any interfaces. A
>    receive callback is added to vhost-user to catch this packet. If guest
>    supports VIRTIO_NET_F_GUEST_ANNOUNCE this packet is discarded to avoid
>    message duplication (already done by virtio-net NIC). For legacy guest
>    without VIRTIO_NET_F_GUEST_ANNOUNCE a warning message is displayed to
>    alert the user. RARP must be sent to the vhost client/backend.
> 
> Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> ---
>  hw/net/vhost_net.c      |   15 +++++++++++++++
>  include/net/vhost_net.h |    1 +
>  net/vhost-user.c        |   21 +++++++++++++++++++--
>  3 files changed, 35 insertions(+), 2 deletions(-)
> 
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 426b23e..4a42325 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
>      VIRTIO_NET_F_CTRL_MAC_ADDR,
>      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
>  
> +    VIRTIO_NET_F_GUEST_ANNOUNCE,
> +
>      VIRTIO_NET_F_MQ,
>  
>      VHOST_INVALID_FEATURE_BIT
> @@ -365,6 +367,15 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
>      assert(r >= 0);
>  }
>  
> +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf, size_t size)
> +{
> +    if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE)) == 0) {
> +        fprintf(stderr,
> +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE support. RARP must be sent by vhost-user backend\n");
> +        fflush(stderr);

And maybe it does, then you are just filling log up with useless
warnings. Pls add some ifdef so this isn't normally compiled in.

> +    }

Can you move this out to vhost-user? It's not a generic function, is it?

> +}
> +
>  void vhost_net_cleanup(struct vhost_net *net)
>  {
>      vhost_dev_cleanup(&net->dev);
> @@ -427,6 +438,10 @@ void vhost_net_stop(VirtIODevice *dev,
>  {
>  }
>  
> +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf, size_t size);
> +{
> +}
> +
>  void vhost_net_cleanup(struct vhost_net *net)
>  {
>  }
> diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> index b1c18a3..e82a0f9 100644
> --- a/include/net/vhost_net.h
> +++ b/include/net/vhost_net.h
> @@ -20,6 +20,7 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
>  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
>  void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
>  
> +void vhost_net_inject_rarp(VHostNetState *net, const uint8_t *buf, size_t size);
>  void vhost_net_cleanup(VHostNetState *net);
>  
>  unsigned vhost_net_get_features(VHostNetState *net, unsigned features);
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 8d26728..69c2313 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -66,6 +66,24 @@ static void vhost_user_stop(VhostUserState *s)
>      s->vhost_net = 0;
>  }
>  
> +static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t *buf,
> +                                  size_t size)
> +{
> +    if (size == 60) {
> +        /* Assume it is a RARP request sent automatically after a
> +         * live migration */
> +        /* The RARP must be sent if guest does not support
> +         * VIRTIO_NET_F_GUEST_ANNOUNCE */
> +        VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
> +
> +        vhost_net_inject_rarp(s->vhost_net, buf, size);
> +    } else {
> +        fprintf(stderr,"Vhost user receives unexpected packets\n");
> +        fflush(stderr);
> +    }
> +    return size;
> +}
> +
>  static void vhost_user_cleanup(NetClientState *nc)
>  {
>      VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
> @@ -91,6 +109,7 @@ static bool vhost_user_has_ufo(NetClientState *nc)
>  static NetClientInfo net_vhost_user_info = {
>          .type = NET_CLIENT_OPTIONS_KIND_VHOST_USER,
>          .size = sizeof(VhostUserState),
> +        .receive = vhost_user_receive,
>          .cleanup = vhost_user_cleanup,
>          .has_vnet_hdr = vhost_user_has_vnet_hdr,
>          .has_ufo = vhost_user_has_ufo,
> @@ -147,8 +166,6 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>  
>          s = DO_UPCAST(VhostUserState, nc, nc);
>  
> -        /* We don't provide a receive callback */
> -        s->nc.receive_disabled = 1;
>          s->chr = chr;
>          s->nc.queue_index = i;
>  
> -- 
> 1.7.10.4

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration
  2015-06-10 13:52   ` Michael S. Tsirkin
@ 2015-06-10 14:22     ` Thibaut Collet
  2015-06-10 14:27       ` Michael S. Tsirkin
  2015-06-10 15:34     ` Stefan Hajnoczi
  1 sibling, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-10 14:22 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

[-- Attachment #1: Type: text/plain, Size: 5608 bytes --]

I am not sure I understand your comments.

Could you confirm that the useless warning is:
+        fprintf(stderr,
+                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE
support. RARP must be sent by vhost-user backend\n");

> Can you move this out to vhost-user? It's not a generic function, is it?

Which part of the code do you think must be moved into a specific file?


On Wed, Jun 10, 2015 at 3:52 PM, Michael S. Tsirkin <mst@redhat.com> wrote:

> On Wed, Jun 10, 2015 at 03:43:02PM +0200, Thibaut Collet wrote:
> > Some vhost client/backend are able to support live migration.
> > To provide this service the following features must be added:
> > 1. GARP from guest after live migration:
> >    the VIRTIO_NET_F_GUEST_ANNOUNCE capability is added to vhost-net when
> netdev
> >    backend is vhost-user to let virtio-net NIC manages GARP after
> migration.
> > 2. RARP on vhost-user:
> >    After a live migration RARP are automatically sent to any interfaces.
> A
> >    receive callback is added to vhost-user to catch this packet. If guest
> >    supports VIRTIO_NET_F_GUEST_ANNOUNCE this packet is discarded to avoid
> >    message duplication (already done by virtio-net NIC). For legacy guest
> >    without VIRTIO_NET_F_GUEST_ANNOUNCE a warning message is displayed to
> >    alert the user. RARP must be sent to the vhost client/backend.
> >
> > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> > ---
> >  hw/net/vhost_net.c      |   15 +++++++++++++++
> >  include/net/vhost_net.h |    1 +
> >  net/vhost-user.c        |   21 +++++++++++++++++++--
> >  3 files changed, 35 insertions(+), 2 deletions(-)
> >
> > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> > index 426b23e..4a42325 100644
> > --- a/hw/net/vhost_net.c
> > +++ b/hw/net/vhost_net.c
> > @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
> >      VIRTIO_NET_F_CTRL_MAC_ADDR,
> >      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
> >
> > +    VIRTIO_NET_F_GUEST_ANNOUNCE,
> > +
> >      VIRTIO_NET_F_MQ,
> >
> >      VHOST_INVALID_FEATURE_BIT
> > @@ -365,6 +367,15 @@ void vhost_net_stop(VirtIODevice *dev,
> NetClientState *ncs,
> >      assert(r >= 0);
> >  }
> >
> > +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf,
> size_t size)
> > +{
> > +    if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE))
> == 0) {
> > +        fprintf(stderr,
> > +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE
> support. RARP must be sent by vhost-user backend\n");
> > +        fflush(stderr);
>
> And maybe it does, then you are just filling log up with useless
> warnings. Pls add some ifdef so this isn't normally compiled in.
>
> > +    }
>
> Can you move this out to vhost-user? It's not a generic function, is it?
>
> > +}
> > +
> >  void vhost_net_cleanup(struct vhost_net *net)
> >  {
> >      vhost_dev_cleanup(&net->dev);
> > @@ -427,6 +438,10 @@ void vhost_net_stop(VirtIODevice *dev,
> >  {
> >  }
> >
> > +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf,
> size_t size);
> > +{
> > +}
> > +
> >  void vhost_net_cleanup(struct vhost_net *net)
> >  {
> >  }
> > diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> > index b1c18a3..e82a0f9 100644
> > --- a/include/net/vhost_net.h
> > +++ b/include/net/vhost_net.h
> > @@ -20,6 +20,7 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice
> *dev);
> >  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int
> total_queues);
> >  void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int
> total_queues);
> >
> > +void vhost_net_inject_rarp(VHostNetState *net, const uint8_t *buf,
> size_t size);
> >  void vhost_net_cleanup(VHostNetState *net);
> >
> >  unsigned vhost_net_get_features(VHostNetState *net, unsigned features);
> > diff --git a/net/vhost-user.c b/net/vhost-user.c
> > index 8d26728..69c2313 100644
> > --- a/net/vhost-user.c
> > +++ b/net/vhost-user.c
> > @@ -66,6 +66,24 @@ static void vhost_user_stop(VhostUserState *s)
> >      s->vhost_net = 0;
> >  }
> >
> > +static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t
> *buf,
> > +                                  size_t size)
> > +{
> > +    if (size == 60) {
> > +        /* Assume it is a RARP request sent automatically after a
> > +         * live migration */
> > +        /* The RARP must be sent if guest does not support
> > +         * VIRTIO_NET_F_GUEST_ANNOUNCE */
> > +        VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
> > +
> > +        vhost_net_inject_rarp(s->vhost_net, buf, size);
> > +    } else {
> > +        fprintf(stderr,"Vhost user receives unexpected packets\n");
> > +        fflush(stderr);
> > +    }
> > +    return size;
> > +}
> > +
> >  static void vhost_user_cleanup(NetClientState *nc)
> >  {
> >      VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
> > @@ -91,6 +109,7 @@ static bool vhost_user_has_ufo(NetClientState *nc)
> >  static NetClientInfo net_vhost_user_info = {
> >          .type = NET_CLIENT_OPTIONS_KIND_VHOST_USER,
> >          .size = sizeof(VhostUserState),
> > +        .receive = vhost_user_receive,
> >          .cleanup = vhost_user_cleanup,
> >          .has_vnet_hdr = vhost_user_has_vnet_hdr,
> >          .has_ufo = vhost_user_has_ufo,
> > @@ -147,8 +166,6 @@ static int net_vhost_user_init(NetClientState *peer,
> const char *device,
> >
> >          s = DO_UPCAST(VhostUserState, nc, nc);
> >
> > -        /* We don't provide a receive callback */
> > -        s->nc.receive_disabled = 1;
> >          s->chr = chr;
> >          s->nc.queue_index = i;
> >
> > --
> > 1.7.10.4
>

[-- Attachment #2: Type: text/html, Size: 7175 bytes --]

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration
  2015-06-10 14:22     ` Thibaut Collet
@ 2015-06-10 14:27       ` Michael S. Tsirkin
  2015-06-10 15:24         ` Thibaut Collet
  0 siblings, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-10 14:27 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

On Wed, Jun 10, 2015 at 04:22:10PM +0200, Thibaut Collet wrote:
> I am not sure to understand your comments.
> 
> Could you confirm that the useless warning is:
> +        fprintf(stderr,
> +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE support.
> RARP must be sent by vhost-user backend\n");

Yes.

> > Can you move this out to vhost-user? It's not a generic function, is it?
> 
> Which part of the code do you think it must be moved in a specific file ?

vhost_net_inject_rarp is vhost-user specific.

> 
> On Wed, Jun 10, 2015 at 3:52 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> 
>     On Wed, Jun 10, 2015 at 03:43:02PM +0200, Thibaut Collet wrote:
>     > Some vhost client/backend are able to support live migration.
>     > To provide this service the following features must be added:
>     > 1. GARP from guest after live migration:
>     >    the VIRTIO_NET_F_GUEST_ANNOUNCE capability is added to vhost-net when
>     netdev
>     >    backend is vhost-user to let virtio-net NIC manages GARP after
>     migration.
>     > 2. RARP on vhost-user:
>     >    After a live migration RARP are automatically sent to any interfaces.
>     A
>     >    receive callback is added to vhost-user to catch this packet. If guest
>     >    supports VIRTIO_NET_F_GUEST_ANNOUNCE this packet is discarded to avoid
>     >    message duplication (already done by virtio-net NIC). For legacy guest
>     >    without VIRTIO_NET_F_GUEST_ANNOUNCE a warning message is displayed to
>     >    alert the user. RARP must be sent to the vhost client/backend.
>     >
>     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
>     > ---
>     >  hw/net/vhost_net.c      |   15 +++++++++++++++
>     >  include/net/vhost_net.h |    1 +
>     >  net/vhost-user.c        |   21 +++++++++++++++++++--
>     >  3 files changed, 35 insertions(+), 2 deletions(-)
>     >
>     > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
>     > index 426b23e..4a42325 100644
>     > --- a/hw/net/vhost_net.c
>     > +++ b/hw/net/vhost_net.c
>     > @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
>     >      VIRTIO_NET_F_CTRL_MAC_ADDR,
>     >      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
>     >
>     > +    VIRTIO_NET_F_GUEST_ANNOUNCE,
>     > +
>     >      VIRTIO_NET_F_MQ,
>     >
>     >      VHOST_INVALID_FEATURE_BIT
>     > @@ -365,6 +367,15 @@ void vhost_net_stop(VirtIODevice *dev,
>     NetClientState *ncs,
>     >      assert(r >= 0);
>     >  }
>     >
>     > +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf,
>     size_t size)
>     > +{
>     > +    if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE)) =
>     = 0) {
>     > +        fprintf(stderr,
>     > +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE
>     support. RARP must be sent by vhost-user backend\n");
>     > +        fflush(stderr);
> 
>     And maybe it does, then you are just filling log up with useless
>     warnings. Pls add some ifdef so this isn't normally compiled in.
> 
>     > +    }
> 
>     Can you move this out to vhost-user? It's not a generic function, is it?
> 
>     > +}
>     > +
>     >  void vhost_net_cleanup(struct vhost_net *net)
>     >  {
>     >      vhost_dev_cleanup(&net->dev);
>     > @@ -427,6 +438,10 @@ void vhost_net_stop(VirtIODevice *dev,
>     >  {
>     >  }
>     >
>     > +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf,
>     size_t size);
>     > +{
>     > +}
>     > +
>     >  void vhost_net_cleanup(struct vhost_net *net)
>     >  {
>     >  }
>     > diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
>     > index b1c18a3..e82a0f9 100644
>     > --- a/include/net/vhost_net.h
>     > +++ b/include/net/vhost_net.h
>     > @@ -20,6 +20,7 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice
>     *dev);
>     >  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int
>     total_queues);
>     >  void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int
>     total_queues);
>     >
>     > +void vhost_net_inject_rarp(VHostNetState *net, const uint8_t *buf,
>     size_t size);
>     >  void vhost_net_cleanup(VHostNetState *net);
>     >
>     >  unsigned vhost_net_get_features(VHostNetState *net, unsigned features);
>     > diff --git a/net/vhost-user.c b/net/vhost-user.c
>     > index 8d26728..69c2313 100644
>     > --- a/net/vhost-user.c
>     > +++ b/net/vhost-user.c
>     > @@ -66,6 +66,24 @@ static void vhost_user_stop(VhostUserState *s)
>     >      s->vhost_net = 0;
>     >  }
>     >
>     > +static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t
>     *buf,
>     > +                                  size_t size)
>     > +{
>     > +    if (size == 60) {
>     > +        /* Assume it is a RARP request sent automatically after a
>     > +         * live migration */
>     > +        /* The RARP must be sent if guest does not support
>     > +         * VIRTIO_NET_F_GUEST_ANNOUNCE */
>     > +        VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
>     > +
>     > +        vhost_net_inject_rarp(s->vhost_net, buf, size);
>     > +    } else {
>     > +        fprintf(stderr,"Vhost user receives unexpected packets\n");
>     > +        fflush(stderr);
>     > +    }
>     > +    return size;
>     > +}
>     > +
>     >  static void vhost_user_cleanup(NetClientState *nc)
>     >  {
>     >      VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
>     > @@ -91,6 +109,7 @@ static bool vhost_user_has_ufo(NetClientState *nc)
>     >  static NetClientInfo net_vhost_user_info = {
>     >          .type = NET_CLIENT_OPTIONS_KIND_VHOST_USER,
>     >          .size = sizeof(VhostUserState),
>     > +        .receive = vhost_user_receive,
>     >          .cleanup = vhost_user_cleanup,
>     >          .has_vnet_hdr = vhost_user_has_vnet_hdr,
>     >          .has_ufo = vhost_user_has_ufo,
>     > @@ -147,8 +166,6 @@ static int net_vhost_user_init(NetClientState *peer,
>     const char *device,
>     >
>     >          s = DO_UPCAST(VhostUserState, nc, nc);
>     >
>     > -        /* We don't provide a receive callback */
>     > -        s->nc.receive_disabled = 1;
>     >          s->chr = chr;
>     >          s->nc.queue_index = i;
>     >
>     > --
>     > 1.7.10.4
> 
> 

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration
  2015-06-10 14:27       ` Michael S. Tsirkin
@ 2015-06-10 15:24         ` Thibaut Collet
  0 siblings, 0 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-10 15:24 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

[-- Attachment #1: Type: text/plain, Size: 7238 bytes --]

> vhost_net_inject_rarp is vhost-user specific.

For this function, testing the VIRTIO_NET_F_GUEST_ANNOUNCE bit is important
in order to decide whether to call a new vhost ioctl (see next patch) when
the guest does not support this feature, allowing the vhost client/backend
to send the RARP packet.
Using the VIRTIO_NET_F_GUEST_ANNOUNCE field and calling a vhost ioctl does
not seem appropriate in the net/vhost-user.c file.

Do you suggest creating a new hw/net/vhost_user_net.c file to manage this
new feature?

On Wed, Jun 10, 2015 at 4:27 PM, Michael S. Tsirkin <mst@redhat.com> wrote:

> On Wed, Jun 10, 2015 at 04:22:10PM +0200, Thibaut Collet wrote:
> > I am not sure to understand your comments.
> >
> > Could you confirm that the useless warning is:
> > +        fprintf(stderr,
> > +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE
> support.
> > RARP must be sent by vhost-user backend\n");
>
> Yes.
>
> > > Can you move this out to vhost-user? It's not a generic function, is
> it?
> >
> > Which part of the code do you think it must be moved in a specific file ?
>
> vhost_net_inject_rarp is vhost-user specific.
>
> >
> > On Wed, Jun 10, 2015 at 3:52 PM, Michael S. Tsirkin <mst@redhat.com>
> wrote:
> >
> >     On Wed, Jun 10, 2015 at 03:43:02PM +0200, Thibaut Collet wrote:
> >     > Some vhost client/backend are able to support live migration.
> >     > To provide this service the following features must be added:
> >     > 1. GARP from guest after live migration:
> >     >    the VIRTIO_NET_F_GUEST_ANNOUNCE capability is added to
> vhost-net when
> >     netdev
> >     >    backend is vhost-user to let virtio-net NIC manages GARP after
> >     migration.
> >     > 2. RARP on vhost-user:
> >     >    After a live migration RARP are automatically sent to any
> interfaces.
> >     A
> >     >    receive callback is added to vhost-user to catch this packet.
> If guest
> >     >    supports VIRTIO_NET_F_GUEST_ANNOUNCE this packet is discarded
> to avoid
> >     >    message duplication (already done by virtio-net NIC). For
> legacy guest
> >     >    without VIRTIO_NET_F_GUEST_ANNOUNCE a warning message is
> displayed to
> >     >    alert the user. RARP must be sent to the vhost client/backend.
> >     >
> >     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> >     > ---
> >     >  hw/net/vhost_net.c      |   15 +++++++++++++++
> >     >  include/net/vhost_net.h |    1 +
> >     >  net/vhost-user.c        |   21 +++++++++++++++++++--
> >     >  3 files changed, 35 insertions(+), 2 deletions(-)
> >     >
> >     > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> >     > index 426b23e..4a42325 100644
> >     > --- a/hw/net/vhost_net.c
> >     > +++ b/hw/net/vhost_net.c
> >     > @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
> >     >      VIRTIO_NET_F_CTRL_MAC_ADDR,
> >     >      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
> >     >
> >     > +    VIRTIO_NET_F_GUEST_ANNOUNCE,
> >     > +
> >     >      VIRTIO_NET_F_MQ,
> >     >
> >     >      VHOST_INVALID_FEATURE_BIT
> >     > @@ -365,6 +367,15 @@ void vhost_net_stop(VirtIODevice *dev,
> >     NetClientState *ncs,
> >     >      assert(r >= 0);
> >     >  }
> >     >
> >     > +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t
> *buf,
> >     size_t size)
> >     > +{
> >     > +    if ((net->dev.acked_features & (1 <<
> VIRTIO_NET_F_GUEST_ANNOUNCE)) =
> >     = 0) {
> >     > +        fprintf(stderr,
> >     > +                "Warning: Guest with no
> VIRTIO_NET_F_GUEST_ANNOUNCE
> >     support. RARP must be sent by vhost-user backend\n");
> >     > +        fflush(stderr);
> >
> >     And maybe it does, then you are just filling log up with useless
> >     warnings. Pls add some ifdef so this isn't normally compiled in.
> >
> >     > +    }
> >
> >     Can you move this out to vhost-user? It's not a generic function, is
> it?
> >
> >     > +}
> >     > +
> >     >  void vhost_net_cleanup(struct vhost_net *net)
> >     >  {
> >     >      vhost_dev_cleanup(&net->dev);
> >     > @@ -427,6 +438,10 @@ void vhost_net_stop(VirtIODevice *dev,
> >     >  {
> >     >  }
> >     >
> >     > +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t
> *buf,
> >     size_t size);
> >     > +{
> >     > +}
> >     > +
> >     >  void vhost_net_cleanup(struct vhost_net *net)
> >     >  {
> >     >  }
> >     > diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> >     > index b1c18a3..e82a0f9 100644
> >     > --- a/include/net/vhost_net.h
> >     > +++ b/include/net/vhost_net.h
> >     > @@ -20,6 +20,7 @@ bool vhost_net_query(VHostNetState *net,
> VirtIODevice
> >     *dev);
> >     >  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int
> >     total_queues);
> >     >  void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int
> >     total_queues);
> >     >
> >     > +void vhost_net_inject_rarp(VHostNetState *net, const uint8_t *buf,
> >     size_t size);
> >     >  void vhost_net_cleanup(VHostNetState *net);
> >     >
> >     >  unsigned vhost_net_get_features(VHostNetState *net, unsigned
> features);
> >     > diff --git a/net/vhost-user.c b/net/vhost-user.c
> >     > index 8d26728..69c2313 100644
> >     > --- a/net/vhost-user.c
> >     > +++ b/net/vhost-user.c
> >     > @@ -66,6 +66,24 @@ static void vhost_user_stop(VhostUserState *s)
> >     >      s->vhost_net = 0;
> >     >  }
> >     >
> >     > +static ssize_t vhost_user_receive(NetClientState *nc, const
> uint8_t
> >     *buf,
> >     > +                                  size_t size)
> >     > +{
> >     > +    if (size == 60) {
> >     > +        /* Assume it is a RARP request sent automatically after a
> >     > +         * live migration */
> >     > +        /* The RARP must be sent if guest does not support
> >     > +         * VIRTIO_NET_F_GUEST_ANNOUNCE */
> >     > +        VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
> >     > +
> >     > +        vhost_net_inject_rarp(s->vhost_net, buf, size);
> >     > +    } else {
> >     > +        fprintf(stderr,"Vhost user receives unexpected
> packets\n");
> >     > +        fflush(stderr);
> >     > +    }
> >     > +    return size;
> >     > +}
> >     > +
> >     >  static void vhost_user_cleanup(NetClientState *nc)
> >     >  {
> >     >      VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
> >     > @@ -91,6 +109,7 @@ static bool vhost_user_has_ufo(NetClientState
> *nc)
> >     >  static NetClientInfo net_vhost_user_info = {
> >     >          .type = NET_CLIENT_OPTIONS_KIND_VHOST_USER,
> >     >          .size = sizeof(VhostUserState),
> >     > +        .receive = vhost_user_receive,
> >     >          .cleanup = vhost_user_cleanup,
> >     >          .has_vnet_hdr = vhost_user_has_vnet_hdr,
> >     >          .has_ufo = vhost_user_has_ufo,
> >     > @@ -147,8 +166,6 @@ static int net_vhost_user_init(NetClientState
> *peer,
> >     const char *device,
> >     >
> >     >          s = DO_UPCAST(VhostUserState, nc, nc);
> >     >
> >     > -        /* We don't provide a receive callback */
> >     > -        s->nc.receive_disabled = 1;
> >     >          s->chr = chr;
> >     >          s->nc.queue_index = i;
> >     >
> >     > --
> >     > 1.7.10.4
> >
> >
>

[-- Attachment #2: Type: text/html, Size: 9917 bytes --]

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-10 13:43 ` [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest Thibaut Collet
@ 2015-06-10 15:34   ` Michael S. Tsirkin
  2015-06-10 15:48     ` Thibaut Collet
  0 siblings, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-10 15:34 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: jasowang, qemu-devel, stefanha

On Wed, Jun 10, 2015 at 03:43:03PM +0200, Thibaut Collet wrote:
> In case of live migration with legacy guest (without VIRTIO_NET_F_GUEST_ANNOUNCE)
> a message is added between QEMU and the vhost client/backend.
> This message provides the RARP content, prepared by QEMU, to the vhost
> client/backend.
> The vhost client/backend is responsible to send the RARP.
> 
> Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> ---
>  docs/specs/vhost-user.txt   |   16 ++++++++++++++++
>  hw/net/vhost_net.c          |    8 ++++++++
>  hw/virtio/vhost-user.c      |   11 ++++++++++-
>  linux-headers/linux/vhost.h |    9 +++++++++
>  4 files changed, 43 insertions(+), 1 deletion(-)
> 
> diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> index 2c8e934..ef5d475 100644
> --- a/docs/specs/vhost-user.txt
> +++ b/docs/specs/vhost-user.txt
> @@ -97,6 +97,7 @@ typedef struct VhostUserMsg {
>          uint64_t u64;
>          struct vhost_vring_state state;
>          struct vhost_vring_addr addr;
> +        struct vhost_inject_rarp rarp;
>          VhostUserMemory memory;
>      };
>  } QEMU_PACKED VhostUserMsg;
> @@ -132,6 +133,12 @@ Multi queue support
>  The protocol supports multiple queues by setting all index fields in the sent
>  messages to a properly calculated value.
>  
> +Live migration support
> +----------------------
> +The protocol supports live migration. GARP from the migrated guest is done
> +through the VIRTIO_NET_F_GUEST_ANNOUNCE mechanism for guest that supports it or
> +through RARP.
> +
>  Message types
>  -------------
>  
> @@ -269,3 +276,12 @@ Message types
>        Bits (0-7) of the payload contain the vring index. Bit 8 is the
>        invalid FD flag. This flag is set when there is no file descriptor
>        in the ancillary data.
> +
> + * VHOST_USER_NET_INJECT_RARP
> +
> +      Id: 15
> +      Master payload: rarp content
> +
> +      Provide the RARP message to send to the guest after a live migration. This
> +      message is sent only for guest that does not support
> +      VIRTIO_NET_F_GUEST_ANNOUNCE.

I don't see why this is needed.
Can't backend simply send rarp itself?
Why do we need to involve QEMU?



> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 4a42325..f66d48d 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -369,10 +369,18 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
>  
>  void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf, size_t size)
>  {
> +    struct vhost_inject_rarp inject_rarp;
> +    memcpy(&inject_rarp.rarp, buf, size);
> +
>      if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE)) == 0) {
> +        const VhostOps *vhost_ops = net->dev.vhost_ops;
> +        int r;
> +
>          fprintf(stderr,
>                  "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE support. RARP must be sent by vhost-user backend\n");
>          fflush(stderr);
> +        r = vhost_ops->vhost_call(&net->dev, VHOST_NET_INJECT_RARP, &inject_rarp);
> +        assert(r >= 0);
>      }
>  }
>  
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index d6f2163..2e752ab 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
>      VHOST_USER_SET_VRING_KICK = 12,
>      VHOST_USER_SET_VRING_CALL = 13,
>      VHOST_USER_SET_VRING_ERR = 14,
> +    VHOST_USER_NET_INJECT_RARP = 15,
>      VHOST_USER_MAX
>  } VhostUserRequest;
>  
> @@ -70,6 +71,7 @@ typedef struct VhostUserMsg {
>          uint64_t u64;
>          struct vhost_vring_state state;
>          struct vhost_vring_addr addr;
> +        struct vhost_inject_rarp rarp;
>          VhostUserMemory memory;
>      };
>  } QEMU_PACKED VhostUserMsg;
> @@ -104,7 +106,8 @@ static unsigned long int ioctl_to_vhost_user_request[VHOST_USER_MAX] = {
>      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
>      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
>      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
> -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
> +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
> +    VHOST_NET_INJECT_RARP   /* VHOST_USER_NET_INJECT_RARP */
>  };
>  
>  static VhostUserRequest vhost_user_request_translate(unsigned long int request)
> @@ -287,6 +290,12 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
>              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
>          }
>          break;
> +
> +    case VHOST_NET_INJECT_RARP:
> +        memcpy(&msg.rarp, arg, sizeof(struct vhost_inject_rarp));
> +        msg.size = sizeof(struct vhost_inject_rarp);
> +        break;
> +
>      default:
>          error_report("vhost-user trying to send unhandled ioctl");
>          return -1;
> diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
> index c656f61..1920134 100644
> --- a/linux-headers/linux/vhost.h
> +++ b/linux-headers/linux/vhost.h
> @@ -63,6 +63,10 @@ struct vhost_memory {
>  	struct vhost_memory_region regions[0];
>  };
>  
> +struct vhost_inject_rarp {
> +	__u8 rarp[60];
> +};
> +
>  /* ioctls */
>  
>  #define VHOST_VIRTIO 0xAF
> @@ -121,6 +125,11 @@ struct vhost_memory {
>   * device.  This can be used to stop the ring (e.g. for migration). */
>  #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
>  
> +/* Inject a RARP in case of live migration for guest that does not support
> + * VIRTIO_NET_F_GUEST_ANNOUNCE */
> +#define VHOST_NET_INJECT_RARP _IOW(VHOST_VIRTIO, 0x31, struct vhost_inject_rarp)
> +
> +
>  /* Feature bits */
>  /* Log all write descriptors. Can be changed while device is active. */
>  #define VHOST_F_LOG_ALL 26
> -- 
> 1.7.10.4

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration
  2015-06-10 13:52   ` Michael S. Tsirkin
  2015-06-10 14:22     ` Thibaut Collet
@ 2015-06-10 15:34     ` Stefan Hajnoczi
  1 sibling, 0 replies; 48+ messages in thread
From: Stefan Hajnoczi @ 2015-06-10 15:34 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Thibaut Collet, jasowang, qemu-devel

[-- Attachment #1: Type: text/plain, Size: 1529 bytes --]

On Wed, Jun 10, 2015 at 03:52:43PM +0200, Michael S. Tsirkin wrote:
> > +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf, size_t size)
> > +{
> > +    if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE)) == 0) {
> > +        fprintf(stderr,
> > +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE support. RARP must be sent by vhost-user backend\n");
> > +        fflush(stderr);
> 
> And maybe it does, then you are just filling log up with useless
> warnings. Pls add some ifdef so this isn't normally compiled in.

Hmm...I guess vhost-user backends can implement RARP without QEMU or the
guest driver knowing.  Let's just drop the warning completely.  I
requested it but now it seems spurious.

> > +static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t *buf,
> > +                                  size_t size)
> > +{
> > +    if (size == 60) {
> > +        /* Assume it is a RARP request sent automatically after a
> > +         * live migration */
> > +        /* The RARP must be sent if guest does not support
> > +         * VIRTIO_NET_F_GUEST_ANNOUNCE */
> > +        VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
> > +
> > +        vhost_net_inject_rarp(s->vhost_net, buf, size);
> > +    } else {
> > +        fprintf(stderr,"Vhost user receives unexpected packets\n");
> > +        fflush(stderr);

Please use a static bool flag to only print this once.  There's a risk
of flooding logs if we don't limit it.

[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-10 15:34   ` Michael S. Tsirkin
@ 2015-06-10 15:48     ` Thibaut Collet
  2015-06-10 16:00       ` Michael S. Tsirkin
  0 siblings, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-10 15:48 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

[-- Attachment #1: Type: text/plain, Size: 6482 bytes --]

I have involved QEMU because QEMU prepares the RARP. I agree that the backend
probably has all the information needed to do that itself.
But the backend does not know whether the guest supports
VIRTIO_NET_F_GUEST_ANNOUNCE, so it would send a useless RARP.

Maybe this duplication of requests is not very important and in this case
this patch is not mandatory.

On Wed, Jun 10, 2015 at 5:34 PM, Michael S. Tsirkin <mst@redhat.com> wrote:

> On Wed, Jun 10, 2015 at 03:43:03PM +0200, Thibaut Collet wrote:
> > In case of live migration with legacy guest (without
> VIRTIO_NET_F_GUEST_ANNOUNCE)
> > a message is added between QEMU and the vhost client/backend.
> > This message provides the RARP content, prepared by QEMU, to the vhost
> > client/backend.
> > The vhost client/backend is responsible to send the RARP.
> >
> > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> > ---
> >  docs/specs/vhost-user.txt   |   16 ++++++++++++++++
> >  hw/net/vhost_net.c          |    8 ++++++++
> >  hw/virtio/vhost-user.c      |   11 ++++++++++-
> >  linux-headers/linux/vhost.h |    9 +++++++++
> >  4 files changed, 43 insertions(+), 1 deletion(-)
> >
> > diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> > index 2c8e934..ef5d475 100644
> > --- a/docs/specs/vhost-user.txt
> > +++ b/docs/specs/vhost-user.txt
> > @@ -97,6 +97,7 @@ typedef struct VhostUserMsg {
> >          uint64_t u64;
> >          struct vhost_vring_state state;
> >          struct vhost_vring_addr addr;
> > +        struct vhost_inject_rarp rarp;
> >          VhostUserMemory memory;
> >      };
> >  } QEMU_PACKED VhostUserMsg;
> > @@ -132,6 +133,12 @@ Multi queue support
> >  The protocol supports multiple queues by setting all index fields in
> the sent
> >  messages to a properly calculated value.
> >
> > +Live migration support
> > +----------------------
> > +The protocol supports live migration. GARP from the migrated guest is
> done
> > +through the VIRTIO_NET_F_GUEST_ANNOUNCE mechanism for guest that
> supports it or
> > +through RARP.
> > +
> >  Message types
> >  -------------
> >
> > @@ -269,3 +276,12 @@ Message types
> >        Bits (0-7) of the payload contain the vring index. Bit 8 is the
> >        invalid FD flag. This flag is set when there is no file descriptor
> >        in the ancillary data.
> > +
> > + * VHOST_USER_NET_INJECT_RARP
> > +
> > +      Id: 15
> > +      Master payload: rarp content
> > +
> > +      Provide the RARP message to send to the guest after a live
> migration. This
> > +      message is sent only for guest that does not support
> > +      VIRTIO_NET_F_GUEST_ANNOUNCE.
>
> I don't see why this is needed.
> Can't backend simply send rarp itself?
> Why do we need to involve QEMU?
>
>
>
> > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> > index 4a42325..f66d48d 100644
> > --- a/hw/net/vhost_net.c
> > +++ b/hw/net/vhost_net.c
> > @@ -369,10 +369,18 @@ void vhost_net_stop(VirtIODevice *dev,
> NetClientState *ncs,
> >
> >  void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf,
> size_t size)
> >  {
> > +    struct vhost_inject_rarp inject_rarp;
> > +    memcpy(&inject_rarp.rarp, buf, size);
> > +
> >      if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE))
> == 0) {
> > +        const VhostOps *vhost_ops = net->dev.vhost_ops;
> > +        int r;
> > +
> >          fprintf(stderr,
> >                  "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE
> support. RARP must be sent by vhost-user backend\n");
> >          fflush(stderr);
> > +        r = vhost_ops->vhost_call(&net->dev, VHOST_NET_INJECT_RARP,
> &inject_rarp);
> > +        assert(r >= 0);
> >      }
> >  }
> >
> > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> > index d6f2163..2e752ab 100644
> > --- a/hw/virtio/vhost-user.c
> > +++ b/hw/virtio/vhost-user.c
> > @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
> >      VHOST_USER_SET_VRING_KICK = 12,
> >      VHOST_USER_SET_VRING_CALL = 13,
> >      VHOST_USER_SET_VRING_ERR = 14,
> > +    VHOST_USER_NET_INJECT_RARP = 15,
> >      VHOST_USER_MAX
> >  } VhostUserRequest;
> >
> > @@ -70,6 +71,7 @@ typedef struct VhostUserMsg {
> >          uint64_t u64;
> >          struct vhost_vring_state state;
> >          struct vhost_vring_addr addr;
> > +        struct vhost_inject_rarp rarp;
> >          VhostUserMemory memory;
> >      };
> >  } QEMU_PACKED VhostUserMsg;
> > @@ -104,7 +106,8 @@ static unsigned long int
> ioctl_to_vhost_user_request[VHOST_USER_MAX] = {
> >      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
> >      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
> >      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
> > -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
> > +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
> > +    VHOST_NET_INJECT_RARP   /* VHOST_USER_NET_INJECT_RARP */
> >  };
> >
> >  static VhostUserRequest vhost_user_request_translate(unsigned long int
> request)
> > @@ -287,6 +290,12 @@ static int vhost_user_call(struct vhost_dev *dev,
> unsigned long int request,
> >              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
> >          }
> >          break;
> > +
> > +    case VHOST_NET_INJECT_RARP:
> > +        memcpy(&msg.rarp, arg, sizeof(struct vhost_inject_rarp));
> > +        msg.size = sizeof(struct vhost_inject_rarp);
> > +        break;
> > +
> >      default:
> >          error_report("vhost-user trying to send unhandled ioctl");
> >          return -1;
> > diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
> > index c656f61..1920134 100644
> > --- a/linux-headers/linux/vhost.h
> > +++ b/linux-headers/linux/vhost.h
> > @@ -63,6 +63,10 @@ struct vhost_memory {
> >       struct vhost_memory_region regions[0];
> >  };
> >
> > +struct vhost_inject_rarp {
> > +     __u8 rarp[60];
> > +};
> > +
> >  /* ioctls */
> >
> >  #define VHOST_VIRTIO 0xAF
> > @@ -121,6 +125,11 @@ struct vhost_memory {
> >   * device.  This can be used to stop the ring (e.g. for migration). */
> >  #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct
> vhost_vring_file)
> >
> > +/* Inject a RARP in case of live migration for guest that does not
> support
> > + * VIRTIO_NET_F_GUEST_ANNOUNCE */
> > +#define VHOST_NET_INJECT_RARP _IOW(VHOST_VIRTIO, 0x31, struct
> vhost_inject_rarp)
> > +
> > +
> >  /* Feature bits */
> >  /* Log all write descriptors. Can be changed while device is active. */
> >  #define VHOST_F_LOG_ALL 26
> > --
> > 1.7.10.4
>

[-- Attachment #2: Type: text/html, Size: 8271 bytes --]

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-10 15:48     ` Thibaut Collet
@ 2015-06-10 16:00       ` Michael S. Tsirkin
  2015-06-10 20:25         ` Thibaut Collet
  0 siblings, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-10 16:00 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

On Wed, Jun 10, 2015 at 05:48:47PM +0200, Thibaut Collet wrote:
> I have involved QEMU because QEMU prepares the rarp. I agree that backend has
> probably all the information to do that.
> But backend does not know if the guest supports the VIRTIO_NET_F_GUEST_ANNOUNCE

Why not?  Backend has the acked feature mask.


> and will send a useless rarp.
> Maybe this duplication of requests is not very important and in this case this
> patch is not mandatory.
> 
> On Wed, Jun 10, 2015 at 5:34 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> 
>     On Wed, Jun 10, 2015 at 03:43:03PM +0200, Thibaut Collet wrote:
>     > In case of live migration with legacy guest (without
>     VIRTIO_NET_F_GUEST_ANNOUNCE)
>     > a message is added between QEMU and the vhost client/backend.
>     > This message provides the RARP content, prepared by QEMU, to the vhost
>     > client/backend.
>     > The vhost client/backend is responsible to send the RARP.
>     >
>     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
>     > ---
>     >  docs/specs/vhost-user.txt   |   16 ++++++++++++++++
>     >  hw/net/vhost_net.c          |    8 ++++++++
>     >  hw/virtio/vhost-user.c      |   11 ++++++++++-
>     >  linux-headers/linux/vhost.h |    9 +++++++++
>     >  4 files changed, 43 insertions(+), 1 deletion(-)
>     >
>     > diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
>     > index 2c8e934..ef5d475 100644
>     > --- a/docs/specs/vhost-user.txt
>     > +++ b/docs/specs/vhost-user.txt
>     > @@ -97,6 +97,7 @@ typedef struct VhostUserMsg {
>     >          uint64_t u64;
>     >          struct vhost_vring_state state;
>     >          struct vhost_vring_addr addr;
>     > +        struct vhost_inject_rarp rarp;
>     >          VhostUserMemory memory;
>     >      };
>     >  } QEMU_PACKED VhostUserMsg;
>     > @@ -132,6 +133,12 @@ Multi queue support
>     >  The protocol supports multiple queues by setting all index fields in the
>     sent
>     >  messages to a properly calculated value.
>     >
>     > +Live migration support
>     > +----------------------
>     > +The protocol supports live migration. GARP from the migrated guest is
>     done
>     > +through the VIRTIO_NET_F_GUEST_ANNOUNCE mechanism for guest that
>     supports it or
>     > +through RARP.
>     > +
>     >  Message types
>     >  -------------
>     >
>     > @@ -269,3 +276,12 @@ Message types
>     >        Bits (0-7) of the payload contain the vring index. Bit 8 is the
>     >        invalid FD flag. This flag is set when there is no file descriptor
>     >        in the ancillary data.
>     > +
>     > + * VHOST_USER_NET_INJECT_RARP
>     > +
>     > +      Id: 15
>     > +      Master payload: rarp content
>     > +
>     > +      Provide the RARP message to send to the guest after a live
>     migration. This
>     > +      message is sent only for guest that does not support
>     > +      VIRTIO_NET_F_GUEST_ANNOUNCE.
> 
>     I don't see why this is needed.
>     Can't backend simply send rarp itself?
>     Why do we need to involve QEMU?
> 
> 
> 
>     > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
>     > index 4a42325..f66d48d 100644
>     > --- a/hw/net/vhost_net.c
>     > +++ b/hw/net/vhost_net.c
>     > @@ -369,10 +369,18 @@ void vhost_net_stop(VirtIODevice *dev,
>     NetClientState *ncs,
>     >
>     >  void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf,
>     size_t size)
>     >  {
>     > +    struct vhost_inject_rarp inject_rarp;
>     > +    memcpy(&inject_rarp.rarp, buf, size);
>     > +
>     >      if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE)) =
>     = 0) {
>     > +        const VhostOps *vhost_ops = net->dev.vhost_ops;
>     > +        int r;
>     > +
>     >          fprintf(stderr,
>     >                  "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE
>     support. RARP must be sent by vhost-user backend\n");
>     >          fflush(stderr);
>     > +        r = vhost_ops->vhost_call(&net->dev, VHOST_NET_INJECT_RARP, &
>     inject_rarp);
>     > +        assert(r >= 0);
>     >      }
>     >  }
>     >
>     > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>     > index d6f2163..2e752ab 100644
>     > --- a/hw/virtio/vhost-user.c
>     > +++ b/hw/virtio/vhost-user.c
>     > @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
>     >      VHOST_USER_SET_VRING_KICK = 12,
>     >      VHOST_USER_SET_VRING_CALL = 13,
>     >      VHOST_USER_SET_VRING_ERR = 14,
>     > +    VHOST_USER_NET_INJECT_RARP = 15,
>     >      VHOST_USER_MAX
>     >  } VhostUserRequest;
>     >
>     > @@ -70,6 +71,7 @@ typedef struct VhostUserMsg {
>     >          uint64_t u64;
>     >          struct vhost_vring_state state;
>     >          struct vhost_vring_addr addr;
>     > +        struct vhost_inject_rarp rarp;
>     >          VhostUserMemory memory;
>     >      };
>     >  } QEMU_PACKED VhostUserMsg;
>     > @@ -104,7 +106,8 @@ static unsigned long int ioctl_to_vhost_user_request
>     [VHOST_USER_MAX] = {
>     >      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
>     >      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
>     >      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
>     > -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
>     > +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
>     > +    VHOST_NET_INJECT_RARP   /* VHOST_USER_NET_INJECT_RARP */
>     >  };
>     >
>     >  static VhostUserRequest vhost_user_request_translate(unsigned long int
>     request)
>     > @@ -287,6 +290,12 @@ static int vhost_user_call(struct vhost_dev *dev,
>     unsigned long int request,
>     >              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
>     >          }
>     >          break;
>     > +
>     > +    case VHOST_NET_INJECT_RARP:
>     > +        memcpy(&msg.rarp, arg, sizeof(struct vhost_inject_rarp));
>     > +        msg.size = sizeof(struct vhost_inject_rarp);
>     > +        break;
>     > +
>     >      default:
>     >          error_report("vhost-user trying to send unhandled ioctl");
>     >          return -1;
>     > diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
>     > index c656f61..1920134 100644
>     > --- a/linux-headers/linux/vhost.h
>     > +++ b/linux-headers/linux/vhost.h
>     > @@ -63,6 +63,10 @@ struct vhost_memory {
>     >       struct vhost_memory_region regions[0];
>     >  };
>     >
>     > +struct vhost_inject_rarp {
>     > +     __u8 rarp[60];
>     > +};
>     > +
>     >  /* ioctls */
>     >
>     >  #define VHOST_VIRTIO 0xAF
>     > @@ -121,6 +125,11 @@ struct vhost_memory {
>     >   * device.  This can be used to stop the ring (e.g. for migration). */
>     >  #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct
>     vhost_vring_file)
>     >
>     > +/* Inject a RARP in case of live migration for guest that does not
>     support
>     > + * VIRTIO_NET_F_GUEST_ANNOUNCE */
>     > +#define VHOST_NET_INJECT_RARP _IOW(VHOST_VIRTIO, 0x31, struct
>     vhost_inject_rarp)
>     > +
>     > +
>     >  /* Feature bits */
>     >  /* Log all write descriptors. Can be changed while device is active. */
>     >  #define VHOST_F_LOG_ALL 26
>     > --
>     > 1.7.10.4
> 
> 

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-10 16:00       ` Michael S. Tsirkin
@ 2015-06-10 20:25         ` Thibaut Collet
  2015-06-10 20:50           ` Michael S. Tsirkin
  2015-06-11  5:39           ` Jason Wang
  0 siblings, 2 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-10 20:25 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

[-- Attachment #1: Type: text/plain, Size: 8625 bytes --]

Yes backend can save everything to be able to send the rarp alone after a
live migration.
The main purpose of this patch is to answer the point raised by Jason on the
previous version of my patch:
> Yes, your patch works well for recent drivers. But the problem is legacy
> guest/driver without VIRTIO_NET_F_GUEST_ANNOUNCE. In this case there
> will be no GARP sent after migration.

If Jason is OK with this solution this patch can be forgotten.

Maybe, to warn the user of this issue if the backend does not send the rarp, it
can be useful to keep the warning message of the previous patch:
> +        fprintf(stderr,
> +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE
support. RARP must be sent by vhost-user backend\n");
> +        fflush(stderr);
with a static bool to display this message only once to limit the
number of messages?

On Wed, Jun 10, 2015 at 6:00 PM, Michael S. Tsirkin <mst@redhat.com> wrote:

> On Wed, Jun 10, 2015 at 05:48:47PM +0200, Thibaut Collet wrote:
> > I have involved QEMU because QEMU prepares the rarp. I agree that
> backend has
> > probably all the information to do that.
> > But backend does not know if the guest supports
> the VIRTIO_NET_F_GUEST_ANNOUNCE
>
> Why not?  Backend has the acked feature mask.
>
>
> > and will send a useless rarp.
> > Maybe this duplication of requests is not very important and in this
> case this
> > patch is not mandatory.
> >
> > On Wed, Jun 10, 2015 at 5:34 PM, Michael S. Tsirkin <mst@redhat.com>
> wrote:
> >
> >     On Wed, Jun 10, 2015 at 03:43:03PM +0200, Thibaut Collet wrote:
> >     > In case of live migration with legacy guest (without
> >     VIRTIO_NET_F_GUEST_ANNOUNCE)
> >     > a message is added between QEMU and the vhost client/backend.
> >     > This message provides the RARP content, prepared by QEMU, to the
> vhost
> >     > client/backend.
> >     > The vhost client/backend is responsible to send the RARP.
> >     >
> >     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> >     > ---
> >     >  docs/specs/vhost-user.txt   |   16 ++++++++++++++++
> >     >  hw/net/vhost_net.c          |    8 ++++++++
> >     >  hw/virtio/vhost-user.c      |   11 ++++++++++-
> >     >  linux-headers/linux/vhost.h |    9 +++++++++
> >     >  4 files changed, 43 insertions(+), 1 deletion(-)
> >     >
> >     > diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> >     > index 2c8e934..ef5d475 100644
> >     > --- a/docs/specs/vhost-user.txt
> >     > +++ b/docs/specs/vhost-user.txt
> >     > @@ -97,6 +97,7 @@ typedef struct VhostUserMsg {
> >     >          uint64_t u64;
> >     >          struct vhost_vring_state state;
> >     >          struct vhost_vring_addr addr;
> >     > +        struct vhost_inject_rarp rarp;
> >     >          VhostUserMemory memory;
> >     >      };
> >     >  } QEMU_PACKED VhostUserMsg;
> >     > @@ -132,6 +133,12 @@ Multi queue support
> >     >  The protocol supports multiple queues by setting all index fields
> in the
> >     sent
> >     >  messages to a properly calculated value.
> >     >
> >     > +Live migration support
> >     > +----------------------
> >     > +The protocol supports live migration. GARP from the migrated
> guest is
> >     done
> >     > +through the VIRTIO_NET_F_GUEST_ANNOUNCE mechanism for guest that
> >     supports it or
> >     > +through RARP.
> >     > +
> >     >  Message types
> >     >  -------------
> >     >
> >     > @@ -269,3 +276,12 @@ Message types
> >     >        Bits (0-7) of the payload contain the vring index. Bit 8 is
> the
> >     >        invalid FD flag. This flag is set when there is no file
> descriptor
> >     >        in the ancillary data.
> >     > +
> >     > + * VHOST_USER_NET_INJECT_RARP
> >     > +
> >     > +      Id: 15
> >     > +      Master payload: rarp content
> >     > +
> >     > +      Provide the RARP message to send to the guest after a live
> >     migration. This
> >     > +      message is sent only for guest that does not support
> >     > +      VIRTIO_NET_F_GUEST_ANNOUNCE.
> >
> >     I don't see why this is needed.
> >     Can't backend simply send rarp itself?
> >     Why do we need to involve QEMU?
> >
> >
> >
> >     > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> >     > index 4a42325..f66d48d 100644
> >     > --- a/hw/net/vhost_net.c
> >     > +++ b/hw/net/vhost_net.c
> >     > @@ -369,10 +369,18 @@ void vhost_net_stop(VirtIODevice *dev,
> >     NetClientState *ncs,
> >     >
> >     >  void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t
> *buf,
> >     size_t size)
> >     >  {
> >     > +    struct vhost_inject_rarp inject_rarp;
> >     > +    memcpy(&inject_rarp.rarp, buf, size);
> >     > +
> >     >      if ((net->dev.acked_features & (1 <<
> VIRTIO_NET_F_GUEST_ANNOUNCE)) =
> >     = 0) {
> >     > +        const VhostOps *vhost_ops = net->dev.vhost_ops;
> >     > +        int r;
> >     > +
> >     >          fprintf(stderr,
> >     >                  "Warning: Guest with no
> VIRTIO_NET_F_GUEST_ANNOUNCE
> >     support. RARP must be sent by vhost-user backend\n");
> >     >          fflush(stderr);
> >     > +        r = vhost_ops->vhost_call(&net->dev,
> VHOST_NET_INJECT_RARP, &
> >     inject_rarp);
> >     > +        assert(r >= 0);
> >     >      }
> >     >  }
> >     >
> >     > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> >     > index d6f2163..2e752ab 100644
> >     > --- a/hw/virtio/vhost-user.c
> >     > +++ b/hw/virtio/vhost-user.c
> >     > @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
> >     >      VHOST_USER_SET_VRING_KICK = 12,
> >     >      VHOST_USER_SET_VRING_CALL = 13,
> >     >      VHOST_USER_SET_VRING_ERR = 14,
> >     > +    VHOST_USER_NET_INJECT_RARP = 15,
> >     >      VHOST_USER_MAX
> >     >  } VhostUserRequest;
> >     >
> >     > @@ -70,6 +71,7 @@ typedef struct VhostUserMsg {
> >     >          uint64_t u64;
> >     >          struct vhost_vring_state state;
> >     >          struct vhost_vring_addr addr;
> >     > +        struct vhost_inject_rarp rarp;
> >     >          VhostUserMemory memory;
> >     >      };
> >     >  } QEMU_PACKED VhostUserMsg;
> >     > @@ -104,7 +106,8 @@ static unsigned long int
> ioctl_to_vhost_user_request
> >     [VHOST_USER_MAX] = {
> >     >      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
> >     >      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
> >     >      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
> >     > -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
> >     > +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
> >     > +    VHOST_NET_INJECT_RARP   /* VHOST_USER_NET_INJECT_RARP */
> >     >  };
> >     >
> >     >  static VhostUserRequest vhost_user_request_translate(unsigned
> long int
> >     request)
> >     > @@ -287,6 +290,12 @@ static int vhost_user_call(struct vhost_dev
> *dev,
> >     unsigned long int request,
> >     >              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
> >     >          }
> >     >          break;
> >     > +
> >     > +    case VHOST_NET_INJECT_RARP:
> >     > +        memcpy(&msg.rarp, arg, sizeof(struct vhost_inject_rarp));
> >     > +        msg.size = sizeof(struct vhost_inject_rarp);
> >     > +        break;
> >     > +
> >     >      default:
> >     >          error_report("vhost-user trying to send unhandled ioctl");
> >     >          return -1;
> >     > diff --git a/linux-headers/linux/vhost.h
> b/linux-headers/linux/vhost.h
> >     > index c656f61..1920134 100644
> >     > --- a/linux-headers/linux/vhost.h
> >     > +++ b/linux-headers/linux/vhost.h
> >     > @@ -63,6 +63,10 @@ struct vhost_memory {
> >     >       struct vhost_memory_region regions[0];
> >     >  };
> >     >
> >     > +struct vhost_inject_rarp {
> >     > +     __u8 rarp[60];
> >     > +};
> >     > +
> >     >  /* ioctls */
> >     >
> >     >  #define VHOST_VIRTIO 0xAF
> >     > @@ -121,6 +125,11 @@ struct vhost_memory {
> >     >   * device.  This can be used to stop the ring (e.g. for
> migration). */
> >     >  #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct
> >     vhost_vring_file)
> >     >
> >     > +/* Inject a RARP in case of live migration for guest that does not
> >     support
> >     > + * VIRTIO_NET_F_GUEST_ANNOUNCE */
> >     > +#define VHOST_NET_INJECT_RARP _IOW(VHOST_VIRTIO, 0x31, struct
> >     vhost_inject_rarp)
> >     > +
> >     > +
> >     >  /* Feature bits */
> >     >  /* Log all write descriptors. Can be changed while device is
> active. */
> >     >  #define VHOST_F_LOG_ALL 26
> >     > --
> >     > 1.7.10.4
> >
> >
>

[-- Attachment #2: Type: text/html, Size: 12334 bytes --]

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-10 20:25         ` Thibaut Collet
@ 2015-06-10 20:50           ` Michael S. Tsirkin
  2015-06-11  5:34             ` Thibaut Collet
  2015-06-11  5:39           ` Jason Wang
  1 sibling, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-10 20:50 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

On Wed, Jun 10, 2015 at 10:25:57PM +0200, Thibaut Collet wrote:
> Yes backend can save everything to be able to send the rarp alone after a live
> migration.
> Main purpose of this patch is to answer to the point raise by Jason on the
> previous version of my patch:
> > Yes, your patch works well for recent drivers. But the problem is legacy
> > guest/driver without VIRTIO_NET_F_GUEST_ANNOUNCE. In this case there
> > will be no GARP sent after migration.
> 
> If Jason is OK with this solution this patch can be forgotten.
> 
> Maybe, to warn user of this issue if the backend does not send the rarp, it can
> be useful to keep the warn message of the previous patch:
> > +        fprintf(stderr,
> > +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE support.
> RARP must be sent by vhost-user backend\n");
> > +        fflush(stderr);
> with a static bool to display this message only one time to limit the number of
> message ?

I don't see why it's necessary.

> On Wed, Jun 10, 2015 at 6:00 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> 
>     On Wed, Jun 10, 2015 at 05:48:47PM +0200, Thibaut Collet wrote:
>     > I have involved QEMU because QEMU prepares the rarp. I agree that backend
>     has
>     > probably all the information to do that.
>     > But backend does not know if the guest supports
>     the VIRTIO_NET_F_GUEST_ANNOUNCE
> 
>     Why not?  Backend has the acked feature mask.
> 
> 
>     > and will send a useless rarp.
>     > Maybe this duplication of requests is not very important and in this case
>     this
>     > patch is not mandatory.
>     >
>     > On Wed, Jun 10, 2015 at 5:34 PM, Michael S. Tsirkin <mst@redhat.com>
>     wrote:
>     >
>     >     On Wed, Jun 10, 2015 at 03:43:03PM +0200, Thibaut Collet wrote:
>     >     > In case of live migration with legacy guest (without
>     >     VIRTIO_NET_F_GUEST_ANNOUNCE)
>     >     > a message is added between QEMU and the vhost client/backend.
>     >     > This message provides the RARP content, prepared by QEMU, to the
>     vhost
>     >     > client/backend.
>     >     > The vhost client/backend is responsible to send the RARP.
>     >     >
>     >     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
>     >     > ---
>     >     >  docs/specs/vhost-user.txt   |   16 ++++++++++++++++
>     >     >  hw/net/vhost_net.c          |    8 ++++++++
>     >     >  hw/virtio/vhost-user.c      |   11 ++++++++++-
>     >     >  linux-headers/linux/vhost.h |    9 +++++++++
>     >     >  4 files changed, 43 insertions(+), 1 deletion(-)
>     >     >
>     >     > diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
>     >     > index 2c8e934..ef5d475 100644
>     >     > --- a/docs/specs/vhost-user.txt
>     >     > +++ b/docs/specs/vhost-user.txt
>     >     > @@ -97,6 +97,7 @@ typedef struct VhostUserMsg {
>     >     >          uint64_t u64;
>     >     >          struct vhost_vring_state state;
>     >     >          struct vhost_vring_addr addr;
>     >     > +        struct vhost_inject_rarp rarp;
>     >     >          VhostUserMemory memory;
>     >     >      };
>     >     >  } QEMU_PACKED VhostUserMsg;
>     >     > @@ -132,6 +133,12 @@ Multi queue support
>     >     >  The protocol supports multiple queues by setting all index fields
>     in the
>     >     sent
>     >     >  messages to a properly calculated value.
>     >     >
>     >     > +Live migration support
>     >     > +----------------------
>     >     > +The protocol supports live migration. GARP from the migrated guest
>     is
>     >     done
>     >     > +through the VIRTIO_NET_F_GUEST_ANNOUNCE mechanism for guest that
>     >     supports it or
>     >     > +through RARP.
>     >     > +
>     >     >  Message types
>     >     >  -------------
>     >     >
>     >     > @@ -269,3 +276,12 @@ Message types
>     >     >        Bits (0-7) of the payload contain the vring index. Bit 8 is
>     the
>     >     >        invalid FD flag. This flag is set when there is no file
>     descriptor
>     >     >        in the ancillary data.
>     >     > +
>     >     > + * VHOST_USER_NET_INJECT_RARP
>     >     > +
>     >     > +      Id: 15
>     >     > +      Master payload: rarp content
>     >     > +
>     >     > +      Provide the RARP message to send to the guest after a live
>     >     migration. This
>     >     > +      message is sent only for guest that does not support
>     >     > +      VIRTIO_NET_F_GUEST_ANNOUNCE.
>     >
>     >     I don't see why this is needed.
>     >     Can't backend simply send rarp itself?
>     >     Why do we need to involve QEMU?
>     >
>     >
>     >
>     >     > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
>     >     > index 4a42325..f66d48d 100644
>     >     > --- a/hw/net/vhost_net.c
>     >     > +++ b/hw/net/vhost_net.c
>     >     > @@ -369,10 +369,18 @@ void vhost_net_stop(VirtIODevice *dev,
>     >     NetClientState *ncs,
>     >     >
>     >     >  void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t
>     *buf,
>     >     size_t size)
>     >     >  {
>     >     > +    struct vhost_inject_rarp inject_rarp;
>     >     > +    memcpy(&inject_rarp.rarp, buf, size);
>     >     > +
>     >     >      if ((net->dev.acked_features & (1 <<
>     VIRTIO_NET_F_GUEST_ANNOUNCE)) =
>     >     = 0) {
>     >     > +        const VhostOps *vhost_ops = net->dev.vhost_ops;
>     >     > +        int r;
>     >     > +
>     >     >          fprintf(stderr,
>     >     >                  "Warning: Guest with no
>     VIRTIO_NET_F_GUEST_ANNOUNCE
>     >     support. RARP must be sent by vhost-user backend\n");
>     >     >          fflush(stderr);
>     >     > +        r = vhost_ops->vhost_call(&net->dev,
>     VHOST_NET_INJECT_RARP, &
>     >     inject_rarp);
>     >     > +        assert(r >= 0);
>     >     >      }
>     >     >  }
>     >     >
>     >     > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>     >     > index d6f2163..2e752ab 100644
>     >     > --- a/hw/virtio/vhost-user.c
>     >     > +++ b/hw/virtio/vhost-user.c
>     >     > @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
>     >     >      VHOST_USER_SET_VRING_KICK = 12,
>     >     >      VHOST_USER_SET_VRING_CALL = 13,
>     >     >      VHOST_USER_SET_VRING_ERR = 14,
>     >     > +    VHOST_USER_NET_INJECT_RARP = 15,
>     >     >      VHOST_USER_MAX
>     >     >  } VhostUserRequest;
>     >     >
>     >     > @@ -70,6 +71,7 @@ typedef struct VhostUserMsg {
>     >     >          uint64_t u64;
>     >     >          struct vhost_vring_state state;
>     >     >          struct vhost_vring_addr addr;
>     >     > +        struct vhost_inject_rarp rarp;
>     >     >          VhostUserMemory memory;
>     >     >      };
>     >     >  } QEMU_PACKED VhostUserMsg;
>     >     > @@ -104,7 +106,8 @@ static unsigned long int
>     ioctl_to_vhost_user_request
>     >     [VHOST_USER_MAX] = {
>     >     >      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
>     >     >      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
>     >     >      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
>     >     > -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
>     >     > +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
>     >     > +    VHOST_NET_INJECT_RARP   /* VHOST_USER_NET_INJECT_RARP */
>     >     >  };
>     >     >
>     >     >  static VhostUserRequest vhost_user_request_translate(unsigned long
>     int
>     >     request)
>     >     > @@ -287,6 +290,12 @@ static int vhost_user_call(struct vhost_dev
>     *dev,
>     >     unsigned long int request,
>     >     >              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
>     >     >          }
>     >     >          break;
>     >     > +
>     >     > +    case VHOST_NET_INJECT_RARP:
>     >     > +        memcpy(&msg.rarp, arg, sizeof(struct vhost_inject_rarp));
>     >     > +        msg.size = sizeof(struct vhost_inject_rarp);
>     >     > +        break;
>     >     > +
>     >     >      default:
>     >     >          error_report("vhost-user trying to send unhandled ioctl");
>     >     >          return -1;
>     >     > diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/
>     vhost.h
>     >     > index c656f61..1920134 100644
>     >     > --- a/linux-headers/linux/vhost.h
>     >     > +++ b/linux-headers/linux/vhost.h
>     >     > @@ -63,6 +63,10 @@ struct vhost_memory {
>     >     >       struct vhost_memory_region regions[0];
>     >     >  };
>     >     >
>     >     > +struct vhost_inject_rarp {
>     >     > +     __u8 rarp[60];
>     >     > +};
>     >     > +
>     >     >  /* ioctls */
>     >     >
>     >     >  #define VHOST_VIRTIO 0xAF
>     >     > @@ -121,6 +125,11 @@ struct vhost_memory {
>     >     >   * device.  This can be used to stop the ring (e.g. for
>     migration). */
>     >     >  #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct
>     >     vhost_vring_file)
>     >     >
>     >     > +/* Inject a RARP in case of live migration for guest that does not
>     >     support
>     >     > + * VIRTIO_NET_F_GUEST_ANNOUNCE */
>     >     > +#define VHOST_NET_INJECT_RARP _IOW(VHOST_VIRTIO, 0x31, struct
>     >     vhost_inject_rarp)
>     >     > +
>     >     > +
>     >     >  /* Feature bits */
>     >     >  /* Log all write descriptors. Can be changed while device is
>     active. */
>     >     >  #define VHOST_F_LOG_ALL 26
>     >     > --
>     >     > 1.7.10.4
>     >
>     >
> 
> 

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-10 20:50           ` Michael S. Tsirkin
@ 2015-06-11  5:34             ` Thibaut Collet
  0 siblings, 0 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-11  5:34 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

[-- Attachment #1: Type: text/plain, Size: 10236 bytes --]

The warning message is not really necessary; it is just a reminder.

On Wed, Jun 10, 2015 at 10:50 PM, Michael S. Tsirkin <mst@redhat.com> wrote:

> On Wed, Jun 10, 2015 at 10:25:57PM +0200, Thibaut Collet wrote:
> > Yes backend can save everything to be able to send the rarp alone after
> a live
> > migration.
> > Main purpose of this patch is to answer to the point raise by Jason on
> the
> > previous version of my patch:
> > > Yes, your patch works well for recent drivers. But the problem is
> legacy
> > > guest/driver without VIRTIO_NET_F_GUEST_ANNOUNCE. In this case there
> > > will be no GARP sent after migration.
> >
> > If Jason is OK with this solution this patch can be forgotten.
> >
> > Maybe, to warn user of this issue if the backend does not send the rarp,
> it can
> > be useful to keep the warn message of the previous patch:
> > > +        fprintf(stderr,
> > > +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE
> support.
> > RARP must be sent by vhost-user backend\n");
> > > +        fflush(stderr);
> > with a static bool to display this message only one time to limit the
> number of
> > message ?
>
> I don't see why it's necessary.
>
> > On Wed, Jun 10, 2015 at 6:00 PM, Michael S. Tsirkin <mst@redhat.com>
> wrote:
> >
> >     On Wed, Jun 10, 2015 at 05:48:47PM +0200, Thibaut Collet wrote:
> >     > I have involved QEMU because QEMU prepares the rarp. I agree that
> backend
> >     has
> >     > probably all the information to do that.
> >     > But backend does not know if the guest supports
> >     the VIRTIO_NET_F_GUEST_ANNOUNCE
> >
> >     Why not?  Backend has the acked feature mask.
> >
> >
> >     > and will send a useless rarp.
> >     > Maybe this duplication of requests is not very important and in
> this case
> >     this
> >     > patch is not mandatory.
> >     >
> >     > On Wed, Jun 10, 2015 at 5:34 PM, Michael S. Tsirkin <
> mst@redhat.com>
> >     wrote:
> >     >
> >     >     On Wed, Jun 10, 2015 at 03:43:03PM +0200, Thibaut Collet wrote:
> >     >     > In case of live migration with legacy guest (without
> >     >     VIRTIO_NET_F_GUEST_ANNOUNCE)
> >     >     > a message is added between QEMU and the vhost client/backend.
> >     >     > This message provides the RARP content, prepared by QEMU, to
> the
> >     vhost
> >     >     > client/backend.
> >     >     > The vhost client/backend is responsible to send the RARP.
> >     >     >
> >     >     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> >     >     > ---
> >     >     >  docs/specs/vhost-user.txt   |   16 ++++++++++++++++
> >     >     >  hw/net/vhost_net.c          |    8 ++++++++
> >     >     >  hw/virtio/vhost-user.c      |   11 ++++++++++-
> >     >     >  linux-headers/linux/vhost.h |    9 +++++++++
> >     >     >  4 files changed, 43 insertions(+), 1 deletion(-)
> >     >     >
> >     >     > diff --git a/docs/specs/vhost-user.txt
> b/docs/specs/vhost-user.txt
> >     >     > index 2c8e934..ef5d475 100644
> >     >     > --- a/docs/specs/vhost-user.txt
> >     >     > +++ b/docs/specs/vhost-user.txt
> >     >     > @@ -97,6 +97,7 @@ typedef struct VhostUserMsg {
> >     >     >          uint64_t u64;
> >     >     >          struct vhost_vring_state state;
> >     >     >          struct vhost_vring_addr addr;
> >     >     > +        struct vhost_inject_rarp rarp;
> >     >     >          VhostUserMemory memory;
> >     >     >      };
> >     >     >  } QEMU_PACKED VhostUserMsg;
> >     >     > @@ -132,6 +133,12 @@ Multi queue support
> >     >     >  The protocol supports multiple queues by setting all index
> fields
> >     in the
> >     >     sent
> >     >     >  messages to a properly calculated value.
> >     >     >
> >     >     > +Live migration support
> >     >     > +----------------------
> >     >     > +The protocol supports live migration. GARP from the
> migrated guest
> >     is
> >     >     done
> >     >     > +through the VIRTIO_NET_F_GUEST_ANNOUNCE mechanism for guest
> that
> >     >     supports it or
> >     >     > +through RARP.
> >     >     > +
> >     >     >  Message types
> >     >     >  -------------
> >     >     >
> >     >     > @@ -269,3 +276,12 @@ Message types
> >     >     >        Bits (0-7) of the payload contain the vring index.
> Bit 8 is
> >     the
> >     >     >        invalid FD flag. This flag is set when there is no
> file
> >     descriptor
> >     >     >        in the ancillary data.
> >     >     > +
> >     >     > + * VHOST_USER_NET_INJECT_RARP
> >     >     > +
> >     >     > +      Id: 15
> >     >     > +      Master payload: rarp content
> >     >     > +
> >     >     > +      Provide the RARP message to send to the guest after a
> live
> >     >     migration. This
> >     >     > +      message is sent only for guest that does not support
> >     >     > +      VIRTIO_NET_F_GUEST_ANNOUNCE.
> >     >
> >     >     I don't see why this is needed.
> >     >     Can't backend simply send rarp itself?
> >     >     Why do we need to involve QEMU?
> >     >
> >     >
> >     >
> >     >     > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> >     >     > index 4a42325..f66d48d 100644
> >     >     > --- a/hw/net/vhost_net.c
> >     >     > +++ b/hw/net/vhost_net.c
> >     >     > @@ -369,10 +369,18 @@ void vhost_net_stop(VirtIODevice *dev,
> >     >     NetClientState *ncs,
> >     >     >
> >     >     >  void vhost_net_inject_rarp(struct vhost_net *net, const
> uint8_t
> >     *buf,
> >     >     size_t size)
> >     >     >  {
> >     >     > +    struct vhost_inject_rarp inject_rarp;
> >     >     > +    memcpy(&inject_rarp.rarp, buf, size);
> >     >     > +
> >     >     >      if ((net->dev.acked_features & (1 <<
> >     VIRTIO_NET_F_GUEST_ANNOUNCE)) =
> >     >     = 0) {
> >     >     > +        const VhostOps *vhost_ops = net->dev.vhost_ops;
> >     >     > +        int r;
> >     >     > +
> >     >     >          fprintf(stderr,
> >     >     >                  "Warning: Guest with no
> >     VIRTIO_NET_F_GUEST_ANNOUNCE
> >     >     support. RARP must be sent by vhost-user backend\n");
> >     >     >          fflush(stderr);
> >     >     > +        r = vhost_ops->vhost_call(&net->dev,
> >     VHOST_NET_INJECT_RARP, &
> >     >     inject_rarp);
> >     >     > +        assert(r >= 0);
> >     >     >      }
> >     >     >  }
> >     >     >
> >     >     > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> >     >     > index d6f2163..2e752ab 100644
> >     >     > --- a/hw/virtio/vhost-user.c
> >     >     > +++ b/hw/virtio/vhost-user.c
> >     >     > @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
> >     >     >      VHOST_USER_SET_VRING_KICK = 12,
> >     >     >      VHOST_USER_SET_VRING_CALL = 13,
> >     >     >      VHOST_USER_SET_VRING_ERR = 14,
> >     >     > +    VHOST_USER_NET_INJECT_RARP = 15,
> >     >     >      VHOST_USER_MAX
> >     >     >  } VhostUserRequest;
> >     >     >
> >     >     > @@ -70,6 +71,7 @@ typedef struct VhostUserMsg {
> >     >     >          uint64_t u64;
> >     >     >          struct vhost_vring_state state;
> >     >     >          struct vhost_vring_addr addr;
> >     >     > +        struct vhost_inject_rarp rarp;
> >     >     >          VhostUserMemory memory;
> >     >     >      };
> >     >     >  } QEMU_PACKED VhostUserMsg;
> >     >     > @@ -104,7 +106,8 @@ static unsigned long int
> >     ioctl_to_vhost_user_request
> >     >     [VHOST_USER_MAX] = {
> >     >     >      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
> >     >     >      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
> >     >     >      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
> >     >     > -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
> >     >     > +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
> >     >     > +    VHOST_NET_INJECT_RARP   /* VHOST_USER_NET_INJECT_RARP */
> >     >     >  };
> >     >     >
> >     >     >  static VhostUserRequest
> vhost_user_request_translate(unsigned long
> >     int
> >     >     request)
> >     >     > @@ -287,6 +290,12 @@ static int vhost_user_call(struct
> vhost_dev
> >     *dev,
> >     >     unsigned long int request,
> >     >     >              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
> >     >     >          }
> >     >     >          break;
> >     >     > +
> >     >     > +    case VHOST_NET_INJECT_RARP:
> >     >     > +        memcpy(&msg.rarp, arg, sizeof(struct
> vhost_inject_rarp));
> >     >     > +        msg.size = sizeof(struct vhost_inject_rarp);
> >     >     > +        break;
> >     >     > +
> >     >     >      default:
> >     >     >          error_report("vhost-user trying to send unhandled
> ioctl");
> >     >     >          return -1;
> >     >     > diff --git a/linux-headers/linux/vhost.h
> b/linux-headers/linux/
> >     vhost.h
> >     >     > index c656f61..1920134 100644
> >     >     > --- a/linux-headers/linux/vhost.h
> >     >     > +++ b/linux-headers/linux/vhost.h
> >     >     > @@ -63,6 +63,10 @@ struct vhost_memory {
> >     >     >       struct vhost_memory_region regions[0];
> >     >     >  };
> >     >     >
> >     >     > +struct vhost_inject_rarp {
> >     >     > +     __u8 rarp[60];
> >     >     > +};
> >     >     > +
> >     >     >  /* ioctls */
> >     >     >
> >     >     >  #define VHOST_VIRTIO 0xAF
> >     >     > @@ -121,6 +125,11 @@ struct vhost_memory {
> >     >     >   * device.  This can be used to stop the ring (e.g. for
> >     migration). */
> >     >     >  #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30,
> struct
> >     >     vhost_vring_file)
> >     >     >
> >     >     > +/* Inject a RARP in case of live migration for guest that
> does not
> >     >     support
> >     >     > + * VIRTIO_NET_F_GUEST_ANNOUNCE */
> >     >     > +#define VHOST_NET_INJECT_RARP _IOW(VHOST_VIRTIO, 0x31,
> struct
> >     >     vhost_inject_rarp)
> >     >     > +
> >     >     > +
> >     >     >  /* Feature bits */
> >     >     >  /* Log all write descriptors. Can be changed while device is
> >     active. */
> >     >     >  #define VHOST_F_LOG_ALL 26
> >     >     > --
> >     >     > 1.7.10.4
> >     >
> >     >
> >
> >
>

[-- Attachment #2: Type: text/html, Size: 14543 bytes --]

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-10 20:25         ` Thibaut Collet
  2015-06-10 20:50           ` Michael S. Tsirkin
@ 2015-06-11  5:39           ` Jason Wang
  2015-06-11  5:49             ` Thibaut Collet
  1 sibling, 1 reply; 48+ messages in thread
From: Jason Wang @ 2015-06-11  5:39 UTC (permalink / raw)
  To: Thibaut Collet, Michael S. Tsirkin; +Cc: qemu-devel, Stefan Hajnoczi



On 06/11/2015 04:25 AM, Thibaut Collet wrote:
> Yes backend can save everything to be able to send the rarp alone
> after a live migration.

Yes, but still need a mechanism to notify the backend of migration
completion from qemu side if GUEST_ANNOUNCE is not negotiated.

> Main purpose of this patch is to answer to the point raise by Jason on
> the previous version of my patch:
> > Yes, your patch works well for recent drivers. But the problem is legacy
> > guest/driver without VIRTIO_NET_F_GUEST_ANNOUNCE. In this case there
> > will be no GARP sent after migration.
>
> If Jason is OK with this solution this patch can be forgotten.
>

I prefer to do this in backend since it looks easier.

Thanks

> Maybe, to warn user of this issue if the backend does not send the
> rarp, it can be useful to keep the warn message of the previous patch:
> > +        fprintf(stderr,
> > +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE support. RARP
> must be sent by vhost-user backend\n");
> > +        fflush(stderr);
> with a static bool to display this message only one time to limit the
> number of message ?
>
> On Wed, Jun 10, 2015 at 6:00 PM, Michael S. Tsirkin <mst@redhat.com
> <mailto:mst@redhat.com>> wrote:
>
>     On Wed, Jun 10, 2015 at 05:48:47PM +0200, Thibaut Collet wrote:
>     > I have involved QEMU because QEMU prepares the rarp. I agree
>     that backend has
>     > probably all the information to do that.
>     > But backend does not know if the guest supports
>     the VIRTIO_NET_F_GUEST_ANNOUNCE
>
>     Why not?  Backend has the acked feature mask.
>
>
>     > and will send a useless rarp.
>     > Maybe this duplication of requests is not very important and in
>     this case this
>     > patch is not mandatory.
>     >
>     > On Wed, Jun 10, 2015 at 5:34 PM, Michael S. Tsirkin
>     <mst@redhat.com <mailto:mst@redhat.com>> wrote:
>     >
>     >     On Wed, Jun 10, 2015 at 03:43:03PM +0200, Thibaut Collet wrote:
>     >     > In case of live migration with legacy guest (without
>     >     VIRTIO_NET_F_GUEST_ANNOUNCE)
>     >     > a message is added between QEMU and the vhost client/backend.
>     >     > This message provides the RARP content, prepared by QEMU,
>     to the vhost
>     >     > client/backend.
>     >     > The vhost client/backend is responsible to send the RARP.
>     >     >
>     >     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com
>     <mailto:thibaut.collet@6wind.com>>
>     >     > ---
>     >     >  docs/specs/vhost-user.txt   |   16 ++++++++++++++++
>     >     >  hw/net/vhost_net.c          |    8 ++++++++
>     >     >  hw/virtio/vhost-user.c      |   11 ++++++++++-
>     >     >  linux-headers/linux/vhost.h |    9 +++++++++
>     >     >  4 files changed, 43 insertions(+), 1 deletion(-)
>     >     >
>     >     > diff --git a/docs/specs/vhost-user.txt
>     b/docs/specs/vhost-user.txt
>     >     > index 2c8e934..ef5d475 100644
>     >     > --- a/docs/specs/vhost-user.txt
>     >     > +++ b/docs/specs/vhost-user.txt
>     >     > @@ -97,6 +97,7 @@ typedef struct VhostUserMsg {
>     >     >          uint64_t u64;
>     >     >          struct vhost_vring_state state;
>     >     >          struct vhost_vring_addr addr;
>     >     > +        struct vhost_inject_rarp rarp;
>     >     >          VhostUserMemory memory;
>     >     >      };
>     >     >  } QEMU_PACKED VhostUserMsg;
>     >     > @@ -132,6 +133,12 @@ Multi queue support
>     >     >  The protocol supports multiple queues by setting all
>     index fields in the
>     >     sent
>     >     >  messages to a properly calculated value.
>     >     >
>     >     > +Live migration support
>     >     > +----------------------
>     >     > +The protocol supports live migration. GARP from the
>     migrated guest is
>     >     done
>     >     > +through the VIRTIO_NET_F_GUEST_ANNOUNCE mechanism for
>     guest that
>     >     supports it or
>     >     > +through RARP.
>     >     > +
>     >     >  Message types
>     >     >  -------------
>     >     >
>     >     > @@ -269,3 +276,12 @@ Message types
>     >     >        Bits (0-7) of the payload contain the vring index.
>     Bit 8 is the
>     >     >        invalid FD flag. This flag is set when there is no
>     file descriptor
>     >     >        in the ancillary data.
>     >     > +
>     >     > + * VHOST_USER_NET_INJECT_RARP
>     >     > +
>     >     > +      Id: 15
>     >     > +      Master payload: rarp content
>     >     > +
>     >     > +      Provide the RARP message to send to the guest after
>     a live
>     >     migration. This
>     >     > +      message is sent only for guest that does not support
>     >     > +      VIRTIO_NET_F_GUEST_ANNOUNCE.
>     >
>     >     I don't see why this is needed.
>     >     Can't backend simply send rarp itself?
>     >     Why do we need to involve QEMU?
>     >
>     >
>     >
>     >     > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
>     >     > index 4a42325..f66d48d 100644
>     >     > --- a/hw/net/vhost_net.c
>     >     > +++ b/hw/net/vhost_net.c
>     >     > @@ -369,10 +369,18 @@ void vhost_net_stop(VirtIODevice *dev,
>     >     NetClientState *ncs,
>     >     >
>     >     >  void vhost_net_inject_rarp(struct vhost_net *net, const
>     uint8_t *buf,
>     >     size_t size)
>     >     >  {
>     >     > +    struct vhost_inject_rarp inject_rarp;
>     >     > +    memcpy(&inject_rarp.rarp, buf, size);
>     >     > +
>     >     >      if ((net->dev.acked_features & (1 <<
>     VIRTIO_NET_F_GUEST_ANNOUNCE)) =
>     >     = 0) {
>     >     > +        const VhostOps *vhost_ops = net->dev.vhost_ops;
>     >     > +        int r;
>     >     > +
>     >     >          fprintf(stderr,
>     >     >                  "Warning: Guest with no
>     VIRTIO_NET_F_GUEST_ANNOUNCE
>     >     support. RARP must be sent by vhost-user backend\n");
>     >     >          fflush(stderr);
>     >     > +        r = vhost_ops->vhost_call(&net->dev,
>     VHOST_NET_INJECT_RARP, &
>     >     inject_rarp);
>     >     > +        assert(r >= 0);
>     >     >      }
>     >     >  }
>     >     >
>     >     > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>     >     > index d6f2163..2e752ab 100644
>     >     > --- a/hw/virtio/vhost-user.c
>     >     > +++ b/hw/virtio/vhost-user.c
>     >     > @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
>     >     >      VHOST_USER_SET_VRING_KICK = 12,
>     >     >      VHOST_USER_SET_VRING_CALL = 13,
>     >     >      VHOST_USER_SET_VRING_ERR = 14,
>     >     > +    VHOST_USER_NET_INJECT_RARP = 15,
>     >     >      VHOST_USER_MAX
>     >     >  } VhostUserRequest;
>     >     >
>     >     > @@ -70,6 +71,7 @@ typedef struct VhostUserMsg {
>     >     >          uint64_t u64;
>     >     >          struct vhost_vring_state state;
>     >     >          struct vhost_vring_addr addr;
>     >     > +        struct vhost_inject_rarp rarp;
>     >     >          VhostUserMemory memory;
>     >     >      };
>     >     >  } QEMU_PACKED VhostUserMsg;
>     >     > @@ -104,7 +106,8 @@ static unsigned long int
>     ioctl_to_vhost_user_request
>     >     [VHOST_USER_MAX] = {
>     >     >      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
>     >     >      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
>     >     >      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
>     >     > -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
>     >     > +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
>     >     > +    VHOST_NET_INJECT_RARP   /* VHOST_USER_NET_INJECT_RARP */
>     >     >  };
>     >     >
>     >     >  static VhostUserRequest
>     vhost_user_request_translate(unsigned long int
>     >     request)
>     >     > @@ -287,6 +290,12 @@ static int vhost_user_call(struct
>     vhost_dev *dev,
>     >     unsigned long int request,
>     >     >              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
>     >     >          }
>     >     >          break;
>     >     > +
>     >     > +    case VHOST_NET_INJECT_RARP:
>     >     > +        memcpy(&msg.rarp, arg, sizeof(struct
>     vhost_inject_rarp));
>     >     > +        msg.size = sizeof(struct vhost_inject_rarp);
>     >     > +        break;
>     >     > +
>     >     >      default:
>     >     >          error_report("vhost-user trying to send unhandled
>     ioctl");
>     >     >          return -1;
>     >     > diff --git a/linux-headers/linux/vhost.h
>     b/linux-headers/linux/vhost.h
>     >     > index c656f61..1920134 100644
>     >     > --- a/linux-headers/linux/vhost.h
>     >     > +++ b/linux-headers/linux/vhost.h
>     >     > @@ -63,6 +63,10 @@ struct vhost_memory {
>     >     >       struct vhost_memory_region regions[0];
>     >     >  };
>     >     >
>     >     > +struct vhost_inject_rarp {
>     >     > +     __u8 rarp[60];
>     >     > +};
>     >     > +
>     >     >  /* ioctls */
>     >     >
>     >     >  #define VHOST_VIRTIO 0xAF
>     >     > @@ -121,6 +125,11 @@ struct vhost_memory {
>     >     >   * device.  This can be used to stop the ring (e.g. for
>     migration). */
>     >     >  #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct
>     >     vhost_vring_file)
>     >     >
>     >     > +/* Inject a RARP in case of live migration for guest that
>     does not
>     >     support
>     >     > + * VIRTIO_NET_F_GUEST_ANNOUNCE */
>     >     > +#define VHOST_NET_INJECT_RARP _IOW(VHOST_VIRTIO, 0x31, struct
>     >     vhost_inject_rarp)
>     >     > +
>     >     > +
>     >     >  /* Feature bits */
>     >     >  /* Log all write descriptors. Can be changed while device
>     is active. */
>     >     >  #define VHOST_F_LOG_ALL 26
>     >     > --
>     >     > 1.7.10.4
>     >
>     >
>
>

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-11  5:39           ` Jason Wang
@ 2015-06-11  5:49             ` Thibaut Collet
  2015-06-11  5:54               ` Jason Wang
  0 siblings, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-11  5:49 UTC (permalink / raw)
  To: Jason Wang; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin

[-- Attachment #1: Type: text/plain, Size: 669 bytes --]

> Yes, but still need a mechanism to notify the backend of migration
> completion from qemu side if GUEST_ANNOUNCE is not negotiated.

backend is aware of a connection with the guest (through the feature
negotiation) and can send a rarp. This rarp will always be sent by the
backend when a VM is launched (first start or live migration completion) if
GUEST_ANNOUNCE is not supported.
In this case the issue is solved without QEMU having to do everything.
If sending a rarp message at the start of the VM is not acceptable, we must
provide a mechanism similar to the one I have implemented. The message
content can be empty as the backend is able to create the rarp message.

[-- Attachment #2: Type: text/html, Size: 1267 bytes --]

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-11  5:49             ` Thibaut Collet
@ 2015-06-11  5:54               ` Jason Wang
  2015-06-11 10:38                 ` Michael S. Tsirkin
  0 siblings, 1 reply; 48+ messages in thread
From: Jason Wang @ 2015-06-11  5:54 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin



On 06/11/2015 01:49 PM, Thibaut Collet wrote:
> > Yes, but still need a mechanism to notify the backend of migration
> > completion from qemu side if GUEST_ANNOUNCE is not negotiated.
>
> backend is aware of a connection with the guest (with the feature
> negociation) and can send a rarp. This rarp will be always sent by the
> backend when a VM is launched (first start or live migration
> completion) if the GUEST_ANOUNCE is not supported.
> In this case the issue is solved without done everything by QEMU.

The issue is that during migration the guest network is still active. So sending
the rarp too early in the destination (e.g. while the VM is being launched) may
confuse the switch.  We want it to be sent exactly when the migration is
completed in the destination.

> If sending a rarp message on the start of te VM is not accceptable, we
> must provide a mechanism similar of the one I have implemented. The
> message content can be empty as the backend is able to create the rarp
> message.

Yes.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-11  5:54               ` Jason Wang
@ 2015-06-11 10:38                 ` Michael S. Tsirkin
  2015-06-11 12:10                   ` Thibaut Collet
  0 siblings, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-11 10:38 UTC (permalink / raw)
  To: Jason Wang; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi

On Thu, Jun 11, 2015 at 01:54:22PM +0800, Jason Wang wrote:
> 
> 
> On 06/11/2015 01:49 PM, Thibaut Collet wrote:
> > > Yes, but still need a mechanism to notify the backend of migration
> > > completion from qemu side if GUEST_ANNOUNCE is not negotiated.
> >
> > backend is aware of a connection with the guest (with the feature
> > negociation) and can send a rarp. This rarp will be always sent by the
> > backend when a VM is launched (first start or live migration
> > completion) if the GUEST_ANOUNCE is not supported.
> > In this case the issue is solved without done everything by QEMU.
> 
> The issue is during migration guest network is still active. So sending
> rarp too early in the destination (e.g during VM is launched) may
> confuse the switch.  We want it to be sent exactly when the migration is
> completed in destination.

It needs to be sent when backend is activated by guest kick
(in case of virtio 1, it's possible to use DRIVER_OK for this).
This does not happen when VM still runs on source.

> > If sending a rarp message on the start of te VM is not accceptable, we
> > must provide a mechanism similar of the one I have implemented. The
> > message content can be empty as the backend is able to create the rarp
> > message.
> 
> Yes.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-11 10:38                 ` Michael S. Tsirkin
@ 2015-06-11 12:10                   ` Thibaut Collet
  2015-06-11 12:13                     ` Michael S. Tsirkin
  0 siblings, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-11 12:10 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

[-- Attachment #1: Type: text/plain, Size: 2122 bytes --]

I am not sure to understand your remark:

> It needs to be sent when backend is activated by guest kick
> (in case of virtio 1, it's possible to use DRIVER_OK for this).
> This does not happen when VM still runs on source.

Could you confirm rarp can be sent by backend when the
VHOST_USER_SET_VRING_KICK
message is received by the backend ?
At this time the migration is completed and there is no risk of confusing
the switch.
In this case:
  - there is nothing to do in QEMU to manage a legacy guest with
no GUEST_ANNOUNCE.
  - All the job is done by the backend on the VHOST_USER_SET_VRING_KICK
reception. Maybe switch notification of live migration is done with a small
delay but it works
  - This patch can be discarded.


On Thu, Jun 11, 2015 at 12:38 PM, Michael S. Tsirkin <mst@redhat.com> wrote:

> On Thu, Jun 11, 2015 at 01:54:22PM +0800, Jason Wang wrote:
> >
> >
> > On 06/11/2015 01:49 PM, Thibaut Collet wrote:
> > > > Yes, but still need a mechanism to notify the backend of migration
> > > > completion from qemu side if GUEST_ANNOUNCE is not negotiated.
> > >
> > > backend is aware of a connection with the guest (with the feature
> > > negociation) and can send a rarp. This rarp will be always sent by the
> > > backend when a VM is launched (first start or live migration
> > > completion) if the GUEST_ANOUNCE is not supported.
> > > In this case the issue is solved without done everything by QEMU.
> >
> > The issue is during migration guest network is still active. So sending
> > rarp too early in the destination (e.g during VM is launched) may
> > confuse the switch.  We want it to be sent exactly when the migration is
> > completed in destination.
>
> It needs to be sent when backend is activated by guest kick
> (in case of virtio 1, it's possible to use DRIVER_OK for this).
> This does not happen when VM still runs on source.
>
> > > If sending a rarp message on the start of te VM is not accceptable, we
> > > must provide a mechanism similar of the one I have implemented. The
> > > message content can be empty as the backend is able to create the rarp
> > > message.
> >
> > Yes.
>

[-- Attachment #2: Type: text/html, Size: 3037 bytes --]

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-11 12:10                   ` Thibaut Collet
@ 2015-06-11 12:13                     ` Michael S. Tsirkin
  2015-06-11 12:33                       ` Thibaut Collet
  2015-06-12  7:55                       ` Jason Wang
  0 siblings, 2 replies; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-11 12:13 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
> I am not sure to understand your remark:
> 
> > It needs to be sent when backend is activated by guest kick
> > (in case of virtio 1, it's possible to use DRIVER_OK for this).
> > This does not happen when VM still runs on source.
> 
> Could you confirm rarp can be sent by backend when the 
> VHOST_USER_SET_VRING_KICK message is received by the backend ?

No - the time to send packets is when you start processing
the rings.

And the time to do that is when you detect a kick on
an eventfd, not when said fd is set.


> At this time the migration is completed and there is no risk of confusing the
> switch.
> In this case:
>   - there are nothing to do in QEMU to manage legacy guest with
> no GUEST_ANNOUNCE.
>   - All the job is done by the backend on the VHOST_USER_SET_VRING_KICK
> reception. Maybe switch notification of live migration is done with a small
> delay but it works
>   - This patch can be discarded.
> 
> 
> On Thu, Jun 11, 2015 at 12:38 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> 
>     On Thu, Jun 11, 2015 at 01:54:22PM +0800, Jason Wang wrote:
>     >
>     >
>     > On 06/11/2015 01:49 PM, Thibaut Collet wrote:
>     > > > Yes, but still need a mechanism to notify the backend of migration
>     > > > completion from qemu side if GUEST_ANNOUNCE is not negotiated.
>     > >
>     > > backend is aware of a connection with the guest (with the feature
>     > > negociation) and can send a rarp. This rarp will be always sent by the
>     > > backend when a VM is launched (first start or live migration
>     > > completion) if the GUEST_ANOUNCE is not supported.
>     > > In this case the issue is solved without done everything by QEMU.
>     >
>     > The issue is during migration guest network is still active. So sending
>     > rarp too early in the destination (e.g during VM is launched) may
>     > confuse the switch.  We want it to be sent exactly when the migration is
>     > completed in destination.
> 
>     It needs to be sent when backend is activated by guest kick
>     (in case of virtio 1, it's possible to use DRIVER_OK for this).
>     This does not happen when VM still runs on source.
> 
>     > > If sending a rarp message on the start of te VM is not accceptable, we
>     > > must provide a mechanism similar of the one I have implemented. The
>     > > message content can be empty as the backend is able to create the rarp
>     > > message.
>     >
>     > Yes.
> 
> 

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-11 12:13                     ` Michael S. Tsirkin
@ 2015-06-11 12:33                       ` Thibaut Collet
  2015-06-12  7:55                       ` Jason Wang
  1 sibling, 0 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-11 12:33 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

[-- Attachment #1: Type: text/plain, Size: 3520 bytes --]

Ok.

backend is able to know when the eventfd is kicked for the first time and then
can send a rarp for a legacy guest.

With this backend modification the issue point by Jason is no more a QEMU
problem.
Full support of live migration for vhost user:
 - Need my first patch.
 - For legacy guests the backend must be updated to send a rarp when the
eventfd is kicked for the first time.

To summarize the status:

1. The first patch must be updated by:
      - removing the warning trace
      - guarding the error trace with a static bool flag so it is only printed
once
      - removing the vhost_net_inject_rarp function (no more useful)
2. The second patch must be removed. Management of legacy guest for rarp
sending can be done by modifications in backend without any change in QEMU.

Does everyone agree with this summary?

On Thu, Jun 11, 2015 at 2:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:

> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
> > I am not sure to understand your remark:
> >
> > > It needs to be sent when backend is activated by guest kick
> > > (in case of virtio 1, it's possible to use DRIVER_OK for this).
> > > This does not happen when VM still runs on source.
> >
> > Could you confirm rarp can be sent by backend when the
> > VHOST_USER_SET_VRING_KICK message is received by the backend ?
>
> No - the time to send pakets is when you start processing
> the rings.
>
> And the time to do that is when you detect a kick on
> an eventfd, not when said fd is set.
>
>
> > At this time the migration is completed and there is no risk of
> confusing the
> > switch.
> > In this case:
> >   - there are nothing to do in QEMU to manage legacy guest with
> > no GUEST_ANNOUNCE.
> >   - All the job is done by the backend on the VHOST_USER_SET_VRING_KICK
> > reception. Maybe switch notification of live migration is done with a
> small
> > delay but it works
> >   - This patch can be discarded.
> >
> >
> > On Thu, Jun 11, 2015 at 12:38 PM, Michael S. Tsirkin <mst@redhat.com>
> wrote:
> >
> >     On Thu, Jun 11, 2015 at 01:54:22PM +0800, Jason Wang wrote:
> >     >
> >     >
> >     > On 06/11/2015 01:49 PM, Thibaut Collet wrote:
> >     > > > Yes, but still need a mechanism to notify the backend of
> migration
> >     > > > completion from qemu side if GUEST_ANNOUNCE is not negotiated.
> >     > >
> >     > > backend is aware of a connection with the guest (with the feature
> >     > > negociation) and can send a rarp. This rarp will be always sent
> by the
> >     > > backend when a VM is launched (first start or live migration
> >     > > completion) if the GUEST_ANOUNCE is not supported.
> >     > > In this case the issue is solved without done everything by QEMU.
> >     >
> >     > The issue is during migration guest network is still active. So
> sending
> >     > rarp too early in the destination (e.g during VM is launched) may
> >     > confuse the switch.  We want it to be sent exactly when the
> migration is
> >     > completed in destination.
> >
> >     It needs to be sent when backend is activated by guest kick
> >     (in case of virtio 1, it's possible to use DRIVER_OK for this).
> >     This does not happen when VM still runs on source.
> >
> >     > > If sending a rarp message on the start of te VM is not
> accceptable, we
> >     > > must provide a mechanism similar of the one I have implemented.
> The
> >     > > message content can be empty as the backend is able to create
> the rarp
> >     > > message.
> >     >
> >     > Yes.
> >
> >
>

[-- Attachment #2: Type: text/html, Size: 4865 bytes --]

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-11 12:13                     ` Michael S. Tsirkin
  2015-06-11 12:33                       ` Thibaut Collet
@ 2015-06-12  7:55                       ` Jason Wang
  2015-06-12 11:53                         ` Thibaut Collet
  2015-06-12 14:28                         ` Michael S. Tsirkin
  1 sibling, 2 replies; 48+ messages in thread
From: Jason Wang @ 2015-06-12  7:55 UTC (permalink / raw)
  To: Michael S. Tsirkin, Thibaut Collet; +Cc: qemu-devel, Stefan Hajnoczi



On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>> I am not sure to understand your remark:
>>
>>> It needs to be sent when backend is activated by guest kick
>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>>> This does not happen when VM still runs on source.
>> Could you confirm rarp can be sent by backend when the 
>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
> No - the time to send pakets is when you start processing
> the rings.
>
> And the time to do that is when you detect a kick on
> an eventfd, not when said fd is set.
>

Probably not. What if guest is only doing receiving? In this case, you
won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-12  7:55                       ` Jason Wang
@ 2015-06-12 11:53                         ` Thibaut Collet
  2015-06-12 14:28                         ` Michael S. Tsirkin
  1 sibling, 0 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-12 11:53 UTC (permalink / raw)
  To: Jason Wang; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin

If I correctly understand how vhost user / virtio works the solution
proposed by Michael is OK:
 - Rings to exchange data between host and guest are allocated by the guest.
 - As soon as the guest adds rings in a queue (for RX or TX) a kick is
done on the eventfd associated with the queue
 - On a live migration (as for a first startup), the guest virtio-net
pre-allocates rings for each queue and so send a kick on the eventfd
(for RX and TX)

So even if the guest is only doing receiving, there is a kick on the
queues. --> The vhost-user backend knows when to send the rarp in any
condition without involving QEMU.

Michael, could you confirm that my analysis is correct?

On Fri, Jun 12, 2015 at 9:55 AM, Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>>> I am not sure to understand your remark:
>>>
>>>> It needs to be sent when backend is activated by guest kick
>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>>>> This does not happen when VM still runs on source.
>>> Could you confirm rarp can be sent by backend when the
>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
>> No - the time to send pakets is when you start processing
>> the rings.
>>
>> And the time to do that is when you detect a kick on
>> an eventfd, not when said fd is set.
>>
>
> Probably not. What if guest is only doing receiving? In this case, you
> won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-12  7:55                       ` Jason Wang
  2015-06-12 11:53                         ` Thibaut Collet
@ 2015-06-12 14:28                         ` Michael S. Tsirkin
  2015-06-15  7:43                           ` Jason Wang
  1 sibling, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-12 14:28 UTC (permalink / raw)
  To: Jason Wang; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi

On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
> 
> 
> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
> > On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
> >> I am not sure to understand your remark:
> >>
> >>> It needs to be sent when backend is activated by guest kick
> >>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
> >>> This does not happen when VM still runs on source.
> >> Could you confirm rarp can be sent by backend when the 
> >> VHOST_USER_SET_VRING_KICK message is received by the backend ?
> > No - the time to send pakets is when you start processing
> > the rings.
> >
> > And the time to do that is when you detect a kick on
> > an eventfd, not when said fd is set.
> >
> 
> Probably not. What if guest is only doing receiving?

Clarification: the kick can be on any VQs.
In your example, guest kicks after adding receive buffers.

> In this case, you
> won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-12 14:28                         ` Michael S. Tsirkin
@ 2015-06-15  7:43                           ` Jason Wang
  2015-06-15  8:44                             ` Michael S. Tsirkin
  0 siblings, 1 reply; 48+ messages in thread
From: Jason Wang @ 2015-06-15  7:43 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi



On 06/12/2015 10:28 PM, Michael S. Tsirkin wrote:
> On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
>>
>> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
>>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>>>> I am not sure to understand your remark:
>>>>
>>>>> It needs to be sent when backend is activated by guest kick
>>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>>>>> This does not happen when VM still runs on source.
>>>> Could you confirm rarp can be sent by backend when the 
>>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
>>> No - the time to send pakets is when you start processing
>>> the rings.
>>>
>>> And the time to do that is when you detect a kick on
>>> an eventfd, not when said fd is set.
>>>
>> Probably not. What if guest is only doing receiving?
> Clarification: the kick can be on any VQs.
> In your example, guest kicks after adding receive buffers.

Yes, but refill only happens when we are lacking receive buffers. It is
not guaranteed to happen just after migration; we may still have
enough rx buffers for the device to receive.
>
>> In this case, you
>> won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-15  7:43                           ` Jason Wang
@ 2015-06-15  8:44                             ` Michael S. Tsirkin
  2015-06-15 12:12                               ` Thibaut Collet
  2015-06-16  3:35                               ` Jason Wang
  0 siblings, 2 replies; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-15  8:44 UTC (permalink / raw)
  To: Jason Wang; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi

On Mon, Jun 15, 2015 at 03:43:13PM +0800, Jason Wang wrote:
> 
> 
> On 06/12/2015 10:28 PM, Michael S. Tsirkin wrote:
> > On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
> >>
> >> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
> >>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
> >>>> I am not sure to understand your remark:
> >>>>
> >>>>> It needs to be sent when backend is activated by guest kick
> >>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
> >>>>> This does not happen when VM still runs on source.
> >>>> Could you confirm rarp can be sent by backend when the 
> >>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
> >>> No - the time to send pakets is when you start processing
> >>> the rings.
> >>>
> >>> And the time to do that is when you detect a kick on
> >>> an eventfd, not when said fd is set.
> >>>
> >> Probably not. What if guest is only doing receiving?
> > Clarification: the kick can be on any VQs.
> > In your example, guest kicks after adding receive buffers.
> 
> Yes, but refill only happens on we are lacking of receive buffers. It is
> not guaranteed to happen just after migration, we may have still have
> enough rx buffers for device to receive.

I think we also kick the backend after migration, do we not?
Further, DRIVER_OK can be used as a signal to start backend too.

> >
> >> In this case, you
> >> won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-15  8:44                             ` Michael S. Tsirkin
@ 2015-06-15 12:12                               ` Thibaut Collet
  2015-06-15 12:45                                 ` Michael S. Tsirkin
  2015-06-16  5:29                                 ` Jason Wang
  2015-06-16  3:35                               ` Jason Wang
  1 sibling, 2 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-15 12:12 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

After a resume operation the guest always kicks the backend for each
virtual queue.
A live migration does a suspend operation on the old host and a resume
operation on the new host. So the backend gets a kick after migration.

I have checked this point with a legacy guest (redhat 6-5 with kernel
version 2.6.32-431.29.2) and the kick occurs after migration or
resume.

Jason, do you have an example of a legacy guest that will not kick the
virtual queue after a resume?

On Mon, Jun 15, 2015 at 10:44 AM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Mon, Jun 15, 2015 at 03:43:13PM +0800, Jason Wang wrote:
>>
>>
>> On 06/12/2015 10:28 PM, Michael S. Tsirkin wrote:
>> > On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
>> >>
>> >> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
>> >>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>> >>>> I am not sure to understand your remark:
>> >>>>
>> >>>>> It needs to be sent when backend is activated by guest kick
>> >>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>> >>>>> This does not happen when VM still runs on source.
>> >>>> Could you confirm rarp can be sent by backend when the
>> >>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
>> >>> No - the time to send pakets is when you start processing
>> >>> the rings.
>> >>>
>> >>> And the time to do that is when you detect a kick on
>> >>> an eventfd, not when said fd is set.
>> >>>
>> >> Probably not. What if guest is only doing receiving?
>> > Clarification: the kick can be on any VQs.
>> > In your example, guest kicks after adding receive buffers.
>>
>> Yes, but refill only happens on we are lacking of receive buffers. It is
>> not guaranteed to happen just after migration, we may have still have
>> enough rx buffers for device to receive.
>
> I think we also kick the backend after migration, do we not?
> Further, DRIVER_OK can be used as a signal to start backend too.
>
>> >
>> >> In this case, you
>> >> won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-15 12:12                               ` Thibaut Collet
@ 2015-06-15 12:45                                 ` Michael S. Tsirkin
  2015-06-15 13:04                                   ` Thibaut Collet
  2015-06-16  5:29                                 ` Jason Wang
  1 sibling, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-15 12:45 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

On Mon, Jun 15, 2015 at 02:12:40PM +0200, Thibaut Collet wrote:
> After a resume operation the guest always kicks the backend for each
> virtual queues.
> A live migration does a suspend operation on the old host and a resume
> operation on the new host. So the backend has a kick after migration.
> 
> I have checked this point with a legacy guest (redhat 6-5 with kernel
> version 2.6.32-431.29.2) and the kick occurs after migration or
> resume.
> 
> Jason have you an example of legacy guest that will not kick the
> virtual queue after a resume ?

In practice with sufficiently modern guests you can look
at DRIVER_OK status bit. Process rings if that bit is set.

-- 
MST

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-15 12:45                                 ` Michael S. Tsirkin
@ 2015-06-15 13:04                                   ` Thibaut Collet
  0 siblings, 0 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-15 13:04 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

If we use the DRIVER_OK status bit to make the backend send the RARP, I am
afraid that some legacy guests will not be supported.
Moreover, the vhost user backend is not aware of changes to the
DRIVER_OK status bit. If this solution is chosen as the event that triggers
the RARP, a message between QEMU and the vhost user backend must be added.
If there is a solution that avoids adding a message (i.e. an IOCTL) to the
vhost user backend, it would be better (no kernel update).
Your solution with the kick on the virtual queue seems OK in any case and
does not require adding a message.
Unless there is a case where this solution does not work (so far I
have not found one), I would prefer to implement this solution.

On Mon, Jun 15, 2015 at 2:45 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Mon, Jun 15, 2015 at 02:12:40PM +0200, Thibaut Collet wrote:
>> After a resume operation the guest always kicks the backend for each
>> virtual queues.
>> A live migration does a suspend operation on the old host and a resume
>> operation on the new host. So the backend has a kick after migration.
>>
>> I have checked this point with a legacy guest (redhat 6-5 with kernel
>> version 2.6.32-431.29.2) and the kick occurs after migration or
>> resume.
>>
>> Jason have you an example of legacy guest that will not kick the
>> virtual queue after a resume ?
>
> In practice with sufficiently modern guests you can look
> at DRIVER_OK status bit. Process rings if that bit is set.
>
> --
> MST

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-15  8:44                             ` Michael S. Tsirkin
  2015-06-15 12:12                               ` Thibaut Collet
@ 2015-06-16  3:35                               ` Jason Wang
  1 sibling, 0 replies; 48+ messages in thread
From: Jason Wang @ 2015-06-16  3:35 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi



On 06/15/2015 04:44 PM, Michael S. Tsirkin wrote:
> On Mon, Jun 15, 2015 at 03:43:13PM +0800, Jason Wang wrote:
>>
>> On 06/12/2015 10:28 PM, Michael S. Tsirkin wrote:
>>> On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
>>>> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
>>>>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>>>>>> I am not sure to understand your remark:
>>>>>>
>>>>>>> It needs to be sent when backend is activated by guest kick
>>>>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>>>>>>> This does not happen when VM still runs on source.
>>>>>> Could you confirm rarp can be sent by backend when the 
>>>>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
>>>>> No - the time to send pakets is when you start processing
>>>>> the rings.
>>>>>
>>>>> And the time to do that is when you detect a kick on
>>>>> an eventfd, not when said fd is set.
>>>>>
>>>> Probably not. What if guest is only doing receiving?
>>> Clarification: the kick can be on any VQs.
>>> In your example, guest kicks after adding receive buffers.
>> Yes, but refill only happens on we are lacking of receive buffers. It is
>> not guaranteed to happen just after migration, we may have still have
>> enough rx buffers for device to receive.
> I think we also kick the backend after migration, do we not?

For "kick" do you mean kicking in the guest? If yes, migration should be
transparent to the guest (that why we have a config interrupt for guest
announcing) and I don't see where we do the kick. But qemu do kick the
backend to start with VHOST_NET_SET_BACKEND.

> Further, DRIVER_OK can be used as a signal to start backend too.

Then even a reboot will send the RARP packet, looks harmless but strange.

>>>> In this case, you
>>>> won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-15 12:12                               ` Thibaut Collet
  2015-06-15 12:45                                 ` Michael S. Tsirkin
@ 2015-06-16  5:29                                 ` Jason Wang
  2015-06-16  7:24                                   ` Thibaut Collet
  1 sibling, 1 reply; 48+ messages in thread
From: Jason Wang @ 2015-06-16  5:29 UTC (permalink / raw)
  To: Thibaut Collet, Michael S. Tsirkin; +Cc: qemu-devel, Stefan Hajnoczi



On 06/15/2015 08:12 PM, Thibaut Collet wrote:
> After a resume operation the guest always kicks the backend for each
> virtual queues.
> A live migration does a suspend operation on the old host and a resume
> operation on the new host. So the backend has a kick after migration.
>
> I have checked this point with a legacy guest (redhat 6-5 with kernel
> version 2.6.32-431.29.2) and the kick occurs after migration or
> resume.
>
> Jason have you an example of legacy guest that will not kick the
> virtual queue after a resume ?

I must miss something but migration should be transparent to guest.
Could you show me the code that guest does the kick after migration?

>
> On Mon, Jun 15, 2015 at 10:44 AM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> On Mon, Jun 15, 2015 at 03:43:13PM +0800, Jason Wang wrote:
>>>
>>> On 06/12/2015 10:28 PM, Michael S. Tsirkin wrote:
>>>> On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
>>>>> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
>>>>>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>>>>>>> I am not sure to understand your remark:
>>>>>>>
>>>>>>>> It needs to be sent when backend is activated by guest kick
>>>>>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>>>>>>>> This does not happen when VM still runs on source.
>>>>>>> Could you confirm rarp can be sent by backend when the
>>>>>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
>>>>>> No - the time to send pakets is when you start processing
>>>>>> the rings.
>>>>>>
>>>>>> And the time to do that is when you detect a kick on
>>>>>> an eventfd, not when said fd is set.
>>>>>>
>>>>> Probably not. What if guest is only doing receiving?
>>>> Clarification: the kick can be on any VQs.
>>>> In your example, guest kicks after adding receive buffers.
>>> Yes, but refill only happens on we are lacking of receive buffers. It is
>>> not guaranteed to happen just after migration, we may have still have
>>> enough rx buffers for device to receive.
>> I think we also kick the backend after migration, do we not?
>> Further, DRIVER_OK can be used as a signal to start backend too.
>>
>>>>> In this case, you
>>>>> won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-16  5:29                                 ` Jason Wang
@ 2015-06-16  7:24                                   ` Thibaut Collet
  2015-06-16  8:05                                     ` Jason Wang
  0 siblings, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-16  7:24 UTC (permalink / raw)
  To: Jason Wang; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin

If my understanding is correct, on a resume operation we have the
following callback trace:
1. the virtio_pci_restore function, which calls all the restore callbacks
of the virtio devices
2. virtnet_restore, which calls the try_fill_recv function for each virtual queue
3. the try_fill_recv function kicks the virtual queue (through the
virtqueue_kick function)


On Tue, Jun 16, 2015 at 7:29 AM, Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 06/15/2015 08:12 PM, Thibaut Collet wrote:
>> After a resume operation the guest always kicks the backend for each
>> virtual queues.
>> A live migration does a suspend operation on the old host and a resume
>> operation on the new host. So the backend has a kick after migration.
>>
>> I have checked this point with a legacy guest (redhat 6-5 with kernel
>> version 2.6.32-431.29.2) and the kick occurs after migration or
>> resume.
>>
>> Jason have you an example of legacy guest that will not kick the
>> virtual queue after a resume ?
>
> I must miss something but migration should be transparent to guest.
> Could you show me the code that guest does the kick after migration?
>
>>
>> On Mon, Jun 15, 2015 at 10:44 AM, Michael S. Tsirkin <mst@redhat.com> wrote:
>>> On Mon, Jun 15, 2015 at 03:43:13PM +0800, Jason Wang wrote:
>>>>
>>>> On 06/12/2015 10:28 PM, Michael S. Tsirkin wrote:
>>>>> On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
>>>>>> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
>>>>>>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>>>>>>>> I am not sure to understand your remark:
>>>>>>>>
>>>>>>>>> It needs to be sent when backend is activated by guest kick
>>>>>>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>>>>>>>>> This does not happen when VM still runs on source.
>>>>>>>> Could you confirm rarp can be sent by backend when the
>>>>>>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
>>>>>>> No - the time to send pakets is when you start processing
>>>>>>> the rings.
>>>>>>>
>>>>>>> And the time to do that is when you detect a kick on
>>>>>>> an eventfd, not when said fd is set.
>>>>>>>
>>>>>> Probably not. What if guest is only doing receiving?
>>>>> Clarification: the kick can be on any VQs.
>>>>> In your example, guest kicks after adding receive buffers.
>>>> Yes, but refill only happens on we are lacking of receive buffers. It is
>>>> not guaranteed to happen just after migration, we may have still have
>>>> enough rx buffers for device to receive.
>>> I think we also kick the backend after migration, do we not?
>>> Further, DRIVER_OK can be used as a signal to start backend too.
>>>
>>>>>> In this case, you
>>>>>> won't detect any kick if you don't send the rarp first.
>

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-16  7:24                                   ` Thibaut Collet
@ 2015-06-16  8:05                                     ` Jason Wang
  2015-06-16  8:16                                       ` Thibaut Collet
  2015-06-18 15:16                                       ` Thibaut Collet
  0 siblings, 2 replies; 48+ messages in thread
From: Jason Wang @ 2015-06-16  8:05 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin



On 06/16/2015 03:24 PM, Thibaut Collet wrote:
> If my understanding is correct, on a resume operation, we have the
> following callback trace:
> 1. virtio_pci_restore function that calls all restore call back of
> virtio devices
> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
> 3. try_fill_recv function kicks the virtual queue (through
> virtqueue_kick function)

Yes, but this happens only after pm resume not migration. Migration is
totally transparent to guest.

>
>
> On Tue, Jun 16, 2015 at 7:29 AM, Jason Wang <jasowang@redhat.com> wrote:
>>
>> On 06/15/2015 08:12 PM, Thibaut Collet wrote:
>>> After a resume operation the guest always kicks the backend for each
>>> virtual queues.
>>> A live migration does a suspend operation on the old host and a resume
>>> operation on the new host. So the backend has a kick after migration.
>>>
>>> I have checked this point with a legacy guest (redhat 6-5 with kernel
>>> version 2.6.32-431.29.2) and the kick occurs after migration or
>>> resume.
>>>
>>> Jason have you an example of legacy guest that will not kick the
>>> virtual queue after a resume ?
>> I must miss something but migration should be transparent to guest.
>> Could you show me the code that guest does the kick after migration?
>>
>>> On Mon, Jun 15, 2015 at 10:44 AM, Michael S. Tsirkin <mst@redhat.com> wrote:
>>>> On Mon, Jun 15, 2015 at 03:43:13PM +0800, Jason Wang wrote:
>>>>> On 06/12/2015 10:28 PM, Michael S. Tsirkin wrote:
>>>>>> On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
>>>>>>> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
>>>>>>>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>>>>>>>>> I am not sure to understand your remark:
>>>>>>>>>
>>>>>>>>>> It needs to be sent when backend is activated by guest kick
>>>>>>>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>>>>>>>>>> This does not happen when VM still runs on source.
>>>>>>>>> Could you confirm rarp can be sent by backend when the
>>>>>>>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
>>>>>>>> No - the time to send pakets is when you start processing
>>>>>>>> the rings.
>>>>>>>>
>>>>>>>> And the time to do that is when you detect a kick on
>>>>>>>> an eventfd, not when said fd is set.
>>>>>>>>
>>>>>>> Probably not. What if guest is only doing receiving?
>>>>>> Clarification: the kick can be on any VQs.
>>>>>> In your example, guest kicks after adding receive buffers.
>>>>> Yes, but refill only happens on we are lacking of receive buffers. It is
>>>>> not guaranteed to happen just after migration, we may have still have
>>>>> enough rx buffers for device to receive.
>>>> I think we also kick the backend after migration, do we not?
>>>> Further, DRIVER_OK can be used as a signal to start backend too.
>>>>
>>>>>>> In this case, you
>>>>>>> won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-16  8:05                                     ` Jason Wang
@ 2015-06-16  8:16                                       ` Thibaut Collet
  2015-06-17  4:16                                         ` Jason Wang
  2015-06-18 15:16                                       ` Thibaut Collet
  1 sibling, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-16  8:16 UTC (permalink / raw)
  To: Jason Wang; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin

For a live migration my understanding is that there is a suspend/resume operation:
- The VM image is regularly copied from the old host to the new one
(pages modified by VM operation can be copied several times)
- As soon as there are only a few pages left to copy, the VM is suspended on
the old host, the last pages are copied, and the VM is resumed on the
new host. Migration is not totally transparent to the guest, which has a
small period of unavailability.



On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
>> If my understanding is correct, on a resume operation, we have the
>> following callback trace:
>> 1. virtio_pci_restore function that calls all restore call back of
>> virtio devices
>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
>> 3. try_fill_recv function kicks the virtual queue (through
>> virtqueue_kick function)
>
> Yes, but this happens only after pm resume not migration. Migration is
> totally transparent to guest.
>
>>
>>
>> On Tue, Jun 16, 2015 at 7:29 AM, Jason Wang <jasowang@redhat.com> wrote:
>>>
>>> On 06/15/2015 08:12 PM, Thibaut Collet wrote:
>>>> After a resume operation the guest always kicks the backend for each
>>>> virtual queues.
>>>> A live migration does a suspend operation on the old host and a resume
>>>> operation on the new host. So the backend has a kick after migration.
>>>>
>>>> I have checked this point with a legacy guest (redhat 6-5 with kernel
>>>> version 2.6.32-431.29.2) and the kick occurs after migration or
>>>> resume.
>>>>
>>>> Jason have you an example of legacy guest that will not kick the
>>>> virtual queue after a resume ?
>>> I must miss something but migration should be transparent to guest.
>>> Could you show me the code that guest does the kick after migration?
>>>
>>>> On Mon, Jun 15, 2015 at 10:44 AM, Michael S. Tsirkin <mst@redhat.com> wrote:
>>>>> On Mon, Jun 15, 2015 at 03:43:13PM +0800, Jason Wang wrote:
>>>>>> On 06/12/2015 10:28 PM, Michael S. Tsirkin wrote:
>>>>>>> On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
>>>>>>>> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
>>>>>>>>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>>>>>>>>>> I am not sure to understand your remark:
>>>>>>>>>>
>>>>>>>>>>> It needs to be sent when backend is activated by guest kick
>>>>>>>>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>>>>>>>>>>> This does not happen when VM still runs on source.
>>>>>>>>>> Could you confirm rarp can be sent by backend when the
>>>>>>>>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
>>>>>>>>> No - the time to send pakets is when you start processing
>>>>>>>>> the rings.
>>>>>>>>>
>>>>>>>>> And the time to do that is when you detect a kick on
>>>>>>>>> an eventfd, not when said fd is set.
>>>>>>>>>
>>>>>>>> Probably not. What if guest is only doing receiving?
>>>>>>> Clarification: the kick can be on any VQs.
>>>>>>> In your example, guest kicks after adding receive buffers.
>>>>>> Yes, but refill only happens on we are lacking of receive buffers. It is
>>>>>> not guaranteed to happen just after migration, we may have still have
>>>>>> enough rx buffers for device to receive.
>>>>> I think we also kick the backend after migration, do we not?
>>>>> Further, DRIVER_OK can be used as a signal to start backend too.
>>>>>
>>>>>>>> In this case, you
>>>>>>>> won't detect any kick if you don't send the rarp first.
>

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-16  8:16                                       ` Thibaut Collet
@ 2015-06-17  4:16                                         ` Jason Wang
  2015-06-17  6:42                                           ` Michael S. Tsirkin
  0 siblings, 1 reply; 48+ messages in thread
From: Jason Wang @ 2015-06-17  4:16 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin



On 06/16/2015 04:16 PM, Thibaut Collet wrote:
> For a live migration my understanding is there are a suspend resume operation:
> - The VM image is regularly copied from the old host to the new one
> (modified pages due to VM operation can be copied several time)
> - As soon as there are only few pages to copy the VM is suspended on
> the old host, the last pages are copied, and the VM is resumed on the
> new host. Migration is not totally transparent  to guest that has a
> small period of unavailability.

But the guest is not involved in any part of the migration process. Apart
from the performance drop, the guest should not notice the migration at all.

Btw, please use bottom-posting, which makes it easier for readers to follow
the context.

Thanks

>
>
> On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
>>
>> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
>>> If my understanding is correct, on a resume operation, we have the
>>> following callback trace:
>>> 1. virtio_pci_restore function that calls all restore call back of
>>> virtio devices
>>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
>>> 3. try_fill_recv function kicks the virtual queue (through
>>> virtqueue_kick function)
>> Yes, but this happens only after pm resume not migration. Migration is
>> totally transparent to guest.
>>
>>>
>>> On Tue, Jun 16, 2015 at 7:29 AM, Jason Wang <jasowang@redhat.com> wrote:
>>>> On 06/15/2015 08:12 PM, Thibaut Collet wrote:
>>>>> After a resume operation the guest always kicks the backend for each
>>>>> virtual queues.
>>>>> A live migration does a suspend operation on the old host and a resume
>>>>> operation on the new host. So the backend has a kick after migration.
>>>>>
>>>>> I have checked this point with a legacy guest (redhat 6-5 with kernel
>>>>> version 2.6.32-431.29.2) and the kick occurs after migration or
>>>>> resume.
>>>>>
>>>>> Jason have you an example of legacy guest that will not kick the
>>>>> virtual queue after a resume ?
>>>> I must miss something but migration should be transparent to guest.
>>>> Could you show me the code that guest does the kick after migration?
>>>>
>>>>> On Mon, Jun 15, 2015 at 10:44 AM, Michael S. Tsirkin <mst@redhat.com> wrote:
>>>>>> On Mon, Jun 15, 2015 at 03:43:13PM +0800, Jason Wang wrote:
>>>>>>> On 06/12/2015 10:28 PM, Michael S. Tsirkin wrote:
>>>>>>>> On Fri, Jun 12, 2015 at 03:55:33PM +0800, Jason Wang wrote:
>>>>>>>>> On 06/11/2015 08:13 PM, Michael S. Tsirkin wrote:
>>>>>>>>>> On Thu, Jun 11, 2015 at 02:10:48PM +0200, Thibaut Collet wrote:
>>>>>>>>>>> I am not sure to understand your remark:
>>>>>>>>>>>
>>>>>>>>>>>> It needs to be sent when backend is activated by guest kick
>>>>>>>>>>>> (in case of virtio 1, it's possible to use DRIVER_OK for this).
>>>>>>>>>>>> This does not happen when VM still runs on source.
>>>>>>>>>>> Could you confirm rarp can be sent by backend when the
>>>>>>>>>>> VHOST_USER_SET_VRING_KICK message is received by the backend ?
>>>>>>>>>> No - the time to send pakets is when you start processing
>>>>>>>>>> the rings.
>>>>>>>>>>
>>>>>>>>>> And the time to do that is when you detect a kick on
>>>>>>>>>> an eventfd, not when said fd is set.
>>>>>>>>>>
>>>>>>>>> Probably not. What if guest is only doing receiving?
>>>>>>>> Clarification: the kick can be on any VQs.
>>>>>>>> In your example, guest kicks after adding receive buffers.
>>>>>>> Yes, but refill only happens on we are lacking of receive buffers. It is
>>>>>>> not guaranteed to happen just after migration, we may have still have
>>>>>>> enough rx buffers for device to receive.
>>>>>> I think we also kick the backend after migration, do we not?
>>>>>> Further, DRIVER_OK can be used as a signal to start backend too.
>>>>>>
>>>>>>>>> In this case, you
>>>>>>>>> won't detect any kick if you don't send the rarp first.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-17  4:16                                         ` Jason Wang
@ 2015-06-17  6:42                                           ` Michael S. Tsirkin
  2015-06-17  7:05                                             ` Thibaut Collet
  0 siblings, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-17  6:42 UTC (permalink / raw)
  To: Jason Wang; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi

On Wed, Jun 17, 2015 at 12:16:09PM +0800, Jason Wang wrote:
> 
> 
> On 06/16/2015 04:16 PM, Thibaut Collet wrote:
> > For a live migration my understanding is there are a suspend resume operation:
> > - The VM image is regularly copied from the old host to the new one
> > (modified pages due to VM operation can be copied several time)
> > - As soon as there are only few pages to copy the VM is suspended on
> > the old host, the last pages are copied, and the VM is resumed on the
> > new host. Migration is not totally transparent  to guest that has a
> > small period of unavailability.
> 
> But guest is not involved in the any of the migration process. Unless
> the performance drop, guest should not notice the migration at all.

It is easy for backend to detect that guest started using the device,
e.g. detect DRIVER_OK status set.
I think that's enough to send RARPs.

> Btw, please use bottom-posting which is easier for reader to understand
> the context.
> 
> Thanks

I agree, please don't top-post.

-- 
MST

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-17  6:42                                           ` Michael S. Tsirkin
@ 2015-06-17  7:05                                             ` Thibaut Collet
  0 siblings, 0 replies; 48+ messages in thread
From: Thibaut Collet @ 2015-06-17  7:05 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

On Wed, Jun 17, 2015 at 8:42 AM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Wed, Jun 17, 2015 at 12:16:09PM +0800, Jason Wang wrote:
>>
>>
>> On 06/16/2015 04:16 PM, Thibaut Collet wrote:
>> > For a live migration my understanding is there are a suspend resume operation:
>> > - The VM image is regularly copied from the old host to the new one
>> > (modified pages due to VM operation can be copied several time)
>> > - As soon as there are only few pages to copy the VM is suspended on
>> > the old host, the last pages are copied, and the VM is resumed on the
>> > new host. Migration is not totally transparent  to guest that has a
>> > small period of unavailability.
>>
>> But guest is not involved in the any of the migration process. Unless
>> the performance drop, guest should not notice the migration at all.
>
> It is easy for backend to detect that guest started using the device,
> e.g. detect DRIVER_OK status set.
> I think that's enough to send RARPs.
>
>> Btw, please use bottom-posting which is easier for reader to understand
>> the context.
>>
>> Thanks
>
> I agree, please don't top-post.
>
> --
> MST

Sorry for top-posting rather than bottom-posting.

Regarding the use of DRIVER_OK, in your opinion, when is the best time
to test it?

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-16  8:05                                     ` Jason Wang
  2015-06-16  8:16                                       ` Thibaut Collet
@ 2015-06-18 15:16                                       ` Thibaut Collet
  2015-06-23  2:12                                         ` Jason Wang
  1 sibling, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-18 15:16 UTC (permalink / raw)
  To: Jason Wang; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin

On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
>> If my understanding is correct, on a resume operation, we have the
>> following callback trace:
>> 1. virtio_pci_restore function that calls all restore call back of
>> virtio devices
>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
>> 3. try_fill_recv function kicks the virtual queue (through
>> virtqueue_kick function)
>
> Yes, but this happens only after pm resume not migration. Migration is
> totally transparent to guest.
>

Hi Jason,

After a deeper look in the migration code of QEMU a resume event is
always sent when the live migration is finished.
On a live migration we have the following callback trace:
1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
autostart boolean to 1  and calls the qemu_start_incoming_migration
function (see function main of vl.c)
.....
2. call of process_incoming_migration function in
migration/migration.c file whatever the way to do the live migration
(tcp:, fd:, unix:, exec: ...)
3. call of process_incoming_migration_co function in migration/migration.c
4. call of vm_start function in vl.c (otherwise the migrated VM stays
in the paused state; the autostart boolean is set to 1 by the main
function in vl.c)
5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
6. call of qapi_event_send_resume function that sends a resume event to the VM

So when a live migration is ended:
1. a resume event is sent to the guest
2. On the reception of this resume event the virtual queue are kicked
by the guest
3. Backend vhost user catches this kick and can emit a RARP to guest
that does not support GUEST_ANNOUNCE

This solution, as solution based on detection of DRIVER_OK status
suggested by Michael, allows backend to send the RARP to legacy guest
without involving QEMU and add ioctl to vhost-user.

Vhost user backend is free to implement one of these two solutions.

The single drawback of these two solutions is useless RARP sending in
some cases (reboot, ...). These messages are harmless and probably not
blocking for a "light" patch to support live migration with vhost
user.

If you agree

1. The first patch must be updated by:
      - removing the warning trace
      - setting the error trace inside a static bool flag to only
print this once
      - removing the vhost_net_inject_rarp function (no more useful)
2. The second patch can be removed. Management of legacy guest for
rarp sending can be done by modifications in backend without any
change in QEMU.

Best regards.

Thibaut.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-18 15:16                                       ` Thibaut Collet
@ 2015-06-23  2:12                                         ` Jason Wang
  2015-06-23  5:49                                           ` Michael S. Tsirkin
  0 siblings, 1 reply; 48+ messages in thread
From: Jason Wang @ 2015-06-23  2:12 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin



On 06/18/2015 11:16 PM, Thibaut Collet wrote:
> On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
>>
>> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
>>> If my understanding is correct, on a resume operation, we have the
>>> following callback trace:
>>> 1. virtio_pci_restore function that calls all restore call back of
>>> virtio devices
>>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
>>> 3. try_fill_recv function kicks the virtual queue (through
>>> virtqueue_kick function)
>> Yes, but this happens only after pm resume not migration. Migration is
>> totally transparent to guest.
>>
> Hi Jason,
>
> After a deeper look in the migration code of QEMU a resume event is
> always sent when the live migration is finished.
> On a live migration we have the following callback trace:
> 1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
> autostart boolean to 1  and calls the qemu_start_incoming_migration
> function (see function main of vl.c)
> .....
> 2. call of process_incoming_migration function in
> migration/migration.c file whatever the way to do the live migration
> (tcp:, fd:, unix:, exec: ...)
> 3. call of process_incoming_migration_co function in migration/migration.c
> 4. call of vm_start function in vl.c (otherwise the migrated VM stay
> in the pause state, the autostart boolean is set to 1 by the main
> function in vl.c)
> 5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
> 6. call of qapi_event_send_resume function that ends a resume event to the VM

AFAIK, this function sends resume event to qemu monitor not VM.

>
> So when a live migration is ended:
> 1. a resume event is sent to the guest
> 2. On the reception of this resume event the virtual queue are kicked
> by the guest
> 3. Backend vhost user catches this kick and can emit a RARP to guest
> that does not support GUEST_ANNOUNCE
>
> This solution, as solution based on detection of DRIVER_OK status
> suggested by Michael, allows backend to send the RARP to legacy guest
> without involving QEMU and add ioctl to vhost-user.

A question here is did vhost-user code pass status to the backend? If
not, how can userspace backend detect DRIVER_OK?

>
> Vhost user backend is free to implement one of this two solutions.
>
> The single drawback of these two solutions is useless RARP sending in
> some case (reboot, ...). These messages are harmless and probably not
> blocking for a "light" patch to support live migration with vhost
> user.
>
> If you agree
>
> 1. The first patch must be updated by:
>       - removing the warning trace
>       - setting the error trace inside a static bool flag to only
> print this once
>       - removing the vhost_net_inject_rarp function (no more useful)
> 2. The second patch can be removed. Management of legacy guest for
> rarp sending can be done by modifications in backend without any
> change in QEMU.
>
> Best regards.
>
> Thibaut.
>

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-23  2:12                                         ` Jason Wang
@ 2015-06-23  5:49                                           ` Michael S. Tsirkin
  2015-06-24  8:31                                             ` Jason Wang
  0 siblings, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-23  5:49 UTC (permalink / raw)
  To: Jason Wang; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi

On Tue, Jun 23, 2015 at 10:12:17AM +0800, Jason Wang wrote:
> 
> 
> On 06/18/2015 11:16 PM, Thibaut Collet wrote:
> > On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
> >>
> >> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
> >>> If my understanding is correct, on a resume operation, we have the
> >>> following callback trace:
> >>> 1. virtio_pci_restore function that calls all restore call back of
> >>> virtio devices
> >>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
> >>> 3. try_fill_recv function kicks the virtual queue (through
> >>> virtqueue_kick function)
> >> Yes, but this happens only after pm resume not migration. Migration is
> >> totally transparent to guest.
> >>
> > Hi Jason,
> >
> > After a deeper look in the migration code of QEMU a resume event is
> > always sent when the live migration is finished.
> > On a live migration we have the following callback trace:
> > 1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
> > autostart boolean to 1  and calls the qemu_start_incoming_migration
> > function (see function main of vl.c)
> > .....
> > 2. call of process_incoming_migration function in
> > migration/migration.c file whatever the way to do the live migration
> > (tcp:, fd:, unix:, exec: ...)
> > 3. call of process_incoming_migration_co function in migration/migration.c
> > 4. call of vm_start function in vl.c (otherwise the migrated VM stay
> > in the pause state, the autostart boolean is set to 1 by the main
> > function in vl.c)
> > 5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
> > 6. call of qapi_event_send_resume function that ends a resume event to the VM
> 
> AFAIK, this function sends resume event to qemu monitor not VM.
> 
> >
> > So when a live migration is ended:
> > 1. a resume event is sent to the guest
> > 2. On the reception of this resume event the virtual queue are kicked
> > by the guest
> > 3. Backend vhost user catches this kick and can emit a RARP to guest
> > that does not support GUEST_ANNOUNCE
> >
> > This solution, as solution based on detection of DRIVER_OK status
> > suggested by Michael, allows backend to send the RARP to legacy guest
> > without involving QEMU and add ioctl to vhost-user.
> 
> A question here is did vhost-user code pass status to the backend? If
> not, how can userspace backend detect DRIVER_OK?

Sorry, I must have been unclear.
vhost core calls VHOST_NET_SET_BACKEND on DRIVER_OK.
Unfortunately vhost user currently translates it to VHOST_USER_NONE.

As a work around, I think kicking ioeventfds once you get
VHOST_NET_SET_BACKEND will work.



> >
> > Vhost user backend is free to implement one of this two solutions.
> >
> > The single drawback of these two solutions is useless RARP sending in
> > some case (reboot, ...). These messages are harmless and probably not
> > blocking for a "light" patch to support live migration with vhost
> > user.
> >
> > If you agree
> >
> > 1. The first patch must be updated by:
> >       - removing the warning trace
> >       - setting the error trace inside a static bool flag to only
> > print this once
> >       - removing the vhost_net_inject_rarp function (no more useful)
> > 2. The second patch can be removed. Management of legacy guest for
> > rarp sending can be done by modifications in backend without any
> > change in QEMU.
> >
> > Best regards.
> >
> > Thibaut.
> >

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration
  2015-06-10 13:43 ` [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration Thibaut Collet
  2015-06-10 13:52   ` Michael S. Tsirkin
@ 2015-06-23  6:01   ` Michael S. Tsirkin
  1 sibling, 0 replies; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-23  6:01 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: jasowang, qemu-devel, stefanha

On Wed, Jun 10, 2015 at 03:43:02PM +0200, Thibaut Collet wrote:
> Some vhost client/backend are able to support live migration.
> To provide this service the following features must be added:
> 1. GARP from guest after live migration:
>    the VIRTIO_NET_F_GUEST_ANNOUNCE capability is added to vhost-net when netdev
>    backend is vhost-user to let virtio-net NIC manages GARP after migration.
> 2. RARP on vhost-user:
>    After a live migration RARP are automatically sent to any interfaces. A
>    receive callback is added to vhost-user to catch this packet. If guest
>    supports VIRTIO_NET_F_GUEST_ANNOUNCE this packet is discarded to avoid
>    message duplication (already done by virtio-net NIC). For legacy guest
>    without VIRTIO_NET_F_GUEST_ANNOUNCE a warning message is displayed to
>    alert the user. RARP must be sent to the vhost client/backend.
> 
> Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>

So let's do it gradually
    patch 1. negotiate VIRTIO_NET_F_GUEST_ANNOUNCE, add empty recv
             callback to make it not crash
    patch 2. more tricks for legacy guests

then I can apply patch 1 straight away.

> ---
>  hw/net/vhost_net.c      |   15 +++++++++++++++
>  include/net/vhost_net.h |    1 +
>  net/vhost-user.c        |   21 +++++++++++++++++++--
>  3 files changed, 35 insertions(+), 2 deletions(-)
> 
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 426b23e..4a42325 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
>      VIRTIO_NET_F_CTRL_MAC_ADDR,
>      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
>  
> +    VIRTIO_NET_F_GUEST_ANNOUNCE,
> +
>      VIRTIO_NET_F_MQ,
>  
>      VHOST_INVALID_FEATURE_BIT
> @@ -365,6 +367,15 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
>      assert(r >= 0);
>  }
>  
> +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf, size_t size)
> +{
> +    if ((net->dev.acked_features & (1 << VIRTIO_NET_F_GUEST_ANNOUNCE)) == 0) {
> +        fprintf(stderr,
> +                "Warning: Guest with no VIRTIO_NET_F_GUEST_ANNOUNCE support. RARP must be sent by vhost-user backend\n");
> +        fflush(stderr);
> +    }
> +}
> +
>  void vhost_net_cleanup(struct vhost_net *net)
>  {
>      vhost_dev_cleanup(&net->dev);
> @@ -427,6 +438,10 @@ void vhost_net_stop(VirtIODevice *dev,
>  {
>  }
>  
> +void vhost_net_inject_rarp(struct vhost_net *net, const uint8_t *buf, size_t size);
> +{
> +}
> +
>  void vhost_net_cleanup(struct vhost_net *net)
>  {
>  }
> diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> index b1c18a3..e82a0f9 100644
> --- a/include/net/vhost_net.h
> +++ b/include/net/vhost_net.h
> @@ -20,6 +20,7 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
>  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
>  void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
>  
> +void vhost_net_inject_rarp(VHostNetState *net, const uint8_t *buf, size_t size);
>  void vhost_net_cleanup(VHostNetState *net);
>  
>  unsigned vhost_net_get_features(VHostNetState *net, unsigned features);
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 8d26728..69c2313 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -66,6 +66,24 @@ static void vhost_user_stop(VhostUserState *s)
>      s->vhost_net = 0;
>  }
>  
> +static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t *buf,
> +                                  size_t size)
> +{
> +    if (size == 60) {
> +        /* Assume it is a RARP request sent automatically after a
> +         * live migration */
> +        /* The RARP must be sent if guest does not support
> +         * VIRTIO_NET_F_GUEST_ANNOUNCE */
> +        VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
> +
> +        vhost_net_inject_rarp(s->vhost_net, buf, size);
> +    } else {
> +        fprintf(stderr,"Vhost user receives unexpected packets\n");
> +        fflush(stderr);
> +    }
> +    return size;
> +}
> +
>  static void vhost_user_cleanup(NetClientState *nc)
>  {
>      VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
> @@ -91,6 +109,7 @@ static bool vhost_user_has_ufo(NetClientState *nc)
>  static NetClientInfo net_vhost_user_info = {
>          .type = NET_CLIENT_OPTIONS_KIND_VHOST_USER,
>          .size = sizeof(VhostUserState),
> +        .receive = vhost_user_receive,
>          .cleanup = vhost_user_cleanup,
>          .has_vnet_hdr = vhost_user_has_vnet_hdr,
>          .has_ufo = vhost_user_has_ufo,
> @@ -147,8 +166,6 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>  
>          s = DO_UPCAST(VhostUserState, nc, nc);
>  
> -        /* We don't provide a receive callback */
> -        s->nc.receive_disabled = 1;
>          s->chr = chr;
>          s->nc.queue_index = i;
>  
> -- 
> 1.7.10.4

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-23  5:49                                           ` Michael S. Tsirkin
@ 2015-06-24  8:31                                             ` Jason Wang
  2015-06-24 11:05                                               ` Michael S. Tsirkin
  0 siblings, 1 reply; 48+ messages in thread
From: Jason Wang @ 2015-06-24  8:31 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi



On 06/23/2015 01:49 PM, Michael S. Tsirkin wrote:
> On Tue, Jun 23, 2015 at 10:12:17AM +0800, Jason Wang wrote:
>> > 
>> > 
>> > On 06/18/2015 11:16 PM, Thibaut Collet wrote:
>>> > > On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
>>>> > >>
>>>> > >> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
>>>>> > >>> If my understanding is correct, on a resume operation, we have the
>>>>> > >>> following callback trace:
>>>>> > >>> 1. virtio_pci_restore function that calls all restore call back of
>>>>> > >>> virtio devices
>>>>> > >>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
>>>>> > >>> 3. try_fill_recv function kicks the virtual queue (through
>>>>> > >>> virtqueue_kick function)
>>>> > >> Yes, but this happens only after pm resume not migration. Migration is
>>>> > >> totally transparent to guest.
>>>> > >>
>>> > > Hi Jason,
>>> > >
>>> > > After a deeper look in the migration code of QEMU a resume event is
>>> > > always sent when the live migration is finished.
>>> > > On a live migration we have the following callback trace:
>>> > > 1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
>>> > > autostart boolean to 1  and calls the qemu_start_incoming_migration
>>> > > function (see function main of vl.c)
>>> > > .....
>>> > > 2. call of process_incoming_migration function in
>>> > > migration/migration.c file whatever the way to do the live migration
>>> > > (tcp:, fd:, unix:, exec: ...)
>>> > > 3. call of process_incoming_migration_co function in migration/migration.c
>>> > > 4. call of vm_start function in vl.c (otherwise the migrated VM stay
>>> > > in the pause state, the autostart boolean is set to 1 by the main
>>> > > function in vl.c)
>>> > > 5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
>>> > > 6. call of qapi_event_send_resume function that ends a resume event to the VM
>> > 
>> > AFAIK, this function sends resume event to qemu monitor not VM.
>> > 
>>> > >
>>> > > So when a live migration is ended:
>>> > > 1. a resume event is sent to the guest
>>> > > 2. On the reception of this resume event the virtual queue are kicked
>>> > > by the guest
>>> > > 3. Backend vhost user catches this kick and can emit a RARP to guest
>>> > > that does not support GUEST_ANNOUNCE
>>> > >
>>> > > This solution, as solution based on detection of DRIVER_OK status
>>> > > suggested by Michael, allows backend to send the RARP to legacy guest
>>> > > without involving QEMU and add ioctl to vhost-user.
>> > 
>> > A question here is did vhost-user code pass status to the backend? If
>> > not, how can userspace backend detect DRIVER_OK?
> Sorry, I must have been unclear.
> vhost core calls VHOST_NET_SET_BACKEND on DRIVER_OK.
> Unfortunately vhost user currently translates it to VHOST_USER_NONE.

Looks like VHOST_NET_SET_BACKEND was only used for tap backend.

> As a work around, I think kicking ioeventfds once you get
> VHOST_NET_SET_BACKEND will work.

Maybe just a eventfd_set() in vhost_net_start(). But is this
"workaround" elegant enough to be documented? Is it better to do this
explicitly with a new feature?

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-24  8:31                                             ` Jason Wang
@ 2015-06-24 11:05                                               ` Michael S. Tsirkin
  2015-06-25  9:59                                                 ` Jason Wang
  0 siblings, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-24 11:05 UTC (permalink / raw)
  To: Jason Wang; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi

On Wed, Jun 24, 2015 at 04:31:15PM +0800, Jason Wang wrote:
> 
> 
> On 06/23/2015 01:49 PM, Michael S. Tsirkin wrote:
> > On Tue, Jun 23, 2015 at 10:12:17AM +0800, Jason Wang wrote:
> >> > 
> >> > 
> >> > On 06/18/2015 11:16 PM, Thibaut Collet wrote:
> >>> > > On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
> >>>> > >>
> >>>> > >> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
> >>>>> > >>> If my understanding is correct, on a resume operation, we have the
> >>>>> > >>> following callback trace:
> >>>>> > >>> 1. virtio_pci_restore function that calls all restore call back of
> >>>>> > >>> virtio devices
> >>>>> > >>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
> >>>>> > >>> 3. try_fill_recv function kicks the virtual queue (through
> >>>>> > >>> virtqueue_kick function)
> >>>> > >> Yes, but this happens only after pm resume not migration. Migration is
> >>>> > >> totally transparent to guest.
> >>>> > >>
> >>> > > Hi Jason,
> >>> > >
> >>> > > After a deeper look in the migration code of QEMU a resume event is
> >>> > > always sent when the live migration is finished.
> >>> > > On a live migration we have the following callback trace:
> >>> > > 1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
> >>> > > autostart boolean to 1  and calls the qemu_start_incoming_migration
> >>> > > function (see function main of vl.c)
> >>> > > .....
> >>> > > 2. call of process_incoming_migration function in
> >>> > > migration/migration.c file whatever the way to do the live migration
> >>> > > (tcp:, fd:, unix:, exec: ...)
> >>> > > 3. call of process_incoming_migration_co function in migration/migration.c
> >>> > > 4. call of vm_start function in vl.c (otherwise the migrated VM stay
> >>> > > in the pause state, the autostart boolean is set to 1 by the main
> >>> > > function in vl.c)
> >>> > > 5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
> >>> > > 6. call of qapi_event_send_resume function that ends a resume event to the VM
> >> > 
> >> > AFAIK, this function sends resume event to qemu monitor not VM.
> >> > 
> >>> > >
> >>> > > So when a live migration is ended:
> >>> > > 1. a resume event is sent to the guest
> >>> > > 2. On the reception of this resume event the virtual queue are kicked
> >>> > > by the guest
> >>> > > 3. Backend vhost user catches this kick and can emit a RARP to guest
> >>> > > that does not support GUEST_ANNOUNCE
> >>> > >
> >>> > > This solution, as solution based on detection of DRIVER_OK status
> >>> > > suggested by Michael, allows backend to send the RARP to legacy guest
> >>> > > without involving QEMU and add ioctl to vhost-user.
> >> > 
> >> > A question here is did vhost-user code pass status to the backend? If
> >> > not, how can userspace backend detect DRIVER_OK?
> > Sorry, I must have been unclear.
> > vhost core calls VHOST_NET_SET_BACKEND on DRIVER_OK.
> > Unfortunately vhost user currently translates it to VHOST_USER_NONE.
> 
> Looks like VHOST_NET_SET_BACKEND was only used for tap backend.
> 
> > As a work around, I think kicking ioeventfds once you get
> > VHOST_NET_SET_BACKEND will work.
> 
> Maybe just a eventfd_set() in vhost_net_start(). But is this
> "workaround" elegant enough to be documented? Is it better to do this
> explicitly with a new feature?

If you are going to do this anyway, there are a couple of other changes
we should do, in particular, decide what we want to do with control vq.

-- 
MST

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-24 11:05                                               ` Michael S. Tsirkin
@ 2015-06-25  9:59                                                 ` Jason Wang
  2015-06-25 11:01                                                   ` Thibaut Collet
  0 siblings, 1 reply; 48+ messages in thread
From: Jason Wang @ 2015-06-25  9:59 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi



On 06/24/2015 07:05 PM, Michael S. Tsirkin wrote:
> On Wed, Jun 24, 2015 at 04:31:15PM +0800, Jason Wang wrote:
>>
>> On 06/23/2015 01:49 PM, Michael S. Tsirkin wrote:
>>> On Tue, Jun 23, 2015 at 10:12:17AM +0800, Jason Wang wrote:
>>>>>
>>>>> On 06/18/2015 11:16 PM, Thibaut Collet wrote:
>>>>>>> On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
>>>>>>>>> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
>>>>>>>>>>> If my understanding is correct, on a resume operation, we have the
>>>>>>>>>>> following callback trace:
>>>>>>>>>>> 1. virtio_pci_restore function that calls all restore call back of
>>>>>>>>>>> virtio devices
>>>>>>>>>>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
>>>>>>>>>>> 3. try_fill_recv function kicks the virtual queue (through
>>>>>>>>>>> virtqueue_kick function)
>>>>>>>>> Yes, but this happens only after pm resume not migration. Migration is
>>>>>>>>> totally transparent to guest.
>>>>>>>>>
>>>>>>> Hi Jason,
>>>>>>>
>>>>>>> After a deeper look in the migration code of QEMU a resume event is
>>>>>>> always sent when the live migration is finished.
>>>>>>> On a live migration we have the following callback trace:
>>>>>>> 1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
>>>>>>> autostart boolean to 1  and calls the qemu_start_incoming_migration
>>>>>>> function (see function main of vl.c)
>>>>>>> .....
>>>>>>> 2. call of process_incoming_migration function in
>>>>>>> migration/migration.c file whatever the way to do the live migration
>>>>>>> (tcp:, fd:, unix:, exec: ...)
>>>>>>> 3. call of process_incoming_migration_co function in migration/migration.c
>>>>>>> 4. call of vm_start function in vl.c (otherwise the migrated VM stay
>>>>>>> in the pause state, the autostart boolean is set to 1 by the main
>>>>>>> function in vl.c)
>>>>>>> 5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
>>>>>>> 6. call of qapi_event_send_resume function that ends a resume event to the VM
>>>>> AFAIK, this function sends resume event to qemu monitor not VM.
>>>>>
>>>>>>> So when a live migration is ended:
>>>>>>> 1. a resume event is sent to the guest
>>>>>>> 2. On the reception of this resume event the virtual queue are kicked
>>>>>>> by the guest
>>>>>>> 3. Backend vhost user catches this kick and can emit a RARP to guest
>>>>>>> that does not support GUEST_ANNOUNCE
>>>>>>>
>>>>>>> This solution, as solution based on detection of DRIVER_OK status
>>>>>>> suggested by Michael, allows backend to send the RARP to legacy guest
>>>>>>> without involving QEMU and add ioctl to vhost-user.
>>>>> A question here is did vhost-user code pass status to the backend? If
>>>>> not, how can userspace backend detect DRIVER_OK?
>>> Sorry, I must have been unclear.
>>> vhost core calls VHOST_NET_SET_BACKEND on DRIVER_OK.
>>> Unfortunately vhost user currently translates it to VHOST_USER_NONE.
>> Looks like VHOST_NET_SET_BACKEND was only used for tap backend.
>>
>>> As a work around, I think kicking ioeventfds once you get
>>> VHOST_NET_SET_BACKEND will work.
>> Maybe just a eventfd_set() in vhost_net_start(). But is this
>> "workaround" elegant enough to be documented? Is it better to do this
>> explicitly with a new feature?
> If you are going to do this anyway, there are a couple of other changes
> we should do, in particular, decide what we want to do with control vq.
>

If I understand correctly, you mean VIRTIO_NET_CTRL_MQ and
VIRTIO_NET_CTRL_GUEST_OFFLOADS? Looks like both of these were broken.
Need more thought, maybe new kinds of requests.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-25  9:59                                                 ` Jason Wang
@ 2015-06-25 11:01                                                   ` Thibaut Collet
  2015-06-25 12:53                                                     ` Michael S. Tsirkin
  0 siblings, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-25 11:01 UTC (permalink / raw)
  To: Jason Wang; +Cc: qemu-devel, Stefan Hajnoczi, Michael S. Tsirkin

On Thu, Jun 25, 2015 at 11:59 AM, Jason Wang <jasowang@redhat.com> wrote:
>
>
>
> On 06/24/2015 07:05 PM, Michael S. Tsirkin wrote:
> > On Wed, Jun 24, 2015 at 04:31:15PM +0800, Jason Wang wrote:
> >>
> >> On 06/23/2015 01:49 PM, Michael S. Tsirkin wrote:
> >>> On Tue, Jun 23, 2015 at 10:12:17AM +0800, Jason Wang wrote:
> >>>>>
> >>>>> On 06/18/2015 11:16 PM, Thibaut Collet wrote:
> >>>>>>> On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
> >>>>>>>>> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
> >>>>>>>>>>> If my understanding is correct, on a resume operation, we have the
> >>>>>>>>>>> following callback trace:
> >>>>>>>>>>> 1. virtio_pci_restore function that calls all restore call back of
> >>>>>>>>>>> virtio devices
> >>>>>>>>>>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
> >>>>>>>>>>> 3. try_fill_recv function kicks the virtual queue (through
> >>>>>>>>>>> virtqueue_kick function)
> >>>>>>>>> Yes, but this happens only after pm resume not migration. Migration is
> >>>>>>>>> totally transparent to guest.
> >>>>>>>>>
> >>>>>>> Hi Jason,
> >>>>>>>
> >>>>>>> After a deeper look in the migration code of QEMU a resume event is
> >>>>>>> always sent when the live migration is finished.
> >>>>>>> On a live migration we have the following callback trace:
> >>>>>>> 1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
> >>>>>>> autostart boolean to 1  and calls the qemu_start_incoming_migration
> >>>>>>> function (see function main of vl.c)
> >>>>>>> .....
> >>>>>>> 2. call of process_incoming_migration function in
> >>>>>>> migration/migration.c file whatever the way to do the live migration
> >>>>>>> (tcp:, fd:, unix:, exec: ...)
> >>>>>>> 3. call of process_incoming_migration_co function in migration/migration.c
> >>>>>>> 4. call of vm_start function in vl.c (otherwise the migrated VM stay
> >>>>>>> in the pause state, the autostart boolean is set to 1 by the main
> >>>>>>> function in vl.c)
> >>>>>>> 5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
> >>>>>>> 6. call of qapi_event_send_resume function that ends a resume event to the VM
> >>>>> AFAIK, this function sends resume event to qemu monitor not VM.
> >>>>>
> >>>>>>> So when a live migration is ended:
> >>>>>>> 1. a resume event is sent to the guest
> >>>>>>> 2. On the reception of this resume event the virtual queue are kicked
> >>>>>>> by the guest
> >>>>>>> 3. Backend vhost user catches this kick and can emit a RARP to guest
> >>>>>>> that does not support GUEST_ANNOUNCE
> >>>>>>>
> >>>>>>> This solution, as solution based on detection of DRIVER_OK status
> >>>>>>> suggested by Michael, allows backend to send the RARP to legacy guest
> >>>>>>> without involving QEMU and add ioctl to vhost-user.
> >>>>> A question here is did vhost-user code pass status to the backend? If
> >>>>> not, how can userspace backend detect DRIVER_OK?
> >>> Sorry, I must have been unclear.
> >>> vhost core calls VHOST_NET_SET_BACKEND on DRIVER_OK.
> >>> Unfortunately vhost user currently translates it to VHOST_USER_NONE.
> >> Looks like VHOST_NET_SET_BACKEND was only used for tap backend.
> >>
> >>> As a work around, I think kicking ioeventfds once you get
> >>> VHOST_NET_SET_BACKEND will work.
> >> Maybe just a eventfd_set() in vhost_net_start(). But is this
> >> "workaround" elegant enough to be documented? Is it better to do this
> >> explicitly with a new feature?
> > If you are going to do this anyway, there are a couple of other changes
> > we should do, in particular, decide what we want to do with control vq.
> >
>
> If I understand correctly, you mean VIRTIO_NET_CTRL_MQ and
> VIRTIO_NET_CTRL_GUEST_OFFLOADS? Looks like both of these were broken.
> Need more thought, maybe new kinds of requests.
>
>

Are there any objections to add VHOST_NET_SET_BACKEND support to vhost
user with a patch like that:


 hw/net/vhost_net.c     |    8 ++++++++
 hw/virtio/vhost-user.c |   10 +++++++++-
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 907e002..7a008c0 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -234,6 +234,14 @@ static int vhost_net_start_one(struct vhost_net *net,
                 goto fail;
             }
         }
+    } else if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+         file.fd = 0;
+         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
+            const VhostOps *vhost_ops = net->dev.vhost_ops;
+            int r = vhost_ops->vhost_call(&net->dev, VHOST_NET_SET_BACKEND,
+                                          &file);
+            assert(r >= 0);
+        }
     }
     return 0;
 fail:
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index d6f2163..32c6bd9 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_SET_VRING_KICK = 12,
     VHOST_USER_SET_VRING_CALL = 13,
     VHOST_USER_SET_VRING_ERR = 14,
+    VHOST_USER_NET_SET_BACKEND = 15,
     VHOST_USER_MAX
 } VhostUserRequest;

@@ -104,7 +105,8 @@ static unsigned long int
ioctl_to_vhost_user_request[VHOST_USER_MAX] = {
     VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
     VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
     VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
-    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
+    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
+    VHOST_NET_SET_BACKEND   /* VHOST_USER_NET_SET_BACKEND */
 };

 static VhostUserRequest vhost_user_request_translate(unsigned long int request)
@@ -287,6 +289,12 @@ static int vhost_user_call(struct vhost_dev *dev,
unsigned long int request,
             msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
         }
         break;
+
+    case VHOST_NET_SET_BACKEND:
+        memcpy(&msg.file, arg, sizeof(struct vhost_vring_state));
+        msg.size = sizeof(m.state);
+        break;
+
     default:
         error_report("vhost-user trying to send unhandled ioctl");
         return -1;


This message will be sent when the guest is ready and can be used by the vhost
user backend to send a RARP to a legacy guest.

This solution avoids adding a new message and has no impact on the control vq.

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-25 11:01                                                   ` Thibaut Collet
@ 2015-06-25 12:53                                                     ` Michael S. Tsirkin
  2015-06-25 14:22                                                       ` Thibaut Collet
  0 siblings, 1 reply; 48+ messages in thread
From: Michael S. Tsirkin @ 2015-06-25 12:53 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

On Thu, Jun 25, 2015 at 01:01:29PM +0200, Thibaut Collet wrote:
> On Thu, Jun 25, 2015 at 11:59 AM, Jason Wang <jasowang@redhat.com> wrote:
> >
> >
> >
> > On 06/24/2015 07:05 PM, Michael S. Tsirkin wrote:
> > > On Wed, Jun 24, 2015 at 04:31:15PM +0800, Jason Wang wrote:
> > >>
> > >> On 06/23/2015 01:49 PM, Michael S. Tsirkin wrote:
> > >>> On Tue, Jun 23, 2015 at 10:12:17AM +0800, Jason Wang wrote:
> > >>>>>
> > >>>>> On 06/18/2015 11:16 PM, Thibaut Collet wrote:
> > >>>>>>> On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
> > >>>>>>>>> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
> > >>>>>>>>>>> If my understanding is correct, on a resume operation, we have the
> > >>>>>>>>>>> following callback trace:
> > >>>>>>>>>>> 1. virtio_pci_restore function that calls all restore call back of
> > >>>>>>>>>>> virtio devices
> > >>>>>>>>>>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
> > >>>>>>>>>>> 3. try_fill_recv function kicks the virtual queue (through
> > >>>>>>>>>>> virtqueue_kick function)
> > >>>>>>>>> Yes, but this happens only after pm resume not migration. Migration is
> > >>>>>>>>> totally transparent to guest.
> > >>>>>>>>>
> > >>>>>>> Hi Jason,
> > >>>>>>>
> > >>>>>>> After a deeper look in the migration code of QEMU a resume event is
> > >>>>>>> always sent when the live migration is finished.
> > >>>>>>> On a live migration we have the following callback trace:
> > >>>>>>> 1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
> > >>>>>>> autostart boolean to 1  and calls the qemu_start_incoming_migration
> > >>>>>>> function (see function main of vl.c)
> > >>>>>>> .....
> > >>>>>>> 2. call of process_incoming_migration function in
> > >>>>>>> migration/migration.c file whatever the way to do the live migration
> > >>>>>>> (tcp:, fd:, unix:, exec: ...)
> > >>>>>>> 3. call of process_incoming_migration_co function in migration/migration.c
> > >>>>>>> 4. call of vm_start function in vl.c (otherwise the migrated VM stay
> > >>>>>>> in the pause state, the autostart boolean is set to 1 by the main
> > >>>>>>> function in vl.c)
> > >>>>>>> 5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
> > >>>>>>> 6. call of qapi_event_send_resume function that ends a resume event to the VM
> > >>>>> AFAIK, this function sends resume event to qemu monitor not VM.
> > >>>>>
> > >>>>>>> So when a live migration is ended:
> > >>>>>>> 1. a resume event is sent to the guest
> > >>>>>>> 2. On the reception of this resume event the virtual queue are kicked
> > >>>>>>> by the guest
> > >>>>>>> 3. Backend vhost user catches this kick and can emit a RARP to guest
> > >>>>>>> that does not support GUEST_ANNOUNCE
> > >>>>>>>
> > >>>>>>> This solution, as solution based on detection of DRIVER_OK status
> > >>>>>>> suggested by Michael, allows backend to send the RARP to legacy guest
> > >>>>>>> without involving QEMU and add ioctl to vhost-user.
> > >>>>> A question here is did vhost-user code pass status to the backend? If
> > >>>>> not, how can userspace backend detect DRIVER_OK?
> > >>> Sorry, I must have been unclear.
> > >>> vhost core calls VHOST_NET_SET_BACKEND on DRIVER_OK.
> > >>> Unfortunately vhost user currently translates it to VHOST_USER_NONE.
> > >> Looks like VHOST_NET_SET_BACKEND was only used for tap backend.
> > >>
> > >>> As a work around, I think kicking ioeventfds once you get
> > >>> VHOST_NET_SET_BACKEND will work.
> > >> Maybe just a eventfd_set() in vhost_net_start(). But is this
> > >> "workaround" elegant enough to be documented? Is it better to do this
> > >> explicitly with a new feature?
> > > If you are going to do this anyway, there are a couple of other changes
> > > we should do, in particular, decide what we want to do with control vq.
> > >
> >
> > If I understand correctly, you mean VIRTIO_NET_CTRL_MQ and
> > VIRTIO_NET_CTRL_GUEST_OFFLOADS? Looks like both of these were broken.
> > Need more thought, maybe new kinds of requests.
> >
> >
> 
> Are there any objections to add VHOST_NET_SET_BACKEND support to vhost
> user with a patch like that:
> 
> 
>  hw/net/vhost_net.c     |    8 ++++++++
>  hw/virtio/vhost-user.c |   10 +++++++++-
>  2 files changed, 17 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 907e002..7a008c0 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -234,6 +234,14 @@ static int vhost_net_start_one(struct vhost_net *net,
>                  goto fail;
>              }
>          }
> +    } else if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
> +         file.fd = 0;
> +         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
> +            const VhostOps *vhost_ops = net->dev.vhost_ops;
> +            int r = vhost_ops->vhost_call(&net->dev, VHOST_NET_SET_BACKEND,
> +                                          &file);
> +            assert(r >= 0);
> +        }
>      }
>      return 0;
>  fail:
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index d6f2163..32c6bd9 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
>      VHOST_USER_SET_VRING_KICK = 12,
>      VHOST_USER_SET_VRING_CALL = 13,
>      VHOST_USER_SET_VRING_ERR = 14,
> +    VHOST_USER_NET_SET_BACKEND = 15,
>      VHOST_USER_MAX
>  } VhostUserRequest;
> 
> @@ -104,7 +105,8 @@ static unsigned long int
> ioctl_to_vhost_user_request[VHOST_USER_MAX] = {
>      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
>      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
>      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
> -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
> +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
> +    VHOST_NET_SET_BACKEND   /* VHOST_USER_NET_SET_BACKEND */
>  };
> 
>  static VhostUserRequest vhost_user_request_translate(unsigned long int request)
> @@ -287,6 +289,12 @@ static int vhost_user_call(struct vhost_dev *dev,
> unsigned long int request,
>              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
>          }
>          break;
> +
> +    case VHOST_NET_SET_BACKEND:
> +        memcpy(&msg.file, arg, sizeof(struct vhost_vring_state));
> +        msg.size = sizeof(m.state);
> +        break;
> +
>      default:
>          error_report("vhost-user trying to send unhandled ioctl");
>          return -1;
> 
> 
> This message will be sent when guest is ready and can be used by vhost
> user backend to send RARP to legacy guest.
> 
> This solution avoids to add new message and has no impact on control vq.


I think that you can't add messages to the protocol unconditionally.
For example, snabbswitch simply crashes if it gets an unknown
message.

Either this needs a new feature bit, or implement
[PATCH RFC] vhost-user: protocol extensions
making it safe to add new messages.

-- 
MST

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-25 12:53                                                     ` Michael S. Tsirkin
@ 2015-06-25 14:22                                                       ` Thibaut Collet
  2015-06-26  4:06                                                         ` Jason Wang
  0 siblings, 1 reply; 48+ messages in thread
From: Thibaut Collet @ 2015-06-25 14:22 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi

On Thu, Jun 25, 2015 at 2:53 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Thu, Jun 25, 2015 at 01:01:29PM +0200, Thibaut Collet wrote:
>> On Thu, Jun 25, 2015 at 11:59 AM, Jason Wang <jasowang@redhat.com> wrote:
>> >
>> >
>> >
>> > On 06/24/2015 07:05 PM, Michael S. Tsirkin wrote:
>> > > On Wed, Jun 24, 2015 at 04:31:15PM +0800, Jason Wang wrote:
>> > >>
>> > >> On 06/23/2015 01:49 PM, Michael S. Tsirkin wrote:
>> > >>> On Tue, Jun 23, 2015 at 10:12:17AM +0800, Jason Wang wrote:
>> > >>>>>
>> > >>>>> On 06/18/2015 11:16 PM, Thibaut Collet wrote:
>> > >>>>>>> On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
>> > >>>>>>>>> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
>> > >>>>>>>>>>> If my understanding is correct, on a resume operation, we have the
>> > >>>>>>>>>>> following callback trace:
>> > >>>>>>>>>>> 1. virtio_pci_restore function that calls all restore call back of
>> > >>>>>>>>>>> virtio devices
>> > >>>>>>>>>>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
>> > >>>>>>>>>>> 3. try_fill_recv function kicks the virtual queue (through
>> > >>>>>>>>>>> virtqueue_kick function)
>> > >>>>>>>>> Yes, but this happens only after pm resume not migration. Migration is
>> > >>>>>>>>> totally transparent to guest.
>> > >>>>>>>>>
>> > >>>>>>> Hi Jason,
>> > >>>>>>>
>> > >>>>>>> After a deeper look in the migration code of QEMU a resume event is
>> > >>>>>>> always sent when the live migration is finished.
>> > >>>>>>> On a live migration we have the following callback trace:
>> > >>>>>>> 1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
>> > >>>>>>> autostart boolean to 1  and calls the qemu_start_incoming_migration
>> > >>>>>>> function (see function main of vl.c)
>> > >>>>>>> .....
>> > >>>>>>> 2. call of process_incoming_migration function in
>> > >>>>>>> migration/migration.c file whatever the way to do the live migration
>> > >>>>>>> (tcp:, fd:, unix:, exec: ...)
>> > >>>>>>> 3. call of process_incoming_migration_co function in migration/migration.c
>> > >>>>>>> 4. call of vm_start function in vl.c (otherwise the migrated VM stay
>> > >>>>>>> in the pause state, the autostart boolean is set to 1 by the main
>> > >>>>>>> function in vl.c)
>> > >>>>>>> 5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
>> > >>>>>>> 6. call of qapi_event_send_resume function that ends a resume event to the VM
>> > >>>>> AFAIK, this function sends resume event to qemu monitor not VM.
>> > >>>>>
>> > >>>>>>> So when a live migration is ended:
>> > >>>>>>> 1. a resume event is sent to the guest
>> > >>>>>>> 2. On the reception of this resume event the virtual queue are kicked
>> > >>>>>>> by the guest
>> > >>>>>>> 3. Backend vhost user catches this kick and can emit a RARP to guest
>> > >>>>>>> that does not support GUEST_ANNOUNCE
>> > >>>>>>>
>> > >>>>>>> This solution, as solution based on detection of DRIVER_OK status
>> > >>>>>>> suggested by Michael, allows backend to send the RARP to legacy guest
>> > >>>>>>> without involving QEMU and add ioctl to vhost-user.
>> > >>>>> A question here is did vhost-user code pass status to the backend? If
>> > >>>>> not, how can userspace backend detect DRIVER_OK?
>> > >>> Sorry, I must have been unclear.
>> > >>> vhost core calls VHOST_NET_SET_BACKEND on DRIVER_OK.
>> > >>> Unfortunately vhost user currently translates it to VHOST_USER_NONE.
>> > >> Looks like VHOST_NET_SET_BACKEND was only used for tap backend.
>> > >>
>> > >>> As a work around, I think kicking ioeventfds once you get
>> > >>> VHOST_NET_SET_BACKEND will work.
>> > >> Maybe just a eventfd_set() in vhost_net_start(). But is this
>> > >> "workaround" elegant enough to be documented? Is it better to do this
>> > >> explicitly with a new feature?
>> > > If you are going to do this anyway, there are a couple of other changes
>> > > we should do, in particular, decide what we want to do with control vq.
>> > >
>> >
>> > If I understand correctly, you mean VIRTIO_NET_CTRL_MQ and
>> > VIRTIO_NET_CTRL_GUEST_OFFLOADS? Looks like both of these were broken.
>> > Need more thought, maybe new kinds of requests.
>> >
>> >
>>
>> Are there any objections to add VHOST_NET_SET_BACKEND support to vhost
>> user with a patch like that:
>>
>>
>>  hw/net/vhost_net.c     |    8 ++++++++
>>  hw/virtio/vhost-user.c |   10 +++++++++-
>>  2 files changed, 17 insertions(+), 1 deletion(-)
>>
>> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
>> index 907e002..7a008c0 100644
>> --- a/hw/net/vhost_net.c
>> +++ b/hw/net/vhost_net.c
>> @@ -234,6 +234,14 @@ static int vhost_net_start_one(struct vhost_net *net,
>>                  goto fail;
>>              }
>>          }
>> +    } else if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
>> +         file.fd = 0;
>> +         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
>> +            const VhostOps *vhost_ops = net->dev.vhost_ops;
>> +            int r = vhost_ops->vhost_call(&net->dev, VHOST_NET_SET_BACKEND,
>> +                                          &file);
>> +            assert(r >= 0);
>> +        }
>>      }
>>      return 0;
>>  fail:
>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>> index d6f2163..32c6bd9 100644
>> --- a/hw/virtio/vhost-user.c
>> +++ b/hw/virtio/vhost-user.c
>> @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
>>      VHOST_USER_SET_VRING_KICK = 12,
>>      VHOST_USER_SET_VRING_CALL = 13,
>>      VHOST_USER_SET_VRING_ERR = 14,
>> +    VHOST_USER_NET_SET_BACKEND = 15,
>>      VHOST_USER_MAX
>>  } VhostUserRequest;
>>
>> @@ -104,7 +105,8 @@ static unsigned long int
>> ioctl_to_vhost_user_request[VHOST_USER_MAX] = {
>>      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
>>      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
>>      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
>> -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
>> +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
>> +    VHOST_NET_SET_BACKEND   /* VHOST_USER_NET_SET_BACKEND */
>>  };
>>
>>  static VhostUserRequest vhost_user_request_translate(unsigned long int request)
>> @@ -287,6 +289,12 @@ static int vhost_user_call(struct vhost_dev *dev,
>> unsigned long int request,
>>              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
>>          }
>>          break;
>> +
>> +    case VHOST_NET_SET_BACKEND:
>> +        memcpy(&msg.file, arg, sizeof(struct vhost_vring_state));
>> +        msg.size = sizeof(m.state);
>> +        break;
>> +
>>      default:
>>          error_report("vhost-user trying to send unhandled ioctl");
>>          return -1;
>>
>>
>> This message will be sent when guest is ready and can be used by vhost
>> user backend to send RARP to legacy guest.
>>
>> This solution avoids to add new message and has no impact on control vq.
>
>
> I think that you can't add messages to protocol unconditionally.
> For example, snabbswitch simply crashes if it gets an unknown
> message.
>
> Either this needs a new feature bit, or implement
> [PATCH RFC] vhost-user: protocol extensions
> making it safe to add new messages.
>
> --
> MST

I understand.
Last idea before doing an RFC:
Normally the guest notifies vhost of new buffers on a virtqueue by
kicking the eventfd. This eventfd has been provided to vhost by QEMU.
So when DRIVER_OK is received by QEMU, QEMU can kick the eventfd.

A possible patch to do that is:
 hw/net/vhost_net.c |    7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 907e002..fbc55e0 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -234,6 +234,13 @@ static int vhost_net_start_one(struct vhost_net *net,
                 goto fail;
             }
         }
+    } else if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+         int idx;
+
+         for (idx = 0; idx < net->dev.nvqs; ++idx) {
+            struct VirtQueue *vq = virtio_get_queue(dev, idx);
+            event_notifier_set(virtio_queue_get_host_notifier(vq));
+        }
     }
     return 0;
 fail:

Kicking this eventfd has no impact on QEMU or the guest (they do not
poll it) and simply wakes up vhost to allow it to send a RARP for a legacy
guest.

Regards.

Thibaut.

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* Re: [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest
  2015-06-25 14:22                                                       ` Thibaut Collet
@ 2015-06-26  4:06                                                         ` Jason Wang
  0 siblings, 0 replies; 48+ messages in thread
From: Jason Wang @ 2015-06-26  4:06 UTC (permalink / raw)
  To: Thibaut Collet, Michael S. Tsirkin; +Cc: qemu-devel, Stefan Hajnoczi



On 06/25/2015 10:22 PM, Thibaut Collet wrote:
> On Thu, Jun 25, 2015 at 2:53 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> On Thu, Jun 25, 2015 at 01:01:29PM +0200, Thibaut Collet wrote:
>>> On Thu, Jun 25, 2015 at 11:59 AM, Jason Wang <jasowang@redhat.com> wrote:
>>>>
>>>>
>>>> On 06/24/2015 07:05 PM, Michael S. Tsirkin wrote:
>>>>> On Wed, Jun 24, 2015 at 04:31:15PM +0800, Jason Wang wrote:
>>>>>> On 06/23/2015 01:49 PM, Michael S. Tsirkin wrote:
>>>>>>> On Tue, Jun 23, 2015 at 10:12:17AM +0800, Jason Wang wrote:
>>>>>>>>> On 06/18/2015 11:16 PM, Thibaut Collet wrote:
>>>>>>>>>>> On Tue, Jun 16, 2015 at 10:05 AM, Jason Wang <jasowang@redhat.com> wrote:
>>>>>>>>>>>>> On 06/16/2015 03:24 PM, Thibaut Collet wrote:
>>>>>>>>>>>>>>> If my understanding is correct, on a resume operation, we have the
>>>>>>>>>>>>>>> following callback trace:
>>>>>>>>>>>>>>> 1. virtio_pci_restore function that calls all restore call back of
>>>>>>>>>>>>>>> virtio devices
>>>>>>>>>>>>>>> 2. virtnet_restore that calls try_fill_recv function for each virtual queues
>>>>>>>>>>>>>>> 3. try_fill_recv function kicks the virtual queue (through
>>>>>>>>>>>>>>> virtqueue_kick function)
>>>>>>>>>>>>> Yes, but this happens only after pm resume not migration. Migration is
>>>>>>>>>>>>> totally transparent to guest.
>>>>>>>>>>>>>
>>>>>>>>>>> Hi Jason,
>>>>>>>>>>>
>>>>>>>>>>> After a deeper look in the migration code of QEMU a resume event is
>>>>>>>>>>> always sent when the live migration is finished.
>>>>>>>>>>> On a live migration we have the following callback trace:
>>>>>>>>>>> 1. The VM on the new host is set to the state RUN_STATE_INMIGRATE, the
>>>>>>>>>>> autostart boolean to 1  and calls the qemu_start_incoming_migration
>>>>>>>>>>> function (see function main of vl.c)
>>>>>>>>>>> .....
>>>>>>>>>>> 2. call of process_incoming_migration function in
>>>>>>>>>>> migration/migration.c file whatever the way to do the live migration
>>>>>>>>>>> (tcp:, fd:, unix:, exec: ...)
>>>>>>>>>>> 3. call of process_incoming_migration_co function in migration/migration.c
>>>>>>>>>>> 4. call of vm_start function in vl.c (otherwise the migrated VM stay
>>>>>>>>>>> in the pause state, the autostart boolean is set to 1 by the main
>>>>>>>>>>> function in vl.c)
>>>>>>>>>>> 5. call of vm_start function that sets the VM is the RUN_STATE_RUNNING state.
>>>>>>>>>>> 6. call of qapi_event_send_resume function that ends a resume event to the VM
>>>>>>>>> AFAIK, this function sends resume event to qemu monitor not VM.
>>>>>>>>>
>>>>>>>>>>> So when a live migration is ended:
>>>>>>>>>>> 1. a resume event is sent to the guest
>>>>>>>>>>> 2. On the reception of this resume event the virtual queue are kicked
>>>>>>>>>>> by the guest
>>>>>>>>>>> 3. Backend vhost user catches this kick and can emit a RARP to guest
>>>>>>>>>>> that does not support GUEST_ANNOUNCE
>>>>>>>>>>>
>>>>>>>>>>> This solution, as solution based on detection of DRIVER_OK status
>>>>>>>>>>> suggested by Michael, allows backend to send the RARP to legacy guest
>>>>>>>>>>> without involving QEMU and add ioctl to vhost-user.
>>>>>>>>> A question here is did vhost-user code pass status to the backend? If
>>>>>>>>> not, how can userspace backend detect DRIVER_OK?
>>>>>>> Sorry, I must have been unclear.
>>>>>>> vhost core calls VHOST_NET_SET_BACKEND on DRIVER_OK.
>>>>>>> Unfortunately vhost user currently translates it to VHOST_USER_NONE.
>>>>>> Looks like VHOST_NET_SET_BACKEND was only used for tap backend.
>>>>>>
>>>>>>> As a work around, I think kicking ioeventfds once you get
>>>>>>> VHOST_NET_SET_BACKEND will work.
>>>>>> Maybe just a eventfd_set() in vhost_net_start(). But is this
>>>>>> "workaround" elegant enough to be documented? Is it better to do this
>>>>>> explicitly with a new feature?
>>>>> If you are going to do this anyway, there are a couple of other changes
>>>>> we should do, in particular, decide what we want to do with control vq.
>>>>>
>>>> If I understand correctly, you mean VIRTIO_NET_CTRL_MQ and
>>>> VIRTIO_NET_CTRL_GUEST_OFFLOADS? Looks like both of these were broken.
>>>> Need more thought, maybe new kinds of requests.
>>>>
>>>>
>>> Are there any objections to add VHOST_NET_SET_BACKEND support to vhost
>>> user with a patch like that:
>>>
>>>
>>>  hw/net/vhost_net.c     |    8 ++++++++
>>>  hw/virtio/vhost-user.c |   10 +++++++++-
>>>  2 files changed, 17 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
>>> index 907e002..7a008c0 100644
>>> --- a/hw/net/vhost_net.c
>>> +++ b/hw/net/vhost_net.c
>>> @@ -234,6 +234,14 @@ static int vhost_net_start_one(struct vhost_net *net,
>>>                  goto fail;
>>>              }
>>>          }
>>> +    } else if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
>>> +         file.fd = 0;
>>> +         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
>>> +            const VhostOps *vhost_ops = net->dev.vhost_ops;
>>> +            int r = vhost_ops->vhost_call(&net->dev, VHOST_NET_SET_BACKEND,
>>> +                                          &file);
>>> +            assert(r >= 0);
>>> +        }
>>>      }
>>>      return 0;
>>>  fail:
>>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>>> index d6f2163..32c6bd9 100644
>>> --- a/hw/virtio/vhost-user.c
>>> +++ b/hw/virtio/vhost-user.c
>>> @@ -41,6 +41,7 @@ typedef enum VhostUserRequest {
>>>      VHOST_USER_SET_VRING_KICK = 12,
>>>      VHOST_USER_SET_VRING_CALL = 13,
>>>      VHOST_USER_SET_VRING_ERR = 14,
>>> +    VHOST_USER_NET_SET_BACKEND = 15,
>>>      VHOST_USER_MAX
>>>  } VhostUserRequest;
>>>
>>> @@ -104,7 +105,8 @@ static unsigned long int
>>> ioctl_to_vhost_user_request[VHOST_USER_MAX] = {
>>>      VHOST_GET_VRING_BASE,   /* VHOST_USER_GET_VRING_BASE */
>>>      VHOST_SET_VRING_KICK,   /* VHOST_USER_SET_VRING_KICK */
>>>      VHOST_SET_VRING_CALL,   /* VHOST_USER_SET_VRING_CALL */
>>> -    VHOST_SET_VRING_ERR     /* VHOST_USER_SET_VRING_ERR */
>>> +    VHOST_SET_VRING_ERR,    /* VHOST_USER_SET_VRING_ERR */
>>> +    VHOST_NET_SET_BACKEND   /* VHOST_USER_NET_SET_BACKEND */
>>>  };
>>>
>>>  static VhostUserRequest vhost_user_request_translate(unsigned long int request)
>>> @@ -287,6 +289,12 @@ static int vhost_user_call(struct vhost_dev *dev,
>>> unsigned long int request,
>>>              msg.u64 |= VHOST_USER_VRING_NOFD_MASK;
>>>          }
>>>          break;
>>> +
>>> +    case VHOST_NET_SET_BACKEND:
>>> +        memcpy(&msg.file, arg, sizeof(struct vhost_vring_state));
>>> +        msg.size = sizeof(m.state);
>>> +        break;
>>> +
>>>      default:
>>>          error_report("vhost-user trying to send unhandled ioctl");
>>>          return -1;
>>>
>>>
>>> This message will be sent when guest is ready and can be used by vhost
>>> user backend to send RARP to legacy guest.
>>>
>>> This solution avoids to add new message and has no impact on control vq.
>>
>> I think that you can't add messages to protocol unconditionally.
>> For example, snabbswitch simply crashes if it gets an unknown
>> message.
>>
>> Either this needs a new feature bit, or implement
>> [PATCH RFC] vhost-user: protocol extensions
>> making it safe to add new messages.
>>
>> --
>> MST
> I understand.
> Last idea before doing a RFC:
> Normally guest notifies vhost of new buffer onto a virtqueue by
> kicking the eventfd. This eventfd has been provided to vhost by QEMU.
> So when DRIVER_OK is received by QEMU, QEMU can kick the eventfd.
>
> A possible patch to do that is:
>  hw/net/vhost_net.c |    7 +++++++
>  1 file changed, 7 insertions(+)
>
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 907e002..fbc55e0 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -234,6 +234,13 @@ static int vhost_net_start_one(struct vhost_net *net,
>                  goto fail;
>              }
>          }
> +    } else if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
> +         int idx;
> +
> +         for (idx = 0; idx < net->dev.nvqs; ++idx) {
> +            struct VirtQueue *vq = virtio_get_queue(dev, idx);
> +            event_notifier_set(virtio_queue_get_host_notifier(vq));
> +        }
>      }
>      return 0;
>  fail:
>
> kicking this eventfd has no impact for QEMU or the guest (they do not
> poll it) and simply wake up vhost to allow it to send RARP for legacy
> guest.
>
> Regards.
>
> Thibaut.

This may work but the issue is:

- I believe we should document this in the spec. But it looks more like
a workaround that uses an implicit method to notify a control message, which
often causes ugly code on both sides. This is not elegant enough to be
documented in the spec.
- Consider you may have 100 queues: then the kick will happen 100 times and
the backend needs to handle such a case.

^ permalink raw reply	[flat|nested] 48+ messages in thread

end of thread, other threads:[~2015-06-26  4:06 UTC | newest]

Thread overview: 48+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-06-10 13:43 [Qemu-devel] [PATCH v3 0/2] Add live migration for vhost user Thibaut Collet
2015-06-10 13:43 ` [Qemu-devel] [PATCH v3 1/2] vhost user: add support of live migration Thibaut Collet
2015-06-10 13:52   ` Michael S. Tsirkin
2015-06-10 14:22     ` Thibaut Collet
2015-06-10 14:27       ` Michael S. Tsirkin
2015-06-10 15:24         ` Thibaut Collet
2015-06-10 15:34     ` Stefan Hajnoczi
2015-06-23  6:01   ` Michael S. Tsirkin
2015-06-10 13:43 ` [Qemu-devel] [PATCH v3 2/2] vhost user: Add RARP injection for legacy guest Thibaut Collet
2015-06-10 15:34   ` Michael S. Tsirkin
2015-06-10 15:48     ` Thibaut Collet
2015-06-10 16:00       ` Michael S. Tsirkin
2015-06-10 20:25         ` Thibaut Collet
2015-06-10 20:50           ` Michael S. Tsirkin
2015-06-11  5:34             ` Thibaut Collet
2015-06-11  5:39           ` Jason Wang
2015-06-11  5:49             ` Thibaut Collet
2015-06-11  5:54               ` Jason Wang
2015-06-11 10:38                 ` Michael S. Tsirkin
2015-06-11 12:10                   ` Thibaut Collet
2015-06-11 12:13                     ` Michael S. Tsirkin
2015-06-11 12:33                       ` Thibaut Collet
2015-06-12  7:55                       ` Jason Wang
2015-06-12 11:53                         ` Thibaut Collet
2015-06-12 14:28                         ` Michael S. Tsirkin
2015-06-15  7:43                           ` Jason Wang
2015-06-15  8:44                             ` Michael S. Tsirkin
2015-06-15 12:12                               ` Thibaut Collet
2015-06-15 12:45                                 ` Michael S. Tsirkin
2015-06-15 13:04                                   ` Thibaut Collet
2015-06-16  5:29                                 ` Jason Wang
2015-06-16  7:24                                   ` Thibaut Collet
2015-06-16  8:05                                     ` Jason Wang
2015-06-16  8:16                                       ` Thibaut Collet
2015-06-17  4:16                                         ` Jason Wang
2015-06-17  6:42                                           ` Michael S. Tsirkin
2015-06-17  7:05                                             ` Thibaut Collet
2015-06-18 15:16                                       ` Thibaut Collet
2015-06-23  2:12                                         ` Jason Wang
2015-06-23  5:49                                           ` Michael S. Tsirkin
2015-06-24  8:31                                             ` Jason Wang
2015-06-24 11:05                                               ` Michael S. Tsirkin
2015-06-25  9:59                                                 ` Jason Wang
2015-06-25 11:01                                                   ` Thibaut Collet
2015-06-25 12:53                                                     ` Michael S. Tsirkin
2015-06-25 14:22                                                       ` Thibaut Collet
2015-06-26  4:06                                                         ` Jason Wang
2015-06-16  3:35                               ` Jason Wang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.