All of lore.kernel.org
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
@ 2015-05-28  8:03 Thibaut Collet
  2015-05-29 13:12 ` Stefan Hajnoczi
  2015-06-01  9:17 ` Jason Wang
  0 siblings, 2 replies; 33+ messages in thread
From: Thibaut Collet @ 2015-05-28  8:03 UTC (permalink / raw)
  To: qemu-devel; +Cc: v.maffione, jasowang, rizzo, g.lettieri, stefanha

For netdev backends with no receive callback, packets must not be enqueued. When
the VM stops, the enqueued packets are purged (see commit ca77d85e1dbf). This
causes a QEMU segfault due to a call through a NULL function pointer.

The function that creates a net client is modified to allow the backend to
specify the size of the queue.
For netdev backends with no receive callback, the queue size is set to 0 to
prevent the enqueuing of packets.
For other netdev backends, a default queue size is provided. This value (set to
10000) is the value previously used for every queue.

Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
---
 include/net/net.h   |    3 ++-
 include/net/queue.h |    5 ++++-
 net/dump.c          |    3 ++-
 net/hub.c           |    3 ++-
 net/l2tpv3.c        |    3 ++-
 net/net.c           |   10 ++++++----
 net/netmap.c        |    3 ++-
 net/queue.c         |    4 ++--
 net/slirp.c         |    3 ++-
 net/socket.c        |    9 ++++++---
 net/tap-win32.c     |    3 ++-
 net/tap.c           |    3 ++-
 net/vde.c           |    3 ++-
 net/vhost-user.c    |    4 +++-
 14 files changed, 39 insertions(+), 20 deletions(-)

diff --git a/include/net/net.h b/include/net/net.h
index e66ca03..cb3e2df 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -104,7 +104,8 @@ int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
 NetClientState *qemu_new_net_client(NetClientInfo *info,
                                     NetClientState *peer,
                                     const char *model,
-                                    const char *name);
+                                    const char *name,
+                                    uint32_t queue_size);
 NICState *qemu_new_nic(NetClientInfo *info,
                        NICConf *conf,
                        const char *model,
diff --git a/include/net/queue.h b/include/net/queue.h
index fc02b33..4659c5a 100644
--- a/include/net/queue.h
+++ b/include/net/queue.h
@@ -34,7 +34,10 @@ typedef void (NetPacketSent) (NetClientState *sender, ssize_t ret);
 #define QEMU_NET_PACKET_FLAG_NONE  0
 #define QEMU_NET_PACKET_FLAG_RAW  (1<<0)
 
-NetQueue *qemu_new_net_queue(void *opaque);
+#define QEMU_DEFAULT_QUEUE_SIZE  10000
+#define QEMU_EMPTY_QUEUE         0
+
+NetQueue *qemu_new_net_queue(void *opaque, uint32_t queue_size);
 
 void qemu_del_net_queue(NetQueue *queue);
 
diff --git a/net/dump.c b/net/dump.c
index 9d3a09e..299b09e 100644
--- a/net/dump.c
+++ b/net/dump.c
@@ -129,7 +129,8 @@ static int net_dump_init(NetClientState *peer, const char *device,
         return -1;
     }
 
-    nc = qemu_new_net_client(&net_dump_info, peer, device, name);
+    nc = qemu_new_net_client(&net_dump_info, peer, device, name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
 
     snprintf(nc->info_str, sizeof(nc->info_str),
              "dump to %s (len=%d)", filename, len);
diff --git a/net/hub.c b/net/hub.c
index 2b60ab9..a42cac3 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -151,7 +151,8 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
         name = default_name;
     }
 
-    nc = qemu_new_net_client(&net_hub_port_info, NULL, "hub", name);
+    nc = qemu_new_net_client(&net_hub_port_info, NULL, "hub", name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
     port = DO_UPCAST(NetHubPort, nc, nc);
     port->id = id;
     port->hub = hub;
diff --git a/net/l2tpv3.c b/net/l2tpv3.c
index 8c598b0..1d2e4e5 100644
--- a/net/l2tpv3.c
+++ b/net/l2tpv3.c
@@ -548,7 +548,8 @@ int net_init_l2tpv3(const NetClientOptions *opts,
     struct addrinfo *result = NULL;
     char *srcport, *dstport;
 
-    nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3", name);
+    nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3", name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
 
     s = DO_UPCAST(NetL2TPV3State, nc, nc);
 
diff --git a/net/net.c b/net/net.c
index 7427f6a..bcf69da 100644
--- a/net/net.c
+++ b/net/net.c
@@ -214,6 +214,7 @@ static void qemu_net_client_setup(NetClientState *nc,
                                   NetClientState *peer,
                                   const char *model,
                                   const char *name,
+                                  uint32_t queue_size,
                                   NetClientDestructor *destructor)
 {
     nc->info = info;
@@ -231,21 +232,22 @@ static void qemu_net_client_setup(NetClientState *nc,
     }
     QTAILQ_INSERT_TAIL(&net_clients, nc, next);
 
-    nc->incoming_queue = qemu_new_net_queue(nc);
+    nc->incoming_queue = qemu_new_net_queue(nc, queue_size);
     nc->destructor = destructor;
 }
 
 NetClientState *qemu_new_net_client(NetClientInfo *info,
                                     NetClientState *peer,
                                     const char *model,
-                                    const char *name)
+                                    const char *name,
+                                    uint32_t queue_size)
 {
     NetClientState *nc;
 
     assert(info->size >= sizeof(NetClientState));
 
     nc = g_malloc0(info->size);
-    qemu_net_client_setup(nc, info, peer, model, name,
+    qemu_net_client_setup(nc, info, peer, model, name, queue_size,
                           qemu_net_client_destructor);
 
     return nc;
@@ -271,7 +273,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
 
     for (i = 0; i < queues; i++) {
         qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
-                              NULL);
+                              QEMU_DEFAULT_QUEUE_SIZE, NULL);
         nic->ncs[i].queue_index = i;
     }
 
diff --git a/net/netmap.c b/net/netmap.c
index 0c1772b..3ddfc8e 100644
--- a/net/netmap.c
+++ b/net/netmap.c
@@ -461,7 +461,8 @@ int net_init_netmap(const NetClientOptions *opts,
         return -1;
     }
     /* Create the object. */
-    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name);
+    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
     s = DO_UPCAST(NetmapState, nc, nc);
     s->me = me;
     s->vnet_hdr_len = 0;
diff --git a/net/queue.c b/net/queue.c
index ebbe2bb..d6ddda5 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -58,14 +58,14 @@ struct NetQueue {
     unsigned delivering : 1;
 };
 
-NetQueue *qemu_new_net_queue(void *opaque)
+NetQueue *qemu_new_net_queue(void *opaque, uint32_t queue_size)
 {
     NetQueue *queue;
 
     queue = g_new0(NetQueue, 1);
 
     queue->opaque = opaque;
-    queue->nq_maxlen = 10000;
+    queue->nq_maxlen = queue_size;
     queue->nq_count = 0;
 
     QTAILQ_INIT(&queue->packets);
diff --git a/net/slirp.c b/net/slirp.c
index 9bbed74..c91e929 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -234,7 +234,8 @@ static int net_slirp_init(NetClientState *peer, const char *model,
     }
 #endif
 
-    nc = qemu_new_net_client(&net_slirp_info, peer, model, name);
+    nc = qemu_new_net_client(&net_slirp_info, peer, model, name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
 
     snprintf(nc->info_str, sizeof(nc->info_str),
              "net=%s,restrict=%s", inet_ntoa(net),
diff --git a/net/socket.c b/net/socket.c
index c30e03f..d5c718c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -387,7 +387,8 @@ static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer,
         }
     }
 
-    nc = qemu_new_net_client(&net_dgram_socket_info, peer, model, name);
+    nc = qemu_new_net_client(&net_dgram_socket_info, peer, model, name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
 
     s = DO_UPCAST(NetSocketState, nc, nc);
 
@@ -436,7 +437,8 @@ static NetSocketState *net_socket_fd_init_stream(NetClientState *peer,
     NetClientState *nc;
     NetSocketState *s;
 
-    nc = qemu_new_net_client(&net_socket_info, peer, model, name);
+    nc = qemu_new_net_client(&net_socket_info, peer, model, name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
 
     snprintf(nc->info_str, sizeof(nc->info_str), "socket: fd=%d", fd);
 
@@ -543,7 +545,8 @@ static int net_socket_listen_init(NetClientState *peer,
         return -1;
     }
 
-    nc = qemu_new_net_client(&net_socket_info, peer, model, name);
+    nc = qemu_new_net_client(&net_socket_info, peer, model, name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
     s = DO_UPCAST(NetSocketState, nc, nc);
     s->fd = -1;
     s->listen_fd = fd;
diff --git a/net/tap-win32.c b/net/tap-win32.c
index 8aee611..dd6046a 100644
--- a/net/tap-win32.c
+++ b/net/tap-win32.c
@@ -737,7 +737,8 @@ static int tap_win32_init(NetClientState *peer, const char *model,
         return -1;
     }
 
-    nc = qemu_new_net_client(&net_tap_win32_info, peer, model, name);
+    nc = qemu_new_net_client(&net_tap_win32_info, peer, model, name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
 
     s = DO_UPCAST(TAPState, nc, nc);
 
diff --git a/net/tap.c b/net/tap.c
index 968df46..383d3ad 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -346,7 +346,8 @@ static TAPState *net_tap_fd_init(NetClientState *peer,
     NetClientState *nc;
     TAPState *s;
 
-    nc = qemu_new_net_client(&net_tap_info, peer, model, name);
+    nc = qemu_new_net_client(&net_tap_info, peer, model, name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
 
     s = DO_UPCAST(TAPState, nc, nc);
 
diff --git a/net/vde.c b/net/vde.c
index 2a619fb..b6d28cf 100644
--- a/net/vde.c
+++ b/net/vde.c
@@ -95,7 +95,8 @@ static int net_vde_init(NetClientState *peer, const char *model,
         return -1;
     }
 
-    nc = qemu_new_net_client(&net_vde_info, peer, model, name);
+    nc = qemu_new_net_client(&net_vde_info, peer, model, name,
+                             QEMU_DEFAULT_QUEUE_SIZE);
 
     snprintf(nc->info_str, sizeof(nc->info_str), "sock=%s,fd=%d",
              sock, vde_datafd(vde));
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 1d86a2b..71f8758 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -137,7 +137,9 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
     NetClientState *nc;
     VhostUserState *s;
 
-    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+    /* We don't provide a valid queue: no receive callback */
+    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name,
+                             QEMU_EMPTY_QUEUE);
 
     snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
              chr->label);
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-05-28  8:03 [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop Thibaut Collet
@ 2015-05-29 13:12 ` Stefan Hajnoczi
  2015-05-29 14:28   ` Thibaut Collet
  2015-06-01  9:17 ` Jason Wang
  1 sibling, 1 reply; 33+ messages in thread
From: Stefan Hajnoczi @ 2015-05-29 13:12 UTC (permalink / raw)
  To: Thibaut Collet
  Cc: jasowang, qemu-devel, v.maffione, stefanha, g.lettieri, rizzo

[-- Attachment #1: Type: text/plain, Size: 474 bytes --]

On Thu, May 28, 2015 at 10:03:14AM +0200, Thibaut Collet wrote:
> For netdev backend with no receive callback, packets must not be enqueued. When
> VM stops the enqueued packets are purged (see fixes ca77d85e1dbf). That causes a
> qemu segfault due to a call of a NULL pointer function.

virtio-net NIC <-> vhost-user should bypass the QEMU net subsystem since
rx/tx virtqueues are handled outside the QEMU process.  Who is enqueuing
packets on vhost-user's incoming_queue?

[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-05-29 13:12 ` Stefan Hajnoczi
@ 2015-05-29 14:28   ` Thibaut Collet
  2015-06-02 10:34     ` Stefan Hajnoczi
  0 siblings, 1 reply; 33+ messages in thread
From: Thibaut Collet @ 2015-05-29 14:28 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: jasowang, qemu-devel, v.maffione, stefanha, g.lettieri, rizzo

[-- Attachment #1: Type: text/plain, Size: 789 bytes --]

Hi,

I agree that the virtio-net NIC never enqueues packets to vhost-user,
but the qemu_announce_self function (in savevm.c) can do so through
the qemu_announce_self_iter / qemu_send_packet_raw sequence.

Regards

Thibaut.

On Fri, May 29, 2015 at 3:12 PM, Stefan Hajnoczi <stefanha@gmail.com> wrote:

> On Thu, May 28, 2015 at 10:03:14AM +0200, Thibaut Collet wrote:
> > For netdev backend with no receive callback, packets must not be
> enqueued. When
> > VM stops the enqueued packets are purged (see fixes ca77d85e1dbf). That
> causes a
> > qemu segfault due to a call of a NULL pointer function.
>
> virtio-net NIC <-> vhost-user should bypass the QEMU net subsystem since
> rx/tx virtqueues are handled outside the QEMU process.  Who is enqueuing
> packets on vhost-user's incoming_queue?
>

[-- Attachment #2: Type: text/html, Size: 1393 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-05-28  8:03 [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop Thibaut Collet
  2015-05-29 13:12 ` Stefan Hajnoczi
@ 2015-06-01  9:17 ` Jason Wang
  2015-06-01 10:14   ` Thibaut Collet
  1 sibling, 1 reply; 33+ messages in thread
From: Jason Wang @ 2015-06-01  9:17 UTC (permalink / raw)
  To: Thibaut Collet, qemu-devel; +Cc: g.lettieri, rizzo, stefanha, v.maffione



On 05/28/2015 04:03 PM, Thibaut Collet wrote:
> For netdev backend with no receive callback, packets must not be enqueued. When
> VM stops the enqueued packets are purged (see fixes ca77d85e1dbf). That causes a
> qemu segfault due to a call of a NULL pointer function.
>
> Function to create net client is modified to allow backend to specify the size
> of the queue.
> For netdev backend with no receive callback, the queue size is set to 0 to
> prevent enqueuing of packets.
> For other netdev backend, a default queue size is provided. This value (set to
> 10000) is the value previously set to any queue.
>
> Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> ---
>  include/net/net.h   |    3 ++-
>  include/net/queue.h |    5 ++++-
>  net/dump.c          |    3 ++-
>  net/hub.c           |    3 ++-
>  net/l2tpv3.c        |    3 ++-
>  net/net.c           |   10 ++++++----
>  net/netmap.c        |    3 ++-
>  net/queue.c         |    4 ++--
>  net/slirp.c         |    3 ++-
>  net/socket.c        |    9 ++++++---
>  net/tap-win32.c     |    3 ++-
>  net/tap.c           |    3 ++-
>  net/vde.c           |    3 ++-
>  net/vhost-user.c    |    4 +++-
>  14 files changed, 39 insertions(+), 20 deletions(-)
>
> diff --git a/include/net/net.h b/include/net/net.h
> index e66ca03..cb3e2df 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -104,7 +104,8 @@ int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
>  NetClientState *qemu_new_net_client(NetClientInfo *info,
>                                      NetClientState *peer,
>                                      const char *model,
> -                                    const char *name);
> +                                    const char *name,
> +                                    uint32_t queue_size);
>  NICState *qemu_new_nic(NetClientInfo *info,
>                         NICConf *conf,
>                         const char *model,
> diff --git a/include/net/queue.h b/include/net/queue.h
> index fc02b33..4659c5a 100644
> --- a/include/net/queue.h
> +++ b/include/net/queue.h
> @@ -34,7 +34,10 @@ typedef void (NetPacketSent) (NetClientState *sender, ssize_t ret);
>  #define QEMU_NET_PACKET_FLAG_NONE  0
>  #define QEMU_NET_PACKET_FLAG_RAW  (1<<0)
>  
> -NetQueue *qemu_new_net_queue(void *opaque);
> +#define QEMU_DEFAULT_QUEUE_SIZE  10000
> +#define QEMU_EMPTY_QUEUE         0
> +
> +NetQueue *qemu_new_net_queue(void *opaque, uint32_t queue_size);
>  
>  void qemu_del_net_queue(NetQueue *queue);
>  
> diff --git a/net/dump.c b/net/dump.c
> index 9d3a09e..299b09e 100644
> --- a/net/dump.c
> +++ b/net/dump.c
> @@ -129,7 +129,8 @@ static int net_dump_init(NetClientState *peer, const char *device,
>          return -1;
>      }
>  
> -    nc = qemu_new_net_client(&net_dump_info, peer, device, name);
> +    nc = qemu_new_net_client(&net_dump_info, peer, device, name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>  
>      snprintf(nc->info_str, sizeof(nc->info_str),
>               "dump to %s (len=%d)", filename, len);
> diff --git a/net/hub.c b/net/hub.c
> index 2b60ab9..a42cac3 100644
> --- a/net/hub.c
> +++ b/net/hub.c
> @@ -151,7 +151,8 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
>          name = default_name;
>      }
>  
> -    nc = qemu_new_net_client(&net_hub_port_info, NULL, "hub", name);
> +    nc = qemu_new_net_client(&net_hub_port_info, NULL, "hub", name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>      port = DO_UPCAST(NetHubPort, nc, nc);
>      port->id = id;
>      port->hub = hub;
> diff --git a/net/l2tpv3.c b/net/l2tpv3.c
> index 8c598b0..1d2e4e5 100644
> --- a/net/l2tpv3.c
> +++ b/net/l2tpv3.c
> @@ -548,7 +548,8 @@ int net_init_l2tpv3(const NetClientOptions *opts,
>      struct addrinfo *result = NULL;
>      char *srcport, *dstport;
>  
> -    nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3", name);
> +    nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3", name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>  
>      s = DO_UPCAST(NetL2TPV3State, nc, nc);
>  
> diff --git a/net/net.c b/net/net.c
> index 7427f6a..bcf69da 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -214,6 +214,7 @@ static void qemu_net_client_setup(NetClientState *nc,
>                                    NetClientState *peer,
>                                    const char *model,
>                                    const char *name,
> +                                  uint32_t queue_size,
>                                    NetClientDestructor *destructor)
>  {
>      nc->info = info;
> @@ -231,21 +232,22 @@ static void qemu_net_client_setup(NetClientState *nc,
>      }
>      QTAILQ_INSERT_TAIL(&net_clients, nc, next);
>  
> -    nc->incoming_queue = qemu_new_net_queue(nc);
> +    nc->incoming_queue = qemu_new_net_queue(nc, queue_size);
>      nc->destructor = destructor;
>  }
>  
>  NetClientState *qemu_new_net_client(NetClientInfo *info,
>                                      NetClientState *peer,
>                                      const char *model,
> -                                    const char *name)
> +                                    const char *name,
> +                                    uint32_t queue_size)
>  {
>      NetClientState *nc;
>  
>      assert(info->size >= sizeof(NetClientState));
>  
>      nc = g_malloc0(info->size);
> -    qemu_net_client_setup(nc, info, peer, model, name,
> +    qemu_net_client_setup(nc, info, peer, model, name, queue_size,
>                            qemu_net_client_destructor);
>  
>      return nc;
> @@ -271,7 +273,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
>  
>      for (i = 0; i < queues; i++) {
>          qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
> -                              NULL);
> +                              QEMU_DEFAULT_QUEUE_SIZE, NULL);
>          nic->ncs[i].queue_index = i;
>      }
>  
> diff --git a/net/netmap.c b/net/netmap.c
> index 0c1772b..3ddfc8e 100644
> --- a/net/netmap.c
> +++ b/net/netmap.c
> @@ -461,7 +461,8 @@ int net_init_netmap(const NetClientOptions *opts,
>          return -1;
>      }
>      /* Create the object. */
> -    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name);
> +    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>      s = DO_UPCAST(NetmapState, nc, nc);
>      s->me = me;
>      s->vnet_hdr_len = 0;
> diff --git a/net/queue.c b/net/queue.c
> index ebbe2bb..d6ddda5 100644
> --- a/net/queue.c
> +++ b/net/queue.c
> @@ -58,14 +58,14 @@ struct NetQueue {
>      unsigned delivering : 1;
>  };
>  
> -NetQueue *qemu_new_net_queue(void *opaque)
> +NetQueue *qemu_new_net_queue(void *opaque, uint32_t queue_size)
>  {
>      NetQueue *queue;
>  
>      queue = g_new0(NetQueue, 1);
>  
>      queue->opaque = opaque;
> -    queue->nq_maxlen = 10000;
> +    queue->nq_maxlen = queue_size;
>      queue->nq_count = 0;
>  
>      QTAILQ_INIT(&queue->packets);
> diff --git a/net/slirp.c b/net/slirp.c
> index 9bbed74..c91e929 100644
> --- a/net/slirp.c
> +++ b/net/slirp.c
> @@ -234,7 +234,8 @@ static int net_slirp_init(NetClientState *peer, const char *model,
>      }
>  #endif
>  
> -    nc = qemu_new_net_client(&net_slirp_info, peer, model, name);
> +    nc = qemu_new_net_client(&net_slirp_info, peer, model, name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>  
>      snprintf(nc->info_str, sizeof(nc->info_str),
>               "net=%s,restrict=%s", inet_ntoa(net),
> diff --git a/net/socket.c b/net/socket.c
> index c30e03f..d5c718c 100644
> --- a/net/socket.c
> +++ b/net/socket.c
> @@ -387,7 +387,8 @@ static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer,
>          }
>      }
>  
> -    nc = qemu_new_net_client(&net_dgram_socket_info, peer, model, name);
> +    nc = qemu_new_net_client(&net_dgram_socket_info, peer, model, name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>  
>      s = DO_UPCAST(NetSocketState, nc, nc);
>  
> @@ -436,7 +437,8 @@ static NetSocketState *net_socket_fd_init_stream(NetClientState *peer,
>      NetClientState *nc;
>      NetSocketState *s;
>  
> -    nc = qemu_new_net_client(&net_socket_info, peer, model, name);
> +    nc = qemu_new_net_client(&net_socket_info, peer, model, name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>  
>      snprintf(nc->info_str, sizeof(nc->info_str), "socket: fd=%d", fd);
>  
> @@ -543,7 +545,8 @@ static int net_socket_listen_init(NetClientState *peer,
>          return -1;
>      }
>  
> -    nc = qemu_new_net_client(&net_socket_info, peer, model, name);
> +    nc = qemu_new_net_client(&net_socket_info, peer, model, name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>      s = DO_UPCAST(NetSocketState, nc, nc);
>      s->fd = -1;
>      s->listen_fd = fd;
> diff --git a/net/tap-win32.c b/net/tap-win32.c
> index 8aee611..dd6046a 100644
> --- a/net/tap-win32.c
> +++ b/net/tap-win32.c
> @@ -737,7 +737,8 @@ static int tap_win32_init(NetClientState *peer, const char *model,
>          return -1;
>      }
>  
> -    nc = qemu_new_net_client(&net_tap_win32_info, peer, model, name);
> +    nc = qemu_new_net_client(&net_tap_win32_info, peer, model, name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>  
>      s = DO_UPCAST(TAPState, nc, nc);
>  
> diff --git a/net/tap.c b/net/tap.c
> index 968df46..383d3ad 100644
> --- a/net/tap.c
> +++ b/net/tap.c
> @@ -346,7 +346,8 @@ static TAPState *net_tap_fd_init(NetClientState *peer,
>      NetClientState *nc;
>      TAPState *s;
>  
> -    nc = qemu_new_net_client(&net_tap_info, peer, model, name);
> +    nc = qemu_new_net_client(&net_tap_info, peer, model, name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>  
>      s = DO_UPCAST(TAPState, nc, nc);
>  
> diff --git a/net/vde.c b/net/vde.c
> index 2a619fb..b6d28cf 100644
> --- a/net/vde.c
> +++ b/net/vde.c
> @@ -95,7 +95,8 @@ static int net_vde_init(NetClientState *peer, const char *model,
>          return -1;
>      }
>  
> -    nc = qemu_new_net_client(&net_vde_info, peer, model, name);
> +    nc = qemu_new_net_client(&net_vde_info, peer, model, name,
> +                             QEMU_DEFAULT_QUEUE_SIZE);
>  
>      snprintf(nc->info_str, sizeof(nc->info_str), "sock=%s,fd=%d",
>               sock, vde_datafd(vde));
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 1d86a2b..71f8758 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -137,7 +137,9 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>      NetClientState *nc;
>      VhostUserState *s;
>  
> -    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> +    /* We don't provide a valid queue: no receive callback */
> +    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name,
> +                             QEMU_EMPTY_QUEUE);
>  
>      snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
>               chr->label);

Two questions:

- vhost-user sets received_disabled to true to prevent this from
happening, but it looks like qemu_flush_or_purge_queue_packets() changes it
to false unconditionally. I don't quite understand this; perhaps we
can just remove that and the issue is fixed?
- If not, maybe you just need another flag like receive_disabled. That
seems like it could save a lot of code.

Thanks

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-06-01  9:17 ` Jason Wang
@ 2015-06-01 10:14   ` Thibaut Collet
  2015-06-03  9:40     ` Jason Wang
  0 siblings, 1 reply; 33+ messages in thread
From: Thibaut Collet @ 2015-06-01 10:14 UTC (permalink / raw)
  To: Jason Wang
  Cc: stefanha, Giuseppe Lettieri, Luigi Rizzo, qemu-devel, Vincenzo Maffione

[-- Attachment #1: Type: text/plain, Size: 13353 bytes --]

Hi,

>- vhost-user set received_disabled to true to prevent this from
>happening. but looks like qemu_flush_or_purge_queue_packets() change it
>to false unconditionally. And I don't quite understand this, probably we
>can just remove this and the issue is fixed?

I am afraid that solution can cause issues with other netdev backends. If,
for any reason, a netdev backend is no longer able to process packets, it is
invalidated by setting received_disabled to true. This boolean is reset
to false only by the qemu_flush_or_purge_queue_packets function. This
mechanism allows communication to be restarted with a netdev backend that is
temporarily down. I do not know whether this case can actually occur, but I do
not want to break this mechanism.

>- If not, maybe you just need another flag like receive_disabled. This
>seems can save a lot of codes.

The idea of my fix is to avoid enqueuing packets. This solution has the
advantage of providing dynamic configuration of the queue size for future use,
but it modifies several files.

Your suggestion is more vhost-user centric, and has the disadvantage of
enqueuing packets that will never be sent, nor freed during the flush
operation. Memory will normally be freed by the system when qemu stops, but
it can increase the risk of a memory leak.

Regards.

Thibaut.

On Mon, Jun 1, 2015 at 11:17 AM, Jason Wang <jasowang@redhat.com> wrote:

>
>
> On 05/28/2015 04:03 PM, Thibaut Collet wrote:
> > For netdev backend with no receive callback, packets must not be
> enqueued. When
> > VM stops the enqueued packets are purged (see fixes ca77d85e1dbf). That
> causes a
> > qemu segfault due to a call of a NULL pointer function.
> >
> > Function to create net client is modified to allow backend to specify
> the size
> > of the queue.
> > For netdev backend with no receive callback, the queue size is set to 0
> to
> > prevent enqueuing of packets.
> > For other netdev backend, a default queue size is provided. This value
> (set to
> > 10000) is the value previously set to any queue.
> >
> > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> > ---
> >  include/net/net.h   |    3 ++-
> >  include/net/queue.h |    5 ++++-
> >  net/dump.c          |    3 ++-
> >  net/hub.c           |    3 ++-
> >  net/l2tpv3.c        |    3 ++-
> >  net/net.c           |   10 ++++++----
> >  net/netmap.c        |    3 ++-
> >  net/queue.c         |    4 ++--
> >  net/slirp.c         |    3 ++-
> >  net/socket.c        |    9 ++++++---
> >  net/tap-win32.c     |    3 ++-
> >  net/tap.c           |    3 ++-
> >  net/vde.c           |    3 ++-
> >  net/vhost-user.c    |    4 +++-
> >  14 files changed, 39 insertions(+), 20 deletions(-)
> >
> > diff --git a/include/net/net.h b/include/net/net.h
> > index e66ca03..cb3e2df 100644
> > --- a/include/net/net.h
> > +++ b/include/net/net.h
> > @@ -104,7 +104,8 @@ int qemu_find_net_clients_except(const char *id,
> NetClientState **ncs,
> >  NetClientState *qemu_new_net_client(NetClientInfo *info,
> >                                      NetClientState *peer,
> >                                      const char *model,
> > -                                    const char *name);
> > +                                    const char *name,
> > +                                    uint32_t queue_size);
> >  NICState *qemu_new_nic(NetClientInfo *info,
> >                         NICConf *conf,
> >                         const char *model,
> > diff --git a/include/net/queue.h b/include/net/queue.h
> > index fc02b33..4659c5a 100644
> > --- a/include/net/queue.h
> > +++ b/include/net/queue.h
> > @@ -34,7 +34,10 @@ typedef void (NetPacketSent) (NetClientState *sender,
> ssize_t ret);
> >  #define QEMU_NET_PACKET_FLAG_NONE  0
> >  #define QEMU_NET_PACKET_FLAG_RAW  (1<<0)
> >
> > -NetQueue *qemu_new_net_queue(void *opaque);
> > +#define QEMU_DEFAULT_QUEUE_SIZE  10000
> > +#define QEMU_EMPTY_QUEUE         0
> > +
> > +NetQueue *qemu_new_net_queue(void *opaque, uint32_t queue_size);
> >
> >  void qemu_del_net_queue(NetQueue *queue);
> >
> > diff --git a/net/dump.c b/net/dump.c
> > index 9d3a09e..299b09e 100644
> > --- a/net/dump.c
> > +++ b/net/dump.c
> > @@ -129,7 +129,8 @@ static int net_dump_init(NetClientState *peer, const
> char *device,
> >          return -1;
> >      }
> >
> > -    nc = qemu_new_net_client(&net_dump_info, peer, device, name);
> > +    nc = qemu_new_net_client(&net_dump_info, peer, device, name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >
> >      snprintf(nc->info_str, sizeof(nc->info_str),
> >               "dump to %s (len=%d)", filename, len);
> > diff --git a/net/hub.c b/net/hub.c
> > index 2b60ab9..a42cac3 100644
> > --- a/net/hub.c
> > +++ b/net/hub.c
> > @@ -151,7 +151,8 @@ static NetHubPort *net_hub_port_new(NetHub *hub,
> const char *name)
> >          name = default_name;
> >      }
> >
> > -    nc = qemu_new_net_client(&net_hub_port_info, NULL, "hub", name);
> > +    nc = qemu_new_net_client(&net_hub_port_info, NULL, "hub", name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >      port = DO_UPCAST(NetHubPort, nc, nc);
> >      port->id = id;
> >      port->hub = hub;
> > diff --git a/net/l2tpv3.c b/net/l2tpv3.c
> > index 8c598b0..1d2e4e5 100644
> > --- a/net/l2tpv3.c
> > +++ b/net/l2tpv3.c
> > @@ -548,7 +548,8 @@ int net_init_l2tpv3(const NetClientOptions *opts,
> >      struct addrinfo *result = NULL;
> >      char *srcport, *dstport;
> >
> > -    nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3", name);
> > +    nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3", name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >
> >      s = DO_UPCAST(NetL2TPV3State, nc, nc);
> >
> > diff --git a/net/net.c b/net/net.c
> > index 7427f6a..bcf69da 100644
> > --- a/net/net.c
> > +++ b/net/net.c
> > @@ -214,6 +214,7 @@ static void qemu_net_client_setup(NetClientState *nc,
> >                                    NetClientState *peer,
> >                                    const char *model,
> >                                    const char *name,
> > +                                  uint32_t queue_size,
> >                                    NetClientDestructor *destructor)
> >  {
> >      nc->info = info;
> > @@ -231,21 +232,22 @@ static void qemu_net_client_setup(NetClientState
> *nc,
> >      }
> >      QTAILQ_INSERT_TAIL(&net_clients, nc, next);
> >
> > -    nc->incoming_queue = qemu_new_net_queue(nc);
> > +    nc->incoming_queue = qemu_new_net_queue(nc, queue_size);
> >      nc->destructor = destructor;
> >  }
> >
> >  NetClientState *qemu_new_net_client(NetClientInfo *info,
> >                                      NetClientState *peer,
> >                                      const char *model,
> > -                                    const char *name)
> > +                                    const char *name,
> > +                                    uint32_t queue_size)
> >  {
> >      NetClientState *nc;
> >
> >      assert(info->size >= sizeof(NetClientState));
> >
> >      nc = g_malloc0(info->size);
> > -    qemu_net_client_setup(nc, info, peer, model, name,
> > +    qemu_net_client_setup(nc, info, peer, model, name, queue_size,
> >                            qemu_net_client_destructor);
> >
> >      return nc;
> > @@ -271,7 +273,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
> >
> >      for (i = 0; i < queues; i++) {
> >          qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
> > -                              NULL);
> > +                              QEMU_DEFAULT_QUEUE_SIZE, NULL);
> >          nic->ncs[i].queue_index = i;
> >      }
> >
> > diff --git a/net/netmap.c b/net/netmap.c
> > index 0c1772b..3ddfc8e 100644
> > --- a/net/netmap.c
> > +++ b/net/netmap.c
> > @@ -461,7 +461,8 @@ int net_init_netmap(const NetClientOptions *opts,
> >          return -1;
> >      }
> >      /* Create the object. */
> > -    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name);
> > +    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >      s = DO_UPCAST(NetmapState, nc, nc);
> >      s->me = me;
> >      s->vnet_hdr_len = 0;
> > diff --git a/net/queue.c b/net/queue.c
> > index ebbe2bb..d6ddda5 100644
> > --- a/net/queue.c
> > +++ b/net/queue.c
> > @@ -58,14 +58,14 @@ struct NetQueue {
> >      unsigned delivering : 1;
> >  };
> >
> > -NetQueue *qemu_new_net_queue(void *opaque)
> > +NetQueue *qemu_new_net_queue(void *opaque, uint32_t queue_size)
> >  {
> >      NetQueue *queue;
> >
> >      queue = g_new0(NetQueue, 1);
> >
> >      queue->opaque = opaque;
> > -    queue->nq_maxlen = 10000;
> > +    queue->nq_maxlen = queue_size;
> >      queue->nq_count = 0;
> >
> >      QTAILQ_INIT(&queue->packets);
> > diff --git a/net/slirp.c b/net/slirp.c
> > index 9bbed74..c91e929 100644
> > --- a/net/slirp.c
> > +++ b/net/slirp.c
> > @@ -234,7 +234,8 @@ static int net_slirp_init(NetClientState *peer,
> const char *model,
> >      }
> >  #endif
> >
> > -    nc = qemu_new_net_client(&net_slirp_info, peer, model, name);
> > +    nc = qemu_new_net_client(&net_slirp_info, peer, model, name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >
> >      snprintf(nc->info_str, sizeof(nc->info_str),
> >               "net=%s,restrict=%s", inet_ntoa(net),
> > diff --git a/net/socket.c b/net/socket.c
> > index c30e03f..d5c718c 100644
> > --- a/net/socket.c
> > +++ b/net/socket.c
> > @@ -387,7 +387,8 @@ static NetSocketState
> *net_socket_fd_init_dgram(NetClientState *peer,
> >          }
> >      }
> >
> > -    nc = qemu_new_net_client(&net_dgram_socket_info, peer, model, name);
> > +    nc = qemu_new_net_client(&net_dgram_socket_info, peer, model, name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >
> >      s = DO_UPCAST(NetSocketState, nc, nc);
> >
> > @@ -436,7 +437,8 @@ static NetSocketState
> *net_socket_fd_init_stream(NetClientState *peer,
> >      NetClientState *nc;
> >      NetSocketState *s;
> >
> > -    nc = qemu_new_net_client(&net_socket_info, peer, model, name);
> > +    nc = qemu_new_net_client(&net_socket_info, peer, model, name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >
> >      snprintf(nc->info_str, sizeof(nc->info_str), "socket: fd=%d", fd);
> >
> > @@ -543,7 +545,8 @@ static int net_socket_listen_init(NetClientState
> *peer,
> >          return -1;
> >      }
> >
> > -    nc = qemu_new_net_client(&net_socket_info, peer, model, name);
> > +    nc = qemu_new_net_client(&net_socket_info, peer, model, name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >      s = DO_UPCAST(NetSocketState, nc, nc);
> >      s->fd = -1;
> >      s->listen_fd = fd;
> > diff --git a/net/tap-win32.c b/net/tap-win32.c
> > index 8aee611..dd6046a 100644
> > --- a/net/tap-win32.c
> > +++ b/net/tap-win32.c
> > @@ -737,7 +737,8 @@ static int tap_win32_init(NetClientState *peer,
> const char *model,
> >          return -1;
> >      }
> >
> > -    nc = qemu_new_net_client(&net_tap_win32_info, peer, model, name);
> > +    nc = qemu_new_net_client(&net_tap_win32_info, peer, model, name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >
> >      s = DO_UPCAST(TAPState, nc, nc);
> >
> > diff --git a/net/tap.c b/net/tap.c
> > index 968df46..383d3ad 100644
> > --- a/net/tap.c
> > +++ b/net/tap.c
> > @@ -346,7 +346,8 @@ static TAPState *net_tap_fd_init(NetClientState
> *peer,
> >      NetClientState *nc;
> >      TAPState *s;
> >
> > -    nc = qemu_new_net_client(&net_tap_info, peer, model, name);
> > +    nc = qemu_new_net_client(&net_tap_info, peer, model, name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >
> >      s = DO_UPCAST(TAPState, nc, nc);
> >
> > diff --git a/net/vde.c b/net/vde.c
> > index 2a619fb..b6d28cf 100644
> > --- a/net/vde.c
> > +++ b/net/vde.c
> > @@ -95,7 +95,8 @@ static int net_vde_init(NetClientState *peer, const
> char *model,
> >          return -1;
> >      }
> >
> > -    nc = qemu_new_net_client(&net_vde_info, peer, model, name);
> > +    nc = qemu_new_net_client(&net_vde_info, peer, model, name,
> > +                             QEMU_DEFAULT_QUEUE_SIZE);
> >
> >      snprintf(nc->info_str, sizeof(nc->info_str), "sock=%s,fd=%d",
> >               sock, vde_datafd(vde));
> > diff --git a/net/vhost-user.c b/net/vhost-user.c
> > index 1d86a2b..71f8758 100644
> > --- a/net/vhost-user.c
> > +++ b/net/vhost-user.c
> > @@ -137,7 +137,9 @@ static int net_vhost_user_init(NetClientState *peer,
> const char *device,
> >      NetClientState *nc;
> >      VhostUserState *s;
> >
> > -    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> > +    /* We don't provide a valid queue: no receive callback */
> > +    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name,
> > +                             QEMU_EMPTY_QUEUE);
> >
> >      snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
> >               chr->label);
>
> Two questions:
>
> - vhost-user set received_disabled to true to prevent this from
> happening. but looks like qemu_flush_or_purge_queue_packets() change it
> to false unconditionally. And I don't quite understand this, probably we
> can just remove this and the issue is fixed?
> - If not, maybe you just need another flag like receive_disabled. This
> seems can save a lot of codes.
>
> Thanks
>

[-- Attachment #2: Type: text/html, Size: 17666 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-05-29 14:28   ` Thibaut Collet
@ 2015-06-02 10:34     ` Stefan Hajnoczi
  2015-06-03  7:56       ` Thibaut Collet
  0 siblings, 1 reply; 33+ messages in thread
From: Stefan Hajnoczi @ 2015-06-02 10:34 UTC (permalink / raw)
  To: Thibaut Collet
  Cc: Stefan Hajnoczi, jasowang, qemu-devel, v.maffione, g.lettieri, rizzo

[-- Attachment #1: Type: text/plain, Size: 446 bytes --]

On Fri, May 29, 2015 at 04:28:48PM +0200, Thibaut Collet wrote:
> I agree that virtio-net NIC never enqueues packet to vhost-user
> but qemu_announce_self function (savevm.c file) can do it through
> the qemu_announce_self_iter / qemu_send_packet_raw sequence.

Does vhost-user support live migration?

If no, then qemu_announce_self is never called.

If yes, then the packet should not be discarded since the ARP announce
has a purpose.

Stefan

[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-06-02 10:34     ` Stefan Hajnoczi
@ 2015-06-03  7:56       ` Thibaut Collet
  2015-06-03  9:42         ` Jason Wang
  2015-06-03  9:42         ` [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop Stefan Hajnoczi
  0 siblings, 2 replies; 33+ messages in thread
From: Thibaut Collet @ 2015-06-03  7:56 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: Stefan Hajnoczi, Jason Wang, qemu-devel, Vincenzo Maffione,
	Giuseppe Lettieri, Luigi Rizzo

[-- Attachment #1: Type: text/plain, Size: 811 bytes --]

Hi,

thanks for your point; I did not notice the problem of the loss of the ARP
announce caused by discarding the message.
So I must rewrite my patch to define a queue to transmit the ARP announce
to the guest. Is that the proper solution?

Thanks.

Best regards.

Thibaut.

On Tue, Jun 2, 2015 at 12:34 PM, Stefan Hajnoczi <stefanha@redhat.com>
wrote:

> On Fri, May 29, 2015 at 04:28:48PM +0200, Thibaut Collet wrote:
> > I agree that virtio-net NIC never enqueues packet to vhost-user
> > but qemu_announce_self function (savevm.c file) can do it through
> > the qemu_announce_self_iter / qemu_send_packet_raw sequence.
>
> Does vhost-user support live migration?
>
> If no, then qemu_announce_self is never called.
>
> If yes, then the packet should not be discarded since the ARP announce
> has a purpose.
>
> Stefan
>

[-- Attachment #2: Type: text/html, Size: 1342 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-06-01 10:14   ` Thibaut Collet
@ 2015-06-03  9:40     ` Jason Wang
  0 siblings, 0 replies; 33+ messages in thread
From: Jason Wang @ 2015-06-03  9:40 UTC (permalink / raw)
  To: Thibaut Collet
  Cc: Vincenzo Maffione, Giuseppe Lettieri, Luigi Rizzo, qemu-devel, stefanha



On 06/01/2015 06:14 PM, Thibaut Collet wrote:
> Hi,
>
> >- vhost-user set received_disabled to true to prevent this from
> >happening. but looks like qemu_flush_or_purge_queue_packets() change it
> >to false unconditionally. And I don't quite understand this, probably we
> >can just remove this and the issue is fixed?
>
> I am afraid that solution can cause issues with other netdev backends.
> If for any reason a netdev backend is no more able to treat packets it
> is unvalidated by setting the received_disabled to true. This boolean
> is reset to false only by the qemu_flush_or_purge_queue_packets
> function. This way allows to restart communication with a netdev
> backend that will be temporary done. I do not know if this case can
> occur but I do not want to break this mechanism.

Then we can reset receive_disabled to zero only when doing a flush, and keep
its value when purging.

>
> >- If not, maybe you just need another flag like receive_disabled. This
> >seems can save a lot of codes.
>
> Idea of my fix is to avoid enqueuing packet. This solution has the
> advantage to provide dynamic configuration of queue size for future
> use but modified several files.
>
> Your suggestion is more vhost user centric, and has the disadvantage
> to enqueue packet that will be never send, nor free during the flush
> operation. Memory will be normally freed by the system when qemu stops
> but it can increase the risk of memory leak.

The issue is that this only works when there is no sent_cb (though that is not
a problem for vhost-user, since it does not even use a sent_cb).

My suggestion is to just check this flag and drop the packet if it is
true in qemu_net_queue_append{_iov}(). But I'm OK with your method as well.

>
> Regards.
>
> Thibaut.
>
> On Mon, Jun 1, 2015 at 11:17 AM, Jason Wang <jasowang@redhat.com
> <mailto:jasowang@redhat.com>> wrote:
>
>
>
>     On 05/28/2015 04:03 PM, Thibaut Collet wrote:
>     > For netdev backend with no receive callback, packets must not be
>     enqueued. When
>     > VM stops the enqueued packets are purged (see fixes
>     ca77d85e1dbf). That causes a
>     > qemu segfault due to a call of a NULL pointer function.
>     >
>     > Function to create net client is modified to allow backend to
>     specify the size
>     > of the queue.
>     > For netdev backend with no receive callback, the queue size is
>     set to 0 to
>     > prevent enqueuing of packets.
>     > For other netdev backend, a default queue size is provided. This
>     value (set to
>     > 10000) is the value previously set to any queue.
>     >
>     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com
>     <mailto:thibaut.collet@6wind.com>>
>     > ---
>     >  include/net/net.h   |    3 ++-
>     >  include/net/queue.h |    5 ++++-
>     >  net/dump.c          |    3 ++-
>     >  net/hub.c           |    3 ++-
>     >  net/l2tpv3.c        |    3 ++-
>     >  net/net.c           |   10 ++++++----
>     >  net/netmap.c        |    3 ++-
>     >  net/queue.c         |    4 ++--
>     >  net/slirp.c         |    3 ++-
>     >  net/socket.c        |    9 ++++++---
>     >  net/tap-win32.c     |    3 ++-
>     >  net/tap.c           |    3 ++-
>     >  net/vde.c           |    3 ++-
>     >  net/vhost-user.c    |    4 +++-
>     >  14 files changed, 39 insertions(+), 20 deletions(-)
>     >
>     > diff --git a/include/net/net.h b/include/net/net.h
>     > index e66ca03..cb3e2df 100644
>     > --- a/include/net/net.h
>     > +++ b/include/net/net.h
>     > @@ -104,7 +104,8 @@ int qemu_find_net_clients_except(const char
>     *id, NetClientState **ncs,
>     >  NetClientState *qemu_new_net_client(NetClientInfo *info,
>     >                                      NetClientState *peer,
>     >                                      const char *model,
>     > -                                    const char *name);
>     > +                                    const char *name,
>     > +                                    uint32_t queue_size);
>     >  NICState *qemu_new_nic(NetClientInfo *info,
>     >                         NICConf *conf,
>     >                         const char *model,
>     > diff --git a/include/net/queue.h b/include/net/queue.h
>     > index fc02b33..4659c5a 100644
>     > --- a/include/net/queue.h
>     > +++ b/include/net/queue.h
>     > @@ -34,7 +34,10 @@ typedef void (NetPacketSent) (NetClientState
>     *sender, ssize_t ret);
>     >  #define QEMU_NET_PACKET_FLAG_NONE  0
>     >  #define QEMU_NET_PACKET_FLAG_RAW  (1<<0)
>     >
>     > -NetQueue *qemu_new_net_queue(void *opaque);
>     > +#define QEMU_DEFAULT_QUEUE_SIZE  10000
>     > +#define QEMU_EMPTY_QUEUE         0
>     > +
>     > +NetQueue *qemu_new_net_queue(void *opaque, uint32_t queue_size);
>     >
>     >  void qemu_del_net_queue(NetQueue *queue);
>     >
>     > diff --git a/net/dump.c b/net/dump.c
>     > index 9d3a09e..299b09e 100644
>     > --- a/net/dump.c
>     > +++ b/net/dump.c
>     > @@ -129,7 +129,8 @@ static int net_dump_init(NetClientState
>     *peer, const char *device,
>     >          return -1;
>     >      }
>     >
>     > -    nc = qemu_new_net_client(&net_dump_info, peer, device, name);
>     > +    nc = qemu_new_net_client(&net_dump_info, peer, device, name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >
>     >      snprintf(nc->info_str, sizeof(nc->info_str),
>     >               "dump to %s (len=%d)", filename, len);
>     > diff --git a/net/hub.c b/net/hub.c
>     > index 2b60ab9..a42cac3 100644
>     > --- a/net/hub.c
>     > +++ b/net/hub.c
>     > @@ -151,7 +151,8 @@ static NetHubPort *net_hub_port_new(NetHub
>     *hub, const char *name)
>     >          name = default_name;
>     >      }
>     >
>     > -    nc = qemu_new_net_client(&net_hub_port_info, NULL, "hub",
>     name);
>     > +    nc = qemu_new_net_client(&net_hub_port_info, NULL, "hub", name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >      port = DO_UPCAST(NetHubPort, nc, nc);
>     >      port->id = id;
>     >      port->hub = hub;
>     > diff --git a/net/l2tpv3.c b/net/l2tpv3.c
>     > index 8c598b0..1d2e4e5 100644
>     > --- a/net/l2tpv3.c
>     > +++ b/net/l2tpv3.c
>     > @@ -548,7 +548,8 @@ int net_init_l2tpv3(const NetClientOptions
>     *opts,
>     >      struct addrinfo *result = NULL;
>     >      char *srcport, *dstport;
>     >
>     > -    nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3",
>     name);
>     > +    nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3",
>     name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >
>     >      s = DO_UPCAST(NetL2TPV3State, nc, nc);
>     >
>     > diff --git a/net/net.c b/net/net.c
>     > index 7427f6a..bcf69da 100644
>     > --- a/net/net.c
>     > +++ b/net/net.c
>     > @@ -214,6 +214,7 @@ static void
>     qemu_net_client_setup(NetClientState *nc,
>     >                                    NetClientState *peer,
>     >                                    const char *model,
>     >                                    const char *name,
>     > +                                  uint32_t queue_size,
>     >                                    NetClientDestructor *destructor)
>     >  {
>     >      nc->info = info;
>     > @@ -231,21 +232,22 @@ static void
>     qemu_net_client_setup(NetClientState *nc,
>     >      }
>     >      QTAILQ_INSERT_TAIL(&net_clients, nc, next);
>     >
>     > -    nc->incoming_queue = qemu_new_net_queue(nc);
>     > +    nc->incoming_queue = qemu_new_net_queue(nc, queue_size);
>     >      nc->destructor = destructor;
>     >  }
>     >
>     >  NetClientState *qemu_new_net_client(NetClientInfo *info,
>     >                                      NetClientState *peer,
>     >                                      const char *model,
>     > -                                    const char *name)
>     > +                                    const char *name,
>     > +                                    uint32_t queue_size)
>     >  {
>     >      NetClientState *nc;
>     >
>     >      assert(info->size >= sizeof(NetClientState));
>     >
>     >      nc = g_malloc0(info->size);
>     > -    qemu_net_client_setup(nc, info, peer, model, name,
>     > +    qemu_net_client_setup(nc, info, peer, model, name, queue_size,
>     >                            qemu_net_client_destructor);
>     >
>     >      return nc;
>     > @@ -271,7 +273,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
>     >
>     >      for (i = 0; i < queues; i++) {
>     >          qemu_net_client_setup(&nic->ncs[i], info, peers[i],
>     model, name,
>     > -                              NULL);
>     > +                              QEMU_DEFAULT_QUEUE_SIZE, NULL);
>     >          nic->ncs[i].queue_index = i;
>     >      }
>     >
>     > diff --git a/net/netmap.c b/net/netmap.c
>     > index 0c1772b..3ddfc8e 100644
>     > --- a/net/netmap.c
>     > +++ b/net/netmap.c
>     > @@ -461,7 +461,8 @@ int net_init_netmap(const NetClientOptions
>     *opts,
>     >          return -1;
>     >      }
>     >      /* Create the object. */
>     > -    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap",
>     name);
>     > +    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap",
>     name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >      s = DO_UPCAST(NetmapState, nc, nc);
>     >      s->me = me;
>     >      s->vnet_hdr_len = 0;
>     > diff --git a/net/queue.c b/net/queue.c
>     > index ebbe2bb..d6ddda5 100644
>     > --- a/net/queue.c
>     > +++ b/net/queue.c
>     > @@ -58,14 +58,14 @@ struct NetQueue {
>     >      unsigned delivering : 1;
>     >  };
>     >
>     > -NetQueue *qemu_new_net_queue(void *opaque)
>     > +NetQueue *qemu_new_net_queue(void *opaque, uint32_t queue_size)
>     >  {
>     >      NetQueue *queue;
>     >
>     >      queue = g_new0(NetQueue, 1);
>     >
>     >      queue->opaque = opaque;
>     > -    queue->nq_maxlen = 10000;
>     > +    queue->nq_maxlen = queue_size;
>     >      queue->nq_count = 0;
>     >
>     >      QTAILQ_INIT(&queue->packets);
>     > diff --git a/net/slirp.c b/net/slirp.c
>     > index 9bbed74..c91e929 100644
>     > --- a/net/slirp.c
>     > +++ b/net/slirp.c
>     > @@ -234,7 +234,8 @@ static int net_slirp_init(NetClientState
>     *peer, const char *model,
>     >      }
>     >  #endif
>     >
>     > -    nc = qemu_new_net_client(&net_slirp_info, peer, model, name);
>     > +    nc = qemu_new_net_client(&net_slirp_info, peer, model, name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >
>     >      snprintf(nc->info_str, sizeof(nc->info_str),
>     >               "net=%s,restrict=%s", inet_ntoa(net),
>     > diff --git a/net/socket.c b/net/socket.c
>     > index c30e03f..d5c718c 100644
>     > --- a/net/socket.c
>     > +++ b/net/socket.c
>     > @@ -387,7 +387,8 @@ static NetSocketState
>     *net_socket_fd_init_dgram(NetClientState *peer,
>     >          }
>     >      }
>     >
>     > -    nc = qemu_new_net_client(&net_dgram_socket_info, peer,
>     model, name);
>     > +    nc = qemu_new_net_client(&net_dgram_socket_info, peer,
>     model, name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >
>     >      s = DO_UPCAST(NetSocketState, nc, nc);
>     >
>     > @@ -436,7 +437,8 @@ static NetSocketState
>     *net_socket_fd_init_stream(NetClientState *peer,
>     >      NetClientState *nc;
>     >      NetSocketState *s;
>     >
>     > -    nc = qemu_new_net_client(&net_socket_info, peer, model, name);
>     > +    nc = qemu_new_net_client(&net_socket_info, peer, model, name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >
>     >      snprintf(nc->info_str, sizeof(nc->info_str), "socket:
>     fd=%d", fd);
>     >
>     > @@ -543,7 +545,8 @@ static int
>     net_socket_listen_init(NetClientState *peer,
>     >          return -1;
>     >      }
>     >
>     > -    nc = qemu_new_net_client(&net_socket_info, peer, model, name);
>     > +    nc = qemu_new_net_client(&net_socket_info, peer, model, name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >      s = DO_UPCAST(NetSocketState, nc, nc);
>     >      s->fd = -1;
>     >      s->listen_fd = fd;
>     > diff --git a/net/tap-win32.c b/net/tap-win32.c
>     > index 8aee611..dd6046a 100644
>     > --- a/net/tap-win32.c
>     > +++ b/net/tap-win32.c
>     > @@ -737,7 +737,8 @@ static int tap_win32_init(NetClientState
>     *peer, const char *model,
>     >          return -1;
>     >      }
>     >
>     > -    nc = qemu_new_net_client(&net_tap_win32_info, peer, model,
>     name);
>     > +    nc = qemu_new_net_client(&net_tap_win32_info, peer, model,
>     name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >
>     >      s = DO_UPCAST(TAPState, nc, nc);
>     >
>     > diff --git a/net/tap.c b/net/tap.c
>     > index 968df46..383d3ad 100644
>     > --- a/net/tap.c
>     > +++ b/net/tap.c
>     > @@ -346,7 +346,8 @@ static TAPState
>     *net_tap_fd_init(NetClientState *peer,
>     >      NetClientState *nc;
>     >      TAPState *s;
>     >
>     > -    nc = qemu_new_net_client(&net_tap_info, peer, model, name);
>     > +    nc = qemu_new_net_client(&net_tap_info, peer, model, name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >
>     >      s = DO_UPCAST(TAPState, nc, nc);
>     >
>     > diff --git a/net/vde.c b/net/vde.c
>     > index 2a619fb..b6d28cf 100644
>     > --- a/net/vde.c
>     > +++ b/net/vde.c
>     > @@ -95,7 +95,8 @@ static int net_vde_init(NetClientState *peer,
>     const char *model,
>     >          return -1;
>     >      }
>     >
>     > -    nc = qemu_new_net_client(&net_vde_info, peer, model, name);
>     > +    nc = qemu_new_net_client(&net_vde_info, peer, model, name,
>     > +                             QEMU_DEFAULT_QUEUE_SIZE);
>     >
>     >      snprintf(nc->info_str, sizeof(nc->info_str), "sock=%s,fd=%d",
>     >               sock, vde_datafd(vde));
>     > diff --git a/net/vhost-user.c b/net/vhost-user.c
>     > index 1d86a2b..71f8758 100644
>     > --- a/net/vhost-user.c
>     > +++ b/net/vhost-user.c
>     > @@ -137,7 +137,9 @@ static int
>     net_vhost_user_init(NetClientState *peer, const char *device,
>     >      NetClientState *nc;
>     >      VhostUserState *s;
>     >
>     > -    nc = qemu_new_net_client(&net_vhost_user_info, peer,
>     device, name);
>     > +    /* We don't provide a valid queue: no receive callback */
>     > +    nc = qemu_new_net_client(&net_vhost_user_info, peer,
>     device, name,
>     > +                             QEMU_EMPTY_QUEUE);
>     >
>     >      snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to
>     %s",
>     >               chr->label);
>
>     Two questions:
>
>     - vhost-user set received_disabled to true to prevent this from
>     happening. but looks like qemu_flush_or_purge_queue_packets()
>     change it
>     to false unconditionally. And I don't quite understand this,
>     probably we
>     can just remove this and the issue is fixed?
>     - If not, maybe you just need another flag like receive_disabled. This
>     seems can save a lot of codes.
>
>     Thanks
>
>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-06-03  7:56       ` Thibaut Collet
@ 2015-06-03  9:42         ` Jason Wang
  2015-06-03 13:40           ` Stefan Hajnoczi
  2015-06-03  9:42         ` [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop Stefan Hajnoczi
  1 sibling, 1 reply; 33+ messages in thread
From: Jason Wang @ 2015-06-03  9:42 UTC (permalink / raw)
  To: Thibaut Collet, Stefan Hajnoczi
  Cc: Stefan Hajnoczi, Giuseppe Lettieri, Luigi Rizzo, qemu-devel,
	Vincenzo Maffione



On 06/03/2015 03:56 PM, Thibaut Collet wrote:
> Hi,
>
> thanks for your point, I did not notice the problem of the lost of ARP
> announce by discarding the message. 
> So I must rewrite my patch to define a queue to transmit the ARP
> announce to the guest. Is it the proper solution?
>

Please have a look at VIRTIO_NET_F_GUEST_ANNOUNCE. It can notify the guest
after migration to let it send an ARP announce. Recent Linux drivers have
this support.

> Thanks.
>
> Best regards.
>
> Thibaut.
>
> On Tue, Jun 2, 2015 at 12:34 PM, Stefan Hajnoczi <stefanha@redhat.com
> <mailto:stefanha@redhat.com>> wrote:
>
>     On Fri, May 29, 2015 at 04:28:48PM +0200, Thibaut Collet wrote:
>     > I agree that virtio-net NIC never enqueues packet to vhost-user
>     > but qemu_announce_self function (savevm.c file) can do it through
>     > the qemu_announce_self_iter / qemu_send_packet_raw sequence.
>
>     Does vhost-user support live migration?
>
>     If no, then qemu_announce_self is never called.
>
>     If yes, then the packet should not be discarded since the ARP announce
>     has a purpose.
>
>     Stefan
>
>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-06-03  7:56       ` Thibaut Collet
  2015-06-03  9:42         ` Jason Wang
@ 2015-06-03  9:42         ` Stefan Hajnoczi
  1 sibling, 0 replies; 33+ messages in thread
From: Stefan Hajnoczi @ 2015-06-03  9:42 UTC (permalink / raw)
  To: Thibaut Collet
  Cc: Stefan Hajnoczi, Jason Wang, qemu-devel, Vincenzo Maffione,
	Nikolay Nikolaev, Giuseppe Lettieri, Luigi Rizzo

[-- Attachment #1: Type: text/plain, Size: 699 bytes --]

On Wed, Jun 03, 2015 at 09:56:57AM +0200, Thibaut Collet wrote:
> thanks for your point, I did not notice the problem of the lost of ARP
> announce by discarding the message.
> So I must rewrite my patch to define a queue to transmit the ARP announce
> to the guest. Is it the proper solution?

I'm not sure if vhost-user is designed to support live migration in the
first place.  It's only worth adding ARP packet injection support if
live migration is supported with vhost-user.

You don't need to define a queue, instead implement the .receive()
function for vhost-user and invoke a new virtio-net-specific vhost-user
request type that injects a packet (independent of the tx virtqueue).

Stefan

[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-06-03  9:42         ` Jason Wang
@ 2015-06-03 13:40           ` Stefan Hajnoczi
  2015-06-04  9:35             ` Jason Wang
  0 siblings, 1 reply; 33+ messages in thread
From: Stefan Hajnoczi @ 2015-06-03 13:40 UTC (permalink / raw)
  To: Jason Wang
  Cc: Thibaut Collet, qemu-devel, Vincenzo Maffione, Stefan Hajnoczi,
	Giuseppe Lettieri, Luigi Rizzo

On Wed, Jun 3, 2015 at 10:42 AM, Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 06/03/2015 03:56 PM, Thibaut Collet wrote:
>> Hi,
>>
>> thanks for your point, I did not notice the problem of the lost of ARP
>> announce by discarding the message.
>> So I must rewrite my patch to define a queue to transmit the ARP
>> announce to the guest. Is it the proper solution?
>>
>
> Please have a look at VIRTIO_NET_F_GUEST_ANNOUNCE. It can notify guest
> after migration to let it to send arp announce. Recent linux driver has
> this support.

This looks like a useful feature.

I couldn't see a place in the code where qemu_announce_self() skips
virtio-net NICs that have this feature bit enabled.  Does this mean
these NICs will send announces twice (once from the guest and once
from QEMU)?

My hope was that with the virtio-net announce feature the
qemu_announce_self() function would skip the NIC.

Stefan

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop
  2015-06-03 13:40           ` Stefan Hajnoczi
@ 2015-06-04  9:35             ` Jason Wang
  2015-06-05 13:24               ` [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user Thibaut Collet
  0 siblings, 1 reply; 33+ messages in thread
From: Jason Wang @ 2015-06-04  9:35 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: Thibaut Collet, qemu-devel, Vincenzo Maffione, Stefan Hajnoczi,
	Giuseppe Lettieri, Luigi Rizzo



On 06/03/2015 09:40 PM, Stefan Hajnoczi wrote:
> On Wed, Jun 3, 2015 at 10:42 AM, Jason Wang <jasowang@redhat.com> wrote:
>>
>> On 06/03/2015 03:56 PM, Thibaut Collet wrote:
>>> Hi,
>>>
>>> thanks for your point, I did not notice the problem of the lost of ARP
>>> announce by discarding the message.
>>> So I must rewrite my patch to define a queue to transmit the ARP
>>> announce to the guest. Is it the proper solution?
>>>
>> Please have a look at VIRTIO_NET_F_GUEST_ANNOUNCE. It can notify guest
>> after migration to let it to send arp announce. Recent linux driver has
>> this support.
> This looks like a useful feature.
>
> I couldn't see a place in the code where qemu_announce_self() skips
> virtio-net NICs that have this feature bit enabled.  Does this mean
> these NICs will send announces twice (once from the guest and once
> from QEMU)?

Yes, for simplicity, the changes were limited to virtio-net only.

>
> My hope was that with the virtio-net announce feature the
> qemu_announce_self() function would skip the NIC.
>
> Stefan

Yes and will try to post patch to do this.

Thanks

^ permalink raw reply	[flat|nested] 33+ messages in thread

* [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-04  9:35             ` Jason Wang
@ 2015-06-05 13:24               ` Thibaut Collet
  2015-06-08  5:55                 ` Jason Wang
                                   ` (2 more replies)
  0 siblings, 3 replies; 33+ messages in thread
From: Thibaut Collet @ 2015-06-05 13:24 UTC (permalink / raw)
  To: qemu-devel; +Cc: jasowang, quintela, stefanha, mst

Add VIRTIO_NET_F_GUEST_ANNOUNCE capability to vhost-net when netdev backend is
vhost-user.

For netdev backend using virtio-net NIC the self announce is managed directly
by the virtio-net NIC and not by the netdev backend itself.
To avoid duplication of announces (once from the guest and once from QEMU) a
bitfield is added in the NetClientState structure.
If this bit is set, self announce does not send a message to the guest to request
a gratuitous ARP but lets the virtio-net NIC set VIRTIO_NET_S_ANNOUNCE for the
gratuitous ARP.

Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
---
v2: no longer discard packets sent to vhost-user: they are GARP requests after
    live migration.
    As suggested by S. Hajnoczi qemu_announce_self skips virtio-net NIC that
    already send GARP.

 hw/net/vhost_net.c |    2 ++
 include/net/net.h  |    1 +
 net/vhost-user.c   |    2 ++
 savevm.c           |   11 ++++++++---
 4 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 426b23e..a745f97 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
     VIRTIO_NET_F_CTRL_MAC_ADDR,
     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
 
+    VIRTIO_NET_F_GUEST_ANNOUNCE,
+
     VIRTIO_NET_F_MQ,
 
     VHOST_INVALID_FEATURE_BIT
diff --git a/include/net/net.h b/include/net/net.h
index e66ca03..a78e9df 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -85,6 +85,7 @@ struct NetClientState {
     char *name;
     char info_str[256];
     unsigned receive_disabled : 1;
+    unsigned self_announce_disabled : 1;
     NetClientDestructor *destructor;
     unsigned int queue_index;
     unsigned rxfilter_notify_enabled:1;
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 8d26728..b345446 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -147,6 +147,8 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
 
         s = DO_UPCAST(VhostUserState, nc, nc);
 
+        /* Self announce is managed directly by virtio-net NIC */
+        s->nc.self_announce_disabled = 1;
         /* We don't provide a receive callback */
         s->nc.receive_disabled = 1;
         s->chr = chr;
diff --git a/savevm.c b/savevm.c
index 3b0e222..7a134b1 100644
--- a/savevm.c
+++ b/savevm.c
@@ -80,11 +80,16 @@ static void qemu_announce_self_iter(NICState *nic, void *opaque)
 {
     uint8_t buf[60];
     int len;
+    NetClientState *nc;
 
-    trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
-    len = announce_self_create(buf, nic->conf->macaddr.a);
+    nc = qemu_get_queue(nic);
 
-    qemu_send_packet_raw(qemu_get_queue(nic), buf, len);
+    if (!nc->peer->self_announce_disabled) {
+        trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
+        len = announce_self_create(buf, nic->conf->macaddr.a);
+
+        qemu_send_packet_raw(nc, buf, len);
+    }
 }
 
 
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-05 13:24               ` [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user Thibaut Collet
@ 2015-06-08  5:55                 ` Jason Wang
  2015-06-08  8:21                   ` Thibaut Collet
  2015-06-08 10:12                 ` Michael S. Tsirkin
  2015-06-08 13:25                 ` Stefan Hajnoczi
  2 siblings, 1 reply; 33+ messages in thread
From: Jason Wang @ 2015-06-08  5:55 UTC (permalink / raw)
  To: Thibaut Collet, qemu-devel; +Cc: mst, stefanha, quintela



On 06/05/2015 09:24 PM, Thibaut Collet wrote:
> Add VIRTIO_NET_F_GUEST_ANNOUNCE capability to vhost-net when netdev backend is
> vhost-user.
>
> For netdev backend using virtio-net NIC the self announce is managed directly
> by the virtio-net NIC and not by the netdev backend itself.
> To avoid duplication of announces (once from the guest and once from QEMU) a
> bitfield is added in the NetClientState structure.
> If this bit is set self announce does not send message to the guest to request
> gratuitous ARP but let virtio-net NIC set the VIRTIO_NET_S_ANNOUNCE for
> gratuitous ARP.
>
> Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> ---
> v2: do not discard anymore packets send to vhost-user: it is GARP request after
>     live migration.
>     As suggested by S. Hajnoczi qemu_announce_self skips virtio-net NIC that
>     already send GARP.
>
>  hw/net/vhost_net.c |    2 ++
>  include/net/net.h  |    1 +
>  net/vhost-user.c   |    2 ++
>  savevm.c           |   11 ++++++++---
>  4 files changed, 13 insertions(+), 3 deletions(-)
>
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 426b23e..a745f97 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
>      VIRTIO_NET_F_CTRL_MAC_ADDR,
>      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
>  
> +    VIRTIO_NET_F_GUEST_ANNOUNCE,
> +
>      VIRTIO_NET_F_MQ,
>  
>      VHOST_INVALID_FEATURE_BIT
> diff --git a/include/net/net.h b/include/net/net.h
> index e66ca03..a78e9df 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -85,6 +85,7 @@ struct NetClientState {
>      char *name;
>      char info_str[256];
>      unsigned receive_disabled : 1;
> +    unsigned self_announce_disabled : 1;
>      NetClientDestructor *destructor;
>      unsigned int queue_index;
>      unsigned rxfilter_notify_enabled:1;
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 8d26728..b345446 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -147,6 +147,8 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>  
>          s = DO_UPCAST(VhostUserState, nc, nc);
>  
> +        /* Self announce is managed directly by virtio-net NIC */
> +        s->nc.self_announce_disabled = 1;

Shouldn't we do this in set_features consider it needs guest support or
you want to disable gratuitous packet for vhost-user at all?

>          /* We don't provide a receive callback */
>          s->nc.receive_disabled = 1;
>          s->chr = chr;
> diff --git a/savevm.c b/savevm.c
> index 3b0e222..7a134b1 100644
> --- a/savevm.c
> +++ b/savevm.c
> @@ -80,11 +80,16 @@ static void qemu_announce_self_iter(NICState *nic, void *opaque)
>  {
>      uint8_t buf[60];
>      int len;
> +    NetClientState *nc;
>  
> -    trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
> -    len = announce_self_create(buf, nic->conf->macaddr.a);
> +    nc = qemu_get_queue(nic);
>  
> -    qemu_send_packet_raw(qemu_get_queue(nic), buf, len);
> +    if (!nc->peer->self_announce_disabled) {
> +        trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
> +        len = announce_self_create(buf, nic->conf->macaddr.a);
> +
> +        qemu_send_packet_raw(nc, buf, len);
> +    }
>  }
>  
>  

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08  5:55                 ` Jason Wang
@ 2015-06-08  8:21                   ` Thibaut Collet
  2015-06-08  9:14                     ` Jason Wang
  2015-06-08 13:32                     ` Stefan Hajnoczi
  0 siblings, 2 replies; 33+ messages in thread
From: Thibaut Collet @ 2015-06-08  8:21 UTC (permalink / raw)
  To: Jason Wang; +Cc: mst, qemu-devel, Stefan Hajnoczi, quintela

[-- Attachment #1: Type: text/plain, Size: 4744 bytes --]

Hi,

My understanding of gratuitous packet with virtio for any backend (vhost
user or other):
- When the VM is loaded (first start or migration) the virtio net
interfaces are loaded ( virtio_net_load_device function in
hw/net/virtio-net.c)
- If the guest has the VIRTIO_NET_F_GUEST_ANNOUNCE capability, a request to
send a gratuitous packet is made.

1. To enable gratuitous packet through this mechanism I have added
VIRTIO_NET_F_GUEST_ANNOUNCE
capability to hw/net/vhost_net.c. So host and guest can negotiate this
feature when vhost-user is used.

2. self announce occurs in case of live migration. During a live migration
a GARP is sent to all net backend through a queue dedicated to the net
backend.
   But for vhost-user:
       - this operation is not possible (vhost-user has no queue)
       - it is already done with the previous mechanism.
   Rather than defining a queue for vhost-user and notifying the guest twice to
send a gratuitous packet, I have disabled GARP from self announce and use only
the first mechanism for that.

I have tested my modifications with a guest that supports
VIRTIO_NET_F_GUEST_ANNOUNCE and vhost-user on the host. After a live
migration I see the GARP from the guest.

Regards.

On Mon, Jun 8, 2015 at 7:55 AM, Jason Wang <jasowang@redhat.com> wrote:

>
>
> On 06/05/2015 09:24 PM, Thibaut Collet wrote:
> > Add VIRTIO_NET_F_GUEST_ANNOUNCE capability to vhost-net when netdev
> backend is
> > vhost-user.
> >
> > For netdev backend using virtio-net NIC the self announce is managed
> directly
> > by the virtio-net NIC and not by the netdev backend itself.
> > To avoid duplication of announces (once from the guest and once from
> QEMU) a
> > bitfield is added in the NetClientState structure.
> > If this bit is set self announce does not send message to the guest to
> request
> > gratuitous ARP but let virtio-net NIC set the VIRTIO_NET_S_ANNOUNCE for
> > gratuitous ARP.
> >
> > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> > ---
> > v2: do not discard anymore packets send to vhost-user: it is GARP
> request after
> >     live migration.
> >     As suggested by S. Hajnoczi qemu_announce_self skips virtio-net NIC
> that
> >     already send GARP.
> >
> >  hw/net/vhost_net.c |    2 ++
> >  include/net/net.h  |    1 +
> >  net/vhost-user.c   |    2 ++
> >  savevm.c           |   11 ++++++++---
> >  4 files changed, 13 insertions(+), 3 deletions(-)
> >
> > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> > index 426b23e..a745f97 100644
> > --- a/hw/net/vhost_net.c
> > +++ b/hw/net/vhost_net.c
> > @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
> >      VIRTIO_NET_F_CTRL_MAC_ADDR,
> >      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
> >
> > +    VIRTIO_NET_F_GUEST_ANNOUNCE,
> > +
> >      VIRTIO_NET_F_MQ,
> >
> >      VHOST_INVALID_FEATURE_BIT
> > diff --git a/include/net/net.h b/include/net/net.h
> > index e66ca03..a78e9df 100644
> > --- a/include/net/net.h
> > +++ b/include/net/net.h
> > @@ -85,6 +85,7 @@ struct NetClientState {
> >      char *name;
> >      char info_str[256];
> >      unsigned receive_disabled : 1;
> > +    unsigned self_announce_disabled : 1;
> >      NetClientDestructor *destructor;
> >      unsigned int queue_index;
> >      unsigned rxfilter_notify_enabled:1;
> > diff --git a/net/vhost-user.c b/net/vhost-user.c
> > index 8d26728..b345446 100644
> > --- a/net/vhost-user.c
> > +++ b/net/vhost-user.c
> > @@ -147,6 +147,8 @@ static int net_vhost_user_init(NetClientState *peer,
> const char *device,
> >
> >          s = DO_UPCAST(VhostUserState, nc, nc);
> >
> > +        /* Self announce is managed directly by virtio-net NIC */
> > +        s->nc.self_announce_disabled = 1;
>
> Shouldn't we do this in set_features consider it needs guest support or
> you want to disable gratuitous packet for vhost-user at all?
>
> >          /* We don't provide a receive callback */
> >          s->nc.receive_disabled = 1;
> >          s->chr = chr;
> > diff --git a/savevm.c b/savevm.c
> > index 3b0e222..7a134b1 100644
> > --- a/savevm.c
> > +++ b/savevm.c
> > @@ -80,11 +80,16 @@ static void qemu_announce_self_iter(NICState *nic,
> void *opaque)
> >  {
> >      uint8_t buf[60];
> >      int len;
> > +    NetClientState *nc;
> >
> > -    trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
> > -    len = announce_self_create(buf, nic->conf->macaddr.a);
> > +    nc = qemu_get_queue(nic);
> >
> > -    qemu_send_packet_raw(qemu_get_queue(nic), buf, len);
> > +    if (!nc->peer->self_announce_disabled) {
> > +
> trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
> > +        len = announce_self_create(buf, nic->conf->macaddr.a);
> > +
> > +        qemu_send_packet_raw(nc, buf, len);
> > +    }
> >  }
> >
> >
>
>

[-- Attachment #2: Type: text/html, Size: 6241 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08  8:21                   ` Thibaut Collet
@ 2015-06-08  9:14                     ` Jason Wang
  2015-06-08 10:01                       ` Thibaut Collet
  2015-06-08 13:32                     ` Stefan Hajnoczi
  1 sibling, 1 reply; 33+ messages in thread
From: Jason Wang @ 2015-06-08  9:14 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: quintela, qemu-devel, Stefan Hajnoczi, mst



On 06/08/2015 04:21 PM, Thibaut Collet wrote:
> Hi,
>
> My understanding of gratuitous packet with virtio for any backend
> (vhost user or other):
> - When the VM is loaded (first start or migration) the virtio net
> interfaces are loaded ( virtio_net_load_device function in
> hw/net/virtio-net.c)
> - If the guest has the VIRTIO_NET_F_GUEST_ANNOUNCE capability, request
> to send gratuitous packet is done.
>
> 1. To enable gratuitous packet through this mechanism I have
> added VIRTIO_NET_F_GUEST_ANNOUNCE capability to hw/net/vhost_net.c. So
> host and guest can negotiate this feature when vhost-user is used.
>
> 2. self announce occurs in case of live migration. During a live
> migration a GARP is sent to all net backend through a queue dedicated
> to the net backend.
>    But for vhost-user:
>        - this operation is not possible (vhost-user has no queue)
>        - it is already done with the previous mechanism.
>    Rather to define a queue to vhost user and notify twice the guest
> to send gratuitous packet I have disable GARP from self announce and
> use only the first mechanism for that.
>
> I have tested my modifications with guest that supports
> VIRTIO_NET_F_GUEST_ANNOUNCE and vhost-user on the host. After a live
> migration I have the GARP from the guest.

Yes, your patch works well for recent drivers. But the problem is legacy
guest/driver without VIRTIO_NET_F_GUEST_ANNOUNCE. In this case there
will be no GARP sent after migration.

Thanks

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08  9:14                     ` Jason Wang
@ 2015-06-08 10:01                       ` Thibaut Collet
  2015-06-08 10:09                         ` Michael S. Tsirkin
  2015-06-08 10:22                         ` Jason Wang
  0 siblings, 2 replies; 33+ messages in thread
From: Thibaut Collet @ 2015-06-08 10:01 UTC (permalink / raw)
  To: Jason Wang; +Cc: quintela, qemu-devel, Stefan Hajnoczi, mst

[-- Attachment #1: Type: text/plain, Size: 2487 bytes --]

Hi,

I agree that my patch is OK only for recent drivers. To keep the patch simple,
with only a few modifications, I do not implement a solution for old drivers,
but it can be done later.

For legacy guest without VIRTIO_NET_F_GUEST_ANNOUNCE the problem is more
complex. The RARP must be sent by the vhost client/backend. This component
is outside QEMU (the reference implementation is snabbswitch:
http://www.virtualopensystems.com/en/solutions/guides/snabbswitch-qemu/).
To do that:
- a receive function must be defined for the vhost user.
- a message must be added between QEMU and vapp. This message will be sent
only for old guest driver to avoid GARP duplication.
- the added self_announce_disabled must be removed (decision to send or not
the RARP is done later by the backend and not by the generic migration
method)

Do you agree with this solution ?


Regards.

On Mon, Jun 8, 2015 at 11:14 AM, Jason Wang <jasowang@redhat.com> wrote:

>
>
> On 06/08/2015 04:21 PM, Thibaut Collet wrote:
> > Hi,
> >
> > My understanding of gratuitous packet with virtio for any backend
> > (vhost user or other):
> > - When the VM is loaded (first start or migration) the virtio net
> > interfaces are loaded ( virtio_net_load_device function in
> > hw/net/virtio-net.c)
> > - If the guest has the VIRTIO_NET_F_GUEST_ANNOUNCE capability, request
> > to send gratuitous packet is done.
> >
> > 1. To enable gratuitous packet through this mechanism I have
> > added VIRTIO_NET_F_GUEST_ANNOUNCE capability to hw/net/vhost_net.c. So
> > host and guest can negotiate this feature when vhost-user is used.
> >
> > 2. self announce occurs in case of live migration. During a live
> > migration a GARP is sent to all net backend through a queue dedicated
> > to the net backend.
> >    But for vhost-user:
> >        - this operation is not possible (vhost-user has no queue)
> >        - it is already done with the previous mechanism.
> >    Rather to define a queue to vhost user and notify twice the guest
> > to send gratuitous packet I have disable GARP from self announce and
> > use only the first mechanism for that.
> >
> > I have tested my modifications with guest that supports
> > VIRTIO_NET_F_GUEST_ANNOUNCE and vhost-user on the host. After a live
> > migration I have the GARP from the guest.
>
> Yes, your patch works well for recent drivers. But the problem is legacy
> guest/driver without VIRTIO_NET_F_GUEST_ANNOUNCE. In this case there
> will be no GARP sent after migration.
>
> Thanks
>

[-- Attachment #2: Type: text/html, Size: 3327 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 10:01                       ` Thibaut Collet
@ 2015-06-08 10:09                         ` Michael S. Tsirkin
  2015-06-08 10:22                         ` Jason Wang
  1 sibling, 0 replies; 33+ messages in thread
From: Michael S. Tsirkin @ 2015-06-08 10:09 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi, quintela

On Mon, Jun 08, 2015 at 12:01:38PM +0200, Thibaut Collet wrote:
> Hi,
> 
> I agree that my patch is OK only for recent drivers. To have a simple patch
> with only few modifications I do not implement a solution for old driver but I
> can be done later.
> 
> For legacy guest without VIRTIO_NET_F_GUEST_ANNOUNCE the problem is more
> complex. The RARP must be sent by the vhost client/backend. This component is
> outside QEMU (the reference implementation is snabbswitch: http://
> www.virtualopensystems.com/en/solutions/guides/snabbswitch-qemu/). To do that:
> - a receive function must be defined for the vhost user.
> - a message must be added between QEMU and vapp. This message will be sent only
> for old guest driver to avoid GARP duplication.
> - the added self_announce_disabled must be removed (decision to send or not the
> RARP is done later by the backend and not by the generic migration method)
> 
> Do you agree with this solution ?
> 
> 
> Regards.

I don't get it.  Why do you need any extra messages for old drivers?  To
detect old drivers, simply have the backend check whether
VIRTIO_NET_F_GUEST_ANNOUNCE is set.

But I don't see this as a blocker for this patch,
this can be added separately as needed.


> On Mon, Jun 8, 2015 at 11:14 AM, Jason Wang <jasowang@redhat.com> wrote:
> 
>    
> 
>     On 06/08/2015 04:21 PM, Thibaut Collet wrote:
>     > Hi,
>     >
>     > My understanding of gratuitous packet with virtio for any backend
>     > (vhost user or other):
>     > - When the VM is loaded (first start or migration) the virtio net
>     > interfaces are loaded ( virtio_net_load_device function in
>     > hw/net/virtio-net.c)
>     > - If the guest has the VIRTIO_NET_F_GUEST_ANNOUNCE capability, request
>     > to send gratuitous packet is done.
>     >
>     > 1. To enable gratuitous packet through this mechanism I have
>     > added VIRTIO_NET_F_GUEST_ANNOUNCE capability to hw/net/vhost_net.c. So
>     > host and guest can negotiate this feature when vhost-user is used.
>     >
>     > 2. self announce occurs in case of live migration. During a live
>     > migration a GARP is sent to all net backend through a queue dedicated
>     > to the net backend.
>     >    But for vhost-user:
>     >        - this operation is not possible (vhost-user has no queue)
>     >        - it is already done with the previous mechanism.
>     >    Rather to define a queue to vhost user and notify twice the guest
>     > to send gratuitous packet I have disable GARP from self announce and
>     > use only the first mechanism for that.
>     >
>     > I have tested my modifications with guest that supports
>     > VIRTIO_NET_F_GUEST_ANNOUNCE and vhost-user on the host. After a live
>     > migration I have the GARP from the guest.
> 
>     Yes, your patch works well for recent drivers. But the problem is legacy
>     guest/driver without VIRTIO_NET_F_GUEST_ANNOUNCE. In this case there
>     will be no GARP sent after migration.
> 
>     Thanks
> 
> 

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-05 13:24               ` [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user Thibaut Collet
  2015-06-08  5:55                 ` Jason Wang
@ 2015-06-08 10:12                 ` Michael S. Tsirkin
  2015-06-08 11:29                   ` Thibaut Collet
  2015-06-08 13:25                 ` Stefan Hajnoczi
  2 siblings, 1 reply; 33+ messages in thread
From: Michael S. Tsirkin @ 2015-06-08 10:12 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: jasowang, qemu-devel, stefanha, quintela

On Fri, Jun 05, 2015 at 03:24:12PM +0200, Thibaut Collet wrote:
> Add VIRTIO_NET_F_GUEST_ANNOUNCE capability to vhost-net when netdev backend is
> vhost-user.
> 
> For netdev backend using virtio-net NIC the self announce is managed directly
> by the virtio-net NIC and not by the netdev backend itself.
> To avoid duplication of announces (once from the guest and once from QEMU) a
> bitfield is added in the NetClientState structure.
> If this bit is set self announce does not send message to the guest to request
> gratuitous ARP but let virtio-net NIC set the VIRTIO_NET_S_ANNOUNCE for
> gratuitous ARP.
> 
> Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> ---
> v2: do not discard anymore packets send to vhost-user: it is GARP request after
>     live migration.
>     As suggested by S. Hajnoczi qemu_announce_self skips virtio-net NIC that
>     already send GARP.
> 
>  hw/net/vhost_net.c |    2 ++
>  include/net/net.h  |    1 +
>  net/vhost-user.c   |    2 ++
>  savevm.c           |   11 ++++++++---
>  4 files changed, 13 insertions(+), 3 deletions(-)
> 
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 426b23e..a745f97 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
>      VIRTIO_NET_F_CTRL_MAC_ADDR,
>      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
>  
> +    VIRTIO_NET_F_GUEST_ANNOUNCE,
> +
>      VIRTIO_NET_F_MQ,
>  
>      VHOST_INVALID_FEATURE_BIT
> diff --git a/include/net/net.h b/include/net/net.h
> index e66ca03..a78e9df 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -85,6 +85,7 @@ struct NetClientState {
>      char *name;
>      char info_str[256];
>      unsigned receive_disabled : 1;
> +    unsigned self_announce_disabled : 1;
>      NetClientDestructor *destructor;
>      unsigned int queue_index;
>      unsigned rxfilter_notify_enabled:1;
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 8d26728..b345446 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -147,6 +147,8 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>  
>          s = DO_UPCAST(VhostUserState, nc, nc);
>  
> +        /* Self announce is managed directly by virtio-net NIC */
> +        s->nc.self_announce_disabled = 1;
>          /* We don't provide a receive callback */
>          s->nc.receive_disabled = 1;
>          s->chr = chr;
> diff --git a/savevm.c b/savevm.c
> index 3b0e222..7a134b1 100644
> --- a/savevm.c
> +++ b/savevm.c
> @@ -80,11 +80,16 @@ static void qemu_announce_self_iter(NICState *nic, void *opaque)
>  {
>      uint8_t buf[60];
>      int len;
> +    NetClientState *nc;
>  
> -    trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
> -    len = announce_self_create(buf, nic->conf->macaddr.a);
> +    nc = qemu_get_queue(nic);
>  
> -    qemu_send_packet_raw(qemu_get_queue(nic), buf, len);
> +    if (!nc->peer->self_announce_disabled) {
> +        trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
> +        len = announce_self_create(buf, nic->conf->macaddr.a);
> +
> +        qemu_send_packet_raw(nc, buf, len);
> +    }
>  }
>  

I don't think qemu_send_packet_raw can ever work for vhost user.
What happens if you merely add VIRTIO_NET_F_GUEST_ANNOUNCE
to the feature list, and drop the rest of the patch?


> -- 
> 1.7.10.4

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 10:01                       ` Thibaut Collet
  2015-06-08 10:09                         ` Michael S. Tsirkin
@ 2015-06-08 10:22                         ` Jason Wang
  1 sibling, 0 replies; 33+ messages in thread
From: Jason Wang @ 2015-06-08 10:22 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: mst, qemu-devel, Stefan Hajnoczi, quintela



On 06/08/2015 06:01 PM, Thibaut Collet wrote:
> Hi,
>
> I agree that my patch is OK only for recent drivers. To have a simple
> patch with only a few modifications I do not implement a solution for
> old drivers, but it can be done later.
>

This makes sense.

> For legacy guest without VIRTIO_NET_F_GUEST_ANNOUNCE the problem is
> more complex. The RARP must be sent by the vhost client/backend. This
> component is outside QEMU (the reference implementation is
> snabbswitch: http://www.virtualopensystems.com/en/solutions/guides/snabbswitch-qemu/).
> To do that:
> - a receive function must be defined for the vhost user.
> - a message must be added between QEMU and vapp. This message will be
> sent only for old guest driver to avoid GARP duplication.
> - the added self_announce_disabled must be removed (decision to send
> or not the RARP is done later by the backend and not by the generic
> migration method)
>
> Do you agree with this solution ?
>
>

Sounds ok.

> Regards.
>
> On Mon, Jun 8, 2015 at 11:14 AM, Jason Wang <jasowang@redhat.com
> <mailto:jasowang@redhat.com>> wrote:
>
>
>
>     On 06/08/2015 04:21 PM, Thibaut Collet wrote:
>     > Hi,
>     >
>     > My understanding of gratuitous packet with virtio for any backend
>     > (vhost user or other):
>     > - When the VM is loaded (first start or migration) the virtio net
>     > interfaces are loaded ( virtio_net_load_device function in
>     > hw/net/virtio-net.c)
>     > - If the guest has the VIRTIO_NET_F_GUEST_ANNOUNCE capability,
>     request
>     > to send gratuitous packet is done.
>     >
>     > 1. To enable gratuitous packet through this mechanism I have
>     > added VIRTIO_NET_F_GUEST_ANNOUNCE capability to
>     hw/net/vhost_net.c. So
>     > host and guest can negotiate this feature when vhost-user is used.
>     >
>     > 2. self announce occurs in case of live migration. During a live
>     > migration a GARP is sent to all net backend through a queue
>     dedicated
>     > to the net backend.
>     >    But for vhost-user:
>     >        - this operation is not possible (vhost-user has no queue)
>     >        - it is already done with the previous mechanism.
>     >    Rather to define a queue to vhost user and notify twice the guest
>     > to send gratuitous packet I have disable GARP from self announce and
>     > use only the first mechanism for that.
>     >
>     > I have tested my modifications with guest that supports
>     > VIRTIO_NET_F_GUEST_ANNOUNCE and vhost-user on the host. After a live
>     > migration I have the GARP from the guest.
>
>     Yes, your patch works well for recent drivers. But the problem is
>     legacy
>     guest/driver without VIRTIO_NET_F_GUEST_ANNOUNCE. In this case there
>     will be no GARP sent after migration.
>
>     Thanks
>
>

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 10:12                 ` Michael S. Tsirkin
@ 2015-06-08 11:29                   ` Thibaut Collet
  2015-06-08 12:45                     ` Michael S. Tsirkin
  0 siblings, 1 reply; 33+ messages in thread
From: Thibaut Collet @ 2015-06-08 11:29 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi, quintela

[-- Attachment #1: Type: text/plain, Size: 5296 bytes --]

Hi,

> I don't think qemu_send_packet_raw can ever work for vhost user.
> What happens if you merely add VIRTIO_NET_F_GUEST_ANNOUNCE
> to the feature list, and drop the rest of the patch?

If you merely add VIRTIO_NET_F_GUEST_ANNOUNCE you have the GARP with recent
guests after a live migration.
The rest of the patch (which can be put in a separate commit) avoids a qemu
crash with live migration when vhost-user is present:
 - When a live migration is complete, a RARP is automatically sent
to every net backend (self announce mechanism)
 - vhost user does not provide any receive function and RARP request is
stored in the vhost-user queue
 - When a migration back is done, all the net backend queues are purged. The
stored RARP request for vhost-user is then passed to the registered receive
callback of vhost-user, which is NULL.

Support of live migration for vhost-user needs the whole patch (and more
if we want to support legacy guests with no support of
VIRTIO_NET_F_GUEST_ANNOUNCE).


> I don't get it.  Why do you need any extra messages for old drivers?  To
> detect old drivers, simply have backend check whether
> VIRTIO_NET_F_GUEST_ANNOUNCE is set.

For old driver we can not use the mechanism implemented in virtio-net that
manages VIRTIO_NET_F_GUEST_ANNOUNCE. In this case we must send the RARP on
the network interface created by vhost-user.
My first idea to do that was to add a message between QEMU and vhost
client/backend (vapp or other) to let the vhost client/backend send the
RARP.
But maybe it will be easier to let QEMU send directly the RARP message on
the network interface created by vhost user.
This point can be done later if it is needed.

Regards.


On Mon, Jun 8, 2015 at 12:12 PM, Michael S. Tsirkin <mst@redhat.com> wrote:

> On Fri, Jun 05, 2015 at 03:24:12PM +0200, Thibaut Collet wrote:
> > Add VIRTIO_NET_F_GUEST_ANNOUNCE capability to vhost-net when netdev
> backend is
> > vhost-user.
> >
> > For netdev backend using virtio-net NIC the self announce is managed
> directly
> > by the virtio-net NIC and not by the netdev backend itself.
> > To avoid duplication of announces (once from the guest and once from
> QEMU) a
> > bitfield is added in the NetClientState structure.
> > If this bit is set self announce does not send message to the guest to
> request
> > gratuitous ARP but let virtio-net NIC set the VIRTIO_NET_S_ANNOUNCE for
> > gratuitous ARP.
> >
> > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> > ---
> > v2: do not discard anymore packets send to vhost-user: it is GARP
> request after
> >     live migration.
> >     As suggested by S. Hajnoczi qemu_announce_self skips virtio-net NIC
> that
> >     already send GARP.
> >
> >  hw/net/vhost_net.c |    2 ++
> >  include/net/net.h  |    1 +
> >  net/vhost-user.c   |    2 ++
> >  savevm.c           |   11 ++++++++---
> >  4 files changed, 13 insertions(+), 3 deletions(-)
> >
> > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> > index 426b23e..a745f97 100644
> > --- a/hw/net/vhost_net.c
> > +++ b/hw/net/vhost_net.c
> > @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
> >      VIRTIO_NET_F_CTRL_MAC_ADDR,
> >      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
> >
> > +    VIRTIO_NET_F_GUEST_ANNOUNCE,
> > +
> >      VIRTIO_NET_F_MQ,
> >
> >      VHOST_INVALID_FEATURE_BIT
> > diff --git a/include/net/net.h b/include/net/net.h
> > index e66ca03..a78e9df 100644
> > --- a/include/net/net.h
> > +++ b/include/net/net.h
> > @@ -85,6 +85,7 @@ struct NetClientState {
> >      char *name;
> >      char info_str[256];
> >      unsigned receive_disabled : 1;
> > +    unsigned self_announce_disabled : 1;
> >      NetClientDestructor *destructor;
> >      unsigned int queue_index;
> >      unsigned rxfilter_notify_enabled:1;
> > diff --git a/net/vhost-user.c b/net/vhost-user.c
> > index 8d26728..b345446 100644
> > --- a/net/vhost-user.c
> > +++ b/net/vhost-user.c
> > @@ -147,6 +147,8 @@ static int net_vhost_user_init(NetClientState *peer,
> const char *device,
> >
> >          s = DO_UPCAST(VhostUserState, nc, nc);
> >
> > +        /* Self announce is managed directly by virtio-net NIC */
> > +        s->nc.self_announce_disabled = 1;
> >          /* We don't provide a receive callback */
> >          s->nc.receive_disabled = 1;
> >          s->chr = chr;
> > diff --git a/savevm.c b/savevm.c
> > index 3b0e222..7a134b1 100644
> > --- a/savevm.c
> > +++ b/savevm.c
> > @@ -80,11 +80,16 @@ static void qemu_announce_self_iter(NICState *nic,
> void *opaque)
> >  {
> >      uint8_t buf[60];
> >      int len;
> > +    NetClientState *nc;
> >
> > -    trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
> > -    len = announce_self_create(buf, nic->conf->macaddr.a);
> > +    nc = qemu_get_queue(nic);
> >
> > -    qemu_send_packet_raw(qemu_get_queue(nic), buf, len);
> > +    if (!nc->peer->self_announce_disabled) {
> > +
> trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
> > +        len = announce_self_create(buf, nic->conf->macaddr.a);
> > +
> > +        qemu_send_packet_raw(nc, buf, len);
> > +    }
> >  }
> >
>
> I don't think qemu_send_packet_raw can ever work for vhost user.
> What happens if you merely add VIRTIO_NET_F_GUEST_ANNOUNCE
> to the feature list, and drop the rest of the patch?
>
>
> > --
> > 1.7.10.4
>

[-- Attachment #2: Type: text/html, Size: 8145 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 11:29                   ` Thibaut Collet
@ 2015-06-08 12:45                     ` Michael S. Tsirkin
  2015-06-08 13:20                       ` Thibaut Collet
  0 siblings, 1 reply; 33+ messages in thread
From: Michael S. Tsirkin @ 2015-06-08 12:45 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi, quintela

On Mon, Jun 08, 2015 at 01:29:39PM +0200, Thibaut Collet wrote:
> Hi,
> 
> > I don't think qemu_send_packet_raw can ever work for vhost user.
> > What happens if you merely add VIRTIO_NET_F_GUEST_ANNOUNCE
> > to the feature list, and drop the rest of the patch?
> 
> If you merely add VIRTIO_NET_F_GUEST_ANNOUNCE you have the GARP with recent
> guest after a live migration.
> The rest of the patch (can be set in a separate commit) avoid a qemu crashes
> with live migration when vhost-user is present:
>  - When a live migration migration is complete a RARP is automatically send to
> any net backend (self announce mechanism)
>  - vhost user does not provide any receive function and RARP request is stored
> in the vhost-user queue
>  - When a migration back is done all the net backend queues are purged. The
> stored RARP request for vhost-user is then sent to the register receive
> callback of vhost-user that is NULL.
> 
> Support of live migration for vhost user needs the whole patch. (and maore if
> we want support legacy guest with no support of VIRTIO_NET_F_GUEST_ANNOUNCE)

How about implementing a receive function that discards all packets
then?

> 
> > I don't get it.  Why do you need any extra messages for old drivers?  To
> > detect old drivers, simply have backend check whether
> > VIRTIO_NET_F_GUEST_ANNOUNCE is set.
> 
> For old driver we can not use the mechanism implemented in virtio-net that
> manages VIRTIO_NET_F_GUEST_ANNOUNCE. In this case we must send the RARP on the
> network interface created by vhost-user.
> My first idea to do that was to add a message between QEMU and vhost client/
> backend (vapp or other) to let the vhost client/backend send the RARP.
> But maybe it will be easier to let QEMU send directly the RARP message on the
> network interface created by vhost user.
> This point can be done later if it is needed.
> 
> Regards.

Can't the vhost-user backend send this automatically?
Why do we need to do anything in QEMU?

> 
> On Mon, Jun 8, 2015 at 12:12 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> 
>     On Fri, Jun 05, 2015 at 03:24:12PM +0200, Thibaut Collet wrote:
>     > Add VIRTIO_NET_F_GUEST_ANNOUNCE capability to vhost-net when netdev
>     backend is
>     > vhost-user.
>     >
>     > For netdev backend using virtio-net NIC the self announce is managed
>     directly
>     > by the virtio-net NIC and not by the netdev backend itself.
>     > To avoid duplication of announces (once from the guest and once from
>     QEMU) a
>     > bitfield is added in the NetClientState structure.
>     > If this bit is set self announce does not send message to the guest to
>     request
>     > gratuitous ARP but let virtio-net NIC set the VIRTIO_NET_S_ANNOUNCE for
>     > gratuitous ARP.
>     >
>     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
>     > ---
>     > v2: do not discard anymore packets send to vhost-user: it is GARP request
>     after
>     >     live migration.
>     >     As suggested by S. Hajnoczi qemu_announce_self skips virtio-net NIC
>     that
>     >     already send GARP.
>     >
>     >  hw/net/vhost_net.c |    2 ++
>     >  include/net/net.h  |    1 +
>     >  net/vhost-user.c   |    2 ++
>     >  savevm.c           |   11 ++++++++---
>     >  4 files changed, 13 insertions(+), 3 deletions(-)
>     >
>     > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
>     > index 426b23e..a745f97 100644
>     > --- a/hw/net/vhost_net.c
>     > +++ b/hw/net/vhost_net.c
>     > @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
>     >      VIRTIO_NET_F_CTRL_MAC_ADDR,
>     >      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
>     >
>     > +    VIRTIO_NET_F_GUEST_ANNOUNCE,
>     > +
>     >      VIRTIO_NET_F_MQ,
>     >
>     >      VHOST_INVALID_FEATURE_BIT
>     > diff --git a/include/net/net.h b/include/net/net.h
>     > index e66ca03..a78e9df 100644
>     > --- a/include/net/net.h
>     > +++ b/include/net/net.h
>     > @@ -85,6 +85,7 @@ struct NetClientState {
>     >      char *name;
>     >      char info_str[256];
>     >      unsigned receive_disabled : 1;
>     > +    unsigned self_announce_disabled : 1;
>     >      NetClientDestructor *destructor;
>     >      unsigned int queue_index;
>     >      unsigned rxfilter_notify_enabled:1;
>     > diff --git a/net/vhost-user.c b/net/vhost-user.c
>     > index 8d26728..b345446 100644
>     > --- a/net/vhost-user.c
>     > +++ b/net/vhost-user.c
>     > @@ -147,6 +147,8 @@ static int net_vhost_user_init(NetClientState *peer,
>     const char *device,
>     >
>     >          s = DO_UPCAST(VhostUserState, nc, nc);
>     >
>     > +        /* Self announce is managed directly by virtio-net NIC */
>     > +        s->nc.self_announce_disabled = 1;
>     >          /* We don't provide a receive callback */
>     >          s->nc.receive_disabled = 1;
>     >          s->chr = chr;
>     > diff --git a/savevm.c b/savevm.c
>     > index 3b0e222..7a134b1 100644
>     > --- a/savevm.c
>     > +++ b/savevm.c
>     > @@ -80,11 +80,16 @@ static void qemu_announce_self_iter(NICState *nic,
>     void *opaque)
>     >  {
>     >      uint8_t buf[60];
>     >      int len;
>     > +    NetClientState *nc;
>     >
>     > -    trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
>     > -    len = announce_self_create(buf, nic->conf->macaddr.a);
>     > +    nc = qemu_get_queue(nic);
>     >
>     > -    qemu_send_packet_raw(qemu_get_queue(nic), buf, len);
>     > +    if (!nc->peer->self_announce_disabled) {
>     > +        trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->
>     macaddr));
>     > +        len = announce_self_create(buf, nic->conf->macaddr.a);
>     > +
>     > +        qemu_send_packet_raw(nc, buf, len);
>     > +    }
>     >  }
>     >
> 
>     I don't think qemu_send_packet_raw can ever work for vhost user.
>     What happens if you merely add VIRTIO_NET_F_GUEST_ANNOUNCE
>     to the feature list, and drop the rest of the patch?
>    
> 
>     > --
>     > 1.7.10.4
> 
> 

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 12:45                     ` Michael S. Tsirkin
@ 2015-06-08 13:20                       ` Thibaut Collet
  2015-06-08 15:48                         ` Michael S. Tsirkin
  0 siblings, 1 reply; 33+ messages in thread
From: Thibaut Collet @ 2015-06-08 13:20 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi, quintela

[-- Attachment #1: Type: text/plain, Size: 7593 bytes --]

> How about implementing a receive function that discards all packets
> then?

Implementing a receive function that discards all packets is another
solution. I have not chosen this one so as to avoid masking other potential
issues:
Normally vhost-user never receives packets. By keeping a NULL function as
the receive callback we can easily detect cases where packets are sent (qemu
crashes) and solve the issue.

> Can't vhost-user backend sends this automatically?
> Why do we need to do anything in QEMU?

My explanations may be unclear. For old drivers we have to send the RARP
to the guest through the network interface (and to implement a receive
function for vhost-user). My question is: what is the best location to do
that:
 1. In the receive function of vhost-user (in the file net/vhost-user.c)
 2. In a self announce function (called when vhost-user receives a RARP,
analysis of the packet content is needed in this case) in the file
hw/net/vhost-net.c
 3. In the vhost-user backend (file hw/virtio/vhost-user.c). In this case a
new message must be defined between hw/net/vhost-net.c and
hw/virtio/vhost-user.c.
 4. In the vhost client/backend (vapp or other)



On Mon, Jun 8, 2015 at 2:45 PM, Michael S. Tsirkin <mst@redhat.com> wrote:

> On Mon, Jun 08, 2015 at 01:29:39PM +0200, Thibaut Collet wrote:
> > Hi,
> >
> > > I don't think qemu_send_packet_raw can ever work for vhost user.
> > > What happens if you merely add VIRTIO_NET_F_GUEST_ANNOUNCE
> > > to the feature list, and drop the rest of the patch?
> >
> > If you merely add VIRTIO_NET_F_GUEST_ANNOUNCE you have the GARP with
> recent
> > guest after a live migration.
> > The rest of the patch (can be set in a separate commit) avoid a qemu
> crashes
> > with live migration when vhost-user is present:
> >  - When a live migration migration is complete a RARP is automatically
> send to
> > any net backend (self announce mechanism)
> >  - vhost user does not provide any receive function and RARP request is
> stored
> > in the vhost-user queue
> >  - When a migration back is done all the net backend queues are purged.
> The
> > stored RARP request for vhost-user is then sent to the register receive
> > callback of vhost-user that is NULL.
> >
> > Support of live migration for vhost user needs the whole patch. (and
> maore if
> > we want support legacy guest with no support
> of VIRTIO_NET_F_GUEST_ANNOUNCE)
>
> How about implementing a receive function that discards all packets
> then?
>
> >
> > > I don't get it.  Why do you need any extra messages for old drivers?
> To
> > > detect old drivers, simply have backend check whether
> > > VIRTIO_NET_F_GUEST_ANNOUNCE is set.
> >
> > For old driver we can not use the mechanism implemented in virtio-net
> that
> > manages VIRTIO_NET_F_GUEST_ANNOUNCE. In this case we must send the RARP
> on the
> > network interface created by vhost-user.
> > My first idea to do that was to add a message between QEMU and vhost
> client/
> > backend (vapp or other) to let the vhost client/backend send the RARP.
> > But maybe it will be easier to let QEMU send directly the RARP message
> on the
> > network interface created by vhost user.
> > This point can be done later if it is needed.
> >
> > Regards.
>
> Can't vhost-user backend sends this automatically?
> Why do we need to do anything in QEMU?
>
> >
> > On Mon, Jun 8, 2015 at 12:12 PM, Michael S. Tsirkin <mst@redhat.com>
> wrote:
> >
> >     On Fri, Jun 05, 2015 at 03:24:12PM +0200, Thibaut Collet wrote:
> >     > Add VIRTIO_NET_F_GUEST_ANNOUNCE capability to vhost-net when netdev
> >     backend is
> >     > vhost-user.
> >     >
> >     > For netdev backend using virtio-net NIC the self announce is
> managed
> >     directly
> >     > by the virtio-net NIC and not by the netdev backend itself.
> >     > To avoid duplication of announces (once from the guest and once
> from
> >     QEMU) a
> >     > bitfield is added in the NetClientState structure.
> >     > If this bit is set self announce does not send message to the
> guest to
> >     request
> >     > gratuitous ARP but let virtio-net NIC set the
> VIRTIO_NET_S_ANNOUNCE for
> >     > gratuitous ARP.
> >     >
> >     > Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> >     > ---
> >     > v2: do not discard anymore packets send to vhost-user: it is GARP
> request
> >     after
> >     >     live migration.
> >     >     As suggested by S. Hajnoczi qemu_announce_self skips
> virtio-net NIC
> >     that
> >     >     already send GARP.
> >     >
> >     >  hw/net/vhost_net.c |    2 ++
> >     >  include/net/net.h  |    1 +
> >     >  net/vhost-user.c   |    2 ++
> >     >  savevm.c           |   11 ++++++++---
> >     >  4 files changed, 13 insertions(+), 3 deletions(-)
> >     >
> >     > diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> >     > index 426b23e..a745f97 100644
> >     > --- a/hw/net/vhost_net.c
> >     > +++ b/hw/net/vhost_net.c
> >     > @@ -82,6 +82,8 @@ static const int user_feature_bits[] = {
> >     >      VIRTIO_NET_F_CTRL_MAC_ADDR,
> >     >      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
> >     >
> >     > +    VIRTIO_NET_F_GUEST_ANNOUNCE,
> >     > +
> >     >      VIRTIO_NET_F_MQ,
> >     >
> >     >      VHOST_INVALID_FEATURE_BIT
> >     > diff --git a/include/net/net.h b/include/net/net.h
> >     > index e66ca03..a78e9df 100644
> >     > --- a/include/net/net.h
> >     > +++ b/include/net/net.h
> >     > @@ -85,6 +85,7 @@ struct NetClientState {
> >     >      char *name;
> >     >      char info_str[256];
> >     >      unsigned receive_disabled : 1;
> >     > +    unsigned self_announce_disabled : 1;
> >     >      NetClientDestructor *destructor;
> >     >      unsigned int queue_index;
> >     >      unsigned rxfilter_notify_enabled:1;
> >     > diff --git a/net/vhost-user.c b/net/vhost-user.c
> >     > index 8d26728..b345446 100644
> >     > --- a/net/vhost-user.c
> >     > +++ b/net/vhost-user.c
> >     > @@ -147,6 +147,8 @@ static int net_vhost_user_init(NetClientState
> *peer,
> >     const char *device,
> >     >
> >     >          s = DO_UPCAST(VhostUserState, nc, nc);
> >     >
> >     > +        /* Self announce is managed directly by virtio-net NIC */
> >     > +        s->nc.self_announce_disabled = 1;
> >     >          /* We don't provide a receive callback */
> >     >          s->nc.receive_disabled = 1;
> >     >          s->chr = chr;
> >     > diff --git a/savevm.c b/savevm.c
> >     > index 3b0e222..7a134b1 100644
> >     > --- a/savevm.c
> >     > +++ b/savevm.c
> >     > @@ -80,11 +80,16 @@ static void qemu_announce_self_iter(NICState
> *nic,
> >     void *opaque)
> >     >  {
> >     >      uint8_t buf[60];
> >     >      int len;
> >     > +    NetClientState *nc;
> >     >
> >     > -
> trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
> >     > -    len = announce_self_create(buf, nic->conf->macaddr.a);
> >     > +    nc = qemu_get_queue(nic);
> >     >
> >     > -    qemu_send_packet_raw(qemu_get_queue(nic), buf, len);
> >     > +    if (!nc->peer->self_announce_disabled) {
> >     > +        trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->
> >     macaddr));
> >     > +        len = announce_self_create(buf, nic->conf->macaddr.a);
> >     > +
> >     > +        qemu_send_packet_raw(nc, buf, len);
> >     > +    }
> >     >  }
> >     >
> >
> >     I don't think qemu_send_packet_raw can ever work for vhost user.
> >     What happens if you merely add VIRTIO_NET_F_GUEST_ANNOUNCE
> >     to the feature list, and drop the rest of the patch?
> >
> >
> >     > --
> >     > 1.7.10.4
> >
> >
>

[-- Attachment #2: Type: text/html, Size: 10588 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-05 13:24               ` [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user Thibaut Collet
  2015-06-08  5:55                 ` Jason Wang
  2015-06-08 10:12                 ` Michael S. Tsirkin
@ 2015-06-08 13:25                 ` Stefan Hajnoczi
  2 siblings, 0 replies; 33+ messages in thread
From: Stefan Hajnoczi @ 2015-06-08 13:25 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: jasowang, quintela, qemu-devel, mst

[-- Attachment #1: Type: text/plain, Size: 1241 bytes --]

On Fri, Jun 05, 2015 at 03:24:12PM +0200, Thibaut Collet wrote:
> Add VIRTIO_NET_F_GUEST_ANNOUNCE capability to vhost-net when netdev backend is
> vhost-user.
> 
> For netdev backend using virtio-net NIC the self announce is managed directly
> by the virtio-net NIC and not by the netdev backend itself.
> To avoid duplication of announces (once from the guest and once from QEMU) a
> bitfield is added in the NetClientState structure.
> If this bit is set self announce does not send message to the guest to request
> gratuitous ARP but let virtio-net NIC set the VIRTIO_NET_S_ANNOUNCE for
> gratuitous ARP.
> 
> Signed-off-by: Thibaut Collet <thibaut.collet@6wind.com>
> ---
> v2: do not discard anymore packets send to vhost-user: it is GARP request after
>     live migration.
>     As suggested by S. Hajnoczi qemu_announce_self skips virtio-net NIC that
>     already send GARP.
> 
>  hw/net/vhost_net.c |    2 ++
>  include/net/net.h  |    1 +
>  net/vhost-user.c   |    2 ++
>  savevm.c           |   11 ++++++++---
>  4 files changed, 13 insertions(+), 3 deletions(-)

Please send each patch revision as a new email thread.  This makes it
easy to see your new patches in threaded email clients.

Stefan

[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08  8:21                   ` Thibaut Collet
  2015-06-08  9:14                     ` Jason Wang
@ 2015-06-08 13:32                     ` Stefan Hajnoczi
  2015-06-08 14:05                       ` Thibaut Collet
  1 sibling, 1 reply; 33+ messages in thread
From: Stefan Hajnoczi @ 2015-06-08 13:32 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, mst, qemu-devel, quintela

[-- Attachment #1: Type: text/plain, Size: 1980 bytes --]

On Mon, Jun 08, 2015 at 10:21:38AM +0200, Thibaut Collet wrote:
> Hi,
> 
> My understanding of gratuitous packet with virtio for any backend (vhost
> user or other):
> - When the VM is loaded (first start or migration) the virtio net
> interfaces are loaded ( virtio_net_load_device function in
> hw/net/virtio-net.c)
> - If the guest has the VIRTIO_NET_F_GUEST_ANNOUNCE capability, request to
> send gratuitous packet is done.
> 
> 1. To enable gratuitous packet through this mechanism I have added
> VIRTIO_NET_F_GUEST_ANNOUNCE
> capability to hw/net/vhost_net.c. So host and guest can negotiate this
> feature when vhost-user is used.
> 
> 2. self announce occurs in case of live migration. During a live migration
> a GARP is sent to all net backend through a queue dedicated to the net
> backend.
>    But for vhost-user:
>        - this operation is not possible (vhost-user has no queue)
>        - it is already done with the previous mechanism.
>    Rather to define a queue to vhost user and notify twice the guest to
> send gratuitous packet I have disable GARP from self announce and use only
> the first mechanism for that.
> 
> I have tested my modifications with guest that supports
> VIRTIO_NET_F_GUEST_ANNOUNCE and vhost-user on the host. After a live
> migration I have the GARP from the guest.

I think Jason is pointing out that your patch lacks support for guests
that do not negotiate VIRTIO_NET_F_GUEST_ANNOUNCE.

If the guest does not set the feature bit then packets might continue to
get forwarded to the old host.

Perhaps the correct place to implement this is in the virtio-net.c
device instead of in vhost-user.c.  The non-vhost-user case should also
skip sending two ARP packets.

BUT before we go any further:

I've asked several times whether vhost-user support live migration?

You didn't answer that question.  Fixing this issue only makes sense if
vhost-user live migration is supported.

Stefan

[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 13:32                     ` Stefan Hajnoczi
@ 2015-06-08 14:05                       ` Thibaut Collet
  2015-06-08 15:13                         ` Stefan Hajnoczi
  0 siblings, 1 reply; 33+ messages in thread
From: Thibaut Collet @ 2015-06-08 14:05 UTC (permalink / raw)
  To: Stefan Hajnoczi; +Cc: Jason Wang, Michael S. Tsirkin, qemu-devel, quintela

[-- Attachment #1: Type: text/plain, Size: 2858 bytes --]

> I've asked several times whether vhost-user support live migration?

vhost-user can support live migration, but it needs fixes such as this in QEMU.
I found the issue and developed my patch by trying live migration with
vhost-user.

> I think Jason is pointing out that your patch lacks support for guests
> that do not negotiate VIRTIO_NET_F_GUEST_ANNOUNCE.

I have understood the issue with old guest pointed by Jason.
I have been thinking about the best way to solve it and any advice is
welcome.

> Please send each patch revision as a new email thread.  This makes it
> easy to see your new patches in threaded email clients.

I have used the same email thread as it is a rework of my first patch.
I will create a new email thread for the next version.

On Mon, Jun 8, 2015 at 3:32 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:

> On Mon, Jun 08, 2015 at 10:21:38AM +0200, Thibaut Collet wrote:
> > Hi,
> >
> > My understanding of gratuitous packet with virtio for any backend (vhost
> > user or other):
> > - When the VM is loaded (first start or migration) the virtio net
> > interfaces are loaded ( virtio_net_load_device function in
> > hw/net/virtio-net.c)
> > - If the guest has the VIRTIO_NET_F_GUEST_ANNOUNCE capability, request to
> > send gratuitous packet is done.
> >
> > 1. To enable gratuitous packet through this mechanism I have added
> > VIRTIO_NET_F_GUEST_ANNOUNCE
> > capability to hw/net/vhost_net.c. So host and guest can negotiate this
> > feature when vhost-user is used.
> >
> > 2. self announce occurs in case of live migration. During a live
> migration
> > a GARP is sent to all net backend through a queue dedicated to the net
> > backend.
> >    But for vhost-user:
> >        - this operation is not possible (vhost-user has no queue)
> >        - it is already done with the previous mechanism.
> >    Rather to define a queue to vhost user and notify twice the guest to
> > send gratuitous packet I have disable GARP from self announce and use
> only
> > the first mechanism for that.
> >
> > I have tested my modifications with guest that supports
> > VIRTIO_NET_F_GUEST_ANNOUNCE and vhost-user on the host. After a live
> > migration I have the GARP from the guest.
>
> I think Jason is pointing out that your patch lacks support for guests
> that do not negotiate VIRTIO_NET_F_GUEST_ANNOUNCE.
>
> If the guest does not set the feature bit then packets might continue to
> get forwarded to the old host.
>
> Perhaps the correct place to implement this is in the virtio-net.c
> device instead of in vhost-user.c.  The non-vhost-user case should also
> skip sending two ARP packets.
>
> BUT before we go any further:
>
> I've asked several times whether vhost-user support live migration?
>
> You didn't answer that question.  Fixing this issue only makes sense if
> vhost-user live migration is supported.
>
> Stefan
>

[-- Attachment #2: Type: text/html, Size: 4102 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 14:05                       ` Thibaut Collet
@ 2015-06-08 15:13                         ` Stefan Hajnoczi
  2015-06-08 15:32                           ` Thibaut Collet
  2015-06-08 16:13                           ` Michael S. Tsirkin
  0 siblings, 2 replies; 33+ messages in thread
From: Stefan Hajnoczi @ 2015-06-08 15:13 UTC (permalink / raw)
  To: Thibaut Collet
  Cc: Jason Wang, Juan Quintela, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

On Mon, Jun 8, 2015 at 3:05 PM, Thibaut Collet <thibaut.collet@6wind.com> wrote:
>> I think Jason is pointing out that your patch lacks support for guests
>> that do not negotiate VIRTIO_NET_F_GUEST_ANNOUNCE.
>
> I have understood the issue with old guest pointed by Jason.
> I have thinking about the best way to do solve it and any advices are
> welcome.

1. Add vhost-user virtio-net command to inject packets

Add a new VhostUserRequest VHOST_USER_NET_INJECT_PACKET allowing QEMU
to hand a packet to the vhost-user process for injection.  This
command is virtio-net specific and fails if called on a different
device type.

2. Fail negotiation when VIRTIO_NET_F_GUEST_ANNOUNCE is not accepted

This is only really possible in VIRTIO 1.0 and later.  Pre-1.0 does
not allow the device to reject due to disabled features.

file:///home/stefanha/virtio/trunk/virtio-v1.0-cs02.html#x1-450001

Therefore this is not a great solution.

3. The easiest solution - nop .receive()

Just implement a nop .receive() which drops the packet and prints a
warning message to stderr.  This way VIRTIO_NET_F_GUEST_ANNOUNCE
guests work while legacy guests don't send a gratuitous ARP packet.

Stefan

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 15:13                         ` Stefan Hajnoczi
@ 2015-06-08 15:32                           ` Thibaut Collet
  2015-06-09 10:38                             ` Stefan Hajnoczi
  2015-06-09 10:43                             ` Stefan Hajnoczi
  2015-06-08 16:13                           ` Michael S. Tsirkin
  1 sibling, 2 replies; 33+ messages in thread
From: Thibaut Collet @ 2015-06-08 15:32 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: Jason Wang, Juan Quintela, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

[-- Attachment #1: Type: text/plain, Size: 1820 bytes --]

> 3. The easiest solution - nop .receive()

Solution 3 is similar to my implementation and does not solve the issue
pointed out by Jason: legacy guests do not send a gratuitous ARP.

> 2. Fail negotiation when VIRTIO_NET_F_GUEST_ANNOUNCE is not accepted
> file:///home/stefanha/virtio/trunk/virtio-v1.0-cs02.html#x1-450001

Could you send me virtio-v1.0-cs02.html#x1-450001 if you think it can
help me?

Nevertheless, it seems that only solution 1 can solve the
issue pointed out by Jason.

On Mon, Jun 8, 2015 at 5:13 PM, Stefan Hajnoczi <stefanha@gmail.com> wrote:

> On Mon, Jun 8, 2015 at 3:05 PM, Thibaut Collet <thibaut.collet@6wind.com>
> wrote:
> >> I think Jason is pointing out that your patch lacks support for guests
> >> that do not negotiate VIRTIO_NET_F_GUEST_ANNOUNCE.
> >
> > I have understood the issue with old guest pointed by Jason.
> > I have thinking about the best way to do solve it and any advices are
> > welcome.
>
> 1. Add vhost-user virtio-net command to inject packets
>
> Add a new VhostUserRequest VHOST_USER_NET_INJECT_PACKET allowing QEMU
> to hand a packet to the vhost-user process for injection.  This
> command is virtio-net specific and fails if called on a different
> device type.
>
> 2. Fail negotiation when VIRTIO_NET_F_GUEST_ANNOUNCE is not accepted
>
> This is only really possible in VIRTIO 1.0 and later.  Pre-1.0 does
> not allow the device to reject due to disabled features.
>
> file:///home/stefanha/virtio/trunk/virtio-v1.0-cs02.html#x1-450001
>
> Therefore this is not a great solution.
>
> 3. The easiest solution - nop .receive()
>
> Just implement a nop .receive() which drops the packet and prints a
> warning message to stderr.  This way VIRTIO_NET_F_GUEST_ANNOUNCE
> guests work while legacy guests don't send a gratuitous ARP packet.
>
> Stefan
>

[-- Attachment #2: Type: text/html, Size: 2956 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 13:20                       ` Thibaut Collet
@ 2015-06-08 15:48                         ` Michael S. Tsirkin
  0 siblings, 0 replies; 33+ messages in thread
From: Michael S. Tsirkin @ 2015-06-08 15:48 UTC (permalink / raw)
  To: Thibaut Collet; +Cc: Jason Wang, qemu-devel, Stefan Hajnoczi, quintela

On Mon, Jun 08, 2015 at 03:20:23PM +0200, Thibaut Collet wrote:
> > How about implementing a receive function that discards all packets
> > then?
> 
> Implementing a receive function that discards all packets is an other solution.
> I have not chosen this one to avoid to mask other potential issues: 
> Normally vhost-user never receives packets. By keeping a NULL function as
> received callback we can detect easily case where packets are sent (qemu
> crashes) and solve this issue.

What I have an issue with is the fact that you make it depend on guest
configuration.
The vhost-user backend can't get packets from QEMU at all, so it doesn't
make sense to disable sending packets only if the guest sets some flag.

In particular, this flag isn't set around guest reset.


> > Can't vhost-user backend sends this automatically?
> > Why do we need to do anything in QEMU?
> 
> My explanations are maybe unclear. For old driver we have to send the RARP to
> the guest through the network interface (and to implement a receive function
> for vhost-user). My question is: where is the best location to do that:
>  1. In the receive function of vhost-user (in the file net/vhost-user.c)
>  2. In a self announce function (called when vhost-user receives a RARP,
> analysis of the packet content is needed in this case) in the file hw/net/
> vhost-net.c
>  3. In the vhost-user backend (file hw/virtio/vhost-user.c). In this case a new
> message must be defined between hw/net/vhost-net.c and hw/virtio/vhost-user.c.
>  4. In the vhost client/backend (vapp or other)

In the vhost client/backend would be best.
See any issues with this?

-- 
MST

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 15:13                         ` Stefan Hajnoczi
  2015-06-08 15:32                           ` Thibaut Collet
@ 2015-06-08 16:13                           ` Michael S. Tsirkin
  2015-06-09  2:35                             ` Jason Wang
  1 sibling, 1 reply; 33+ messages in thread
From: Michael S. Tsirkin @ 2015-06-08 16:13 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: Thibaut Collet, Jason Wang, qemu-devel, Stefan Hajnoczi, Juan Quintela

On Mon, Jun 08, 2015 at 04:13:59PM +0100, Stefan Hajnoczi wrote:
> On Mon, Jun 8, 2015 at 3:05 PM, Thibaut Collet <thibaut.collet@6wind.com> wrote:
> >> I think Jason is pointing out that your patch lacks support for guests
> >> that do not negotiate VIRTIO_NET_F_GUEST_ANNOUNCE.
> >
> > I have understood the issue with old guest pointed by Jason.
> > I have thinking about the best way to do solve it and any advices are
> > welcome.
> 
> 1. Add vhost-user virtio-net command to inject packets
> 
> Add a new VhostUserRequest VHOST_USER_NET_INJECT_PACKET allowing QEMU
> to hand a packet to the vhost-user process for injection.  This
> command is virtio-net specific and fails if called on a different
> device type.
> 
> 2. Fail negotiation when VIRTIO_NET_F_GUEST_ANNOUNCE is not accepted
> 
> This is only really possible in VIRTIO 1.0 and later.  Pre-1.0 does
> not allow the device to reject due to disabled features.
> 
> file:///home/stefanha/virtio/trunk/virtio-v1.0-cs02.html#x1-450001
> 
> Therefore this is not a great solution.
> 
> 3. The easiest solution - nop .receive()
> 
> Just implement a nop .receive() which drops the packet and prints a
> warning message to stderr.  This way VIRTIO_NET_F_GUEST_ANNOUNCE
> guests work while legacy guests don't send a gratuitous ARP packet.
> 
> Stefan

3 sounds good to me.

-- 
MST

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 16:13                           ` Michael S. Tsirkin
@ 2015-06-09  2:35                             ` Jason Wang
  0 siblings, 0 replies; 33+ messages in thread
From: Jason Wang @ 2015-06-09  2:35 UTC (permalink / raw)
  To: Michael S. Tsirkin, Stefan Hajnoczi
  Cc: Thibaut Collet, qemu-devel, Stefan Hajnoczi, Juan Quintela



On 06/09/2015 12:13 AM, Michael S. Tsirkin wrote:
> On Mon, Jun 08, 2015 at 04:13:59PM +0100, Stefan Hajnoczi wrote:
>> On Mon, Jun 8, 2015 at 3:05 PM, Thibaut Collet <thibaut.collet@6wind.com> wrote:
>>>> I think Jason is pointing out that your patch lacks support for guests
>>>> that do not negotiate VIRTIO_NET_F_GUEST_ANNOUNCE.
>>> I have understood the issue with old guest pointed by Jason.
>>> I have thinking about the best way to do solve it and any advices are
>>> welcome.
>> 1. Add vhost-user virtio-net command to inject packets
>>
>> Add a new VhostUserRequest VHOST_USER_NET_INJECT_PACKET allowing QEMU
>> to hand a packet to the vhost-user process for injection.  This
>> command is virtio-net specific and fails if called on a different
>> device type.
>>
>> 2. Fail negotiation when VIRTIO_NET_F_GUEST_ANNOUNCE is not accepted
>>
>> This is only really possible in VIRTIO 1.0 and later.  Pre-1.0 does
>> not allow the device to reject due to disabled features.
>>
>> file:///home/stefanha/virtio/trunk/virtio-v1.0-cs02.html#x1-450001
>>
>> Therefore this is not a great solution.
>>
>> 3. The easiest solution - nop .receive()
>>
>> Just implement a nop .receive() which drops the packet and prints a
>> warning message to stderr.  This way VIRTIO_NET_F_GUEST_ANNOUNCE
>> guests work while legacy guests don't send a gratuitous ARP packet.
>>
>> Stefan
> 3 sounds good to me.
>

And it looks like we can remove the

        s->nc.receive_disabled = 1;

in vhost-user.c in this case.

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 15:32                           ` Thibaut Collet
@ 2015-06-09 10:38                             ` Stefan Hajnoczi
  2015-06-09 10:43                             ` Stefan Hajnoczi
  1 sibling, 0 replies; 33+ messages in thread
From: Stefan Hajnoczi @ 2015-06-09 10:38 UTC (permalink / raw)
  To: Thibaut Collet
  Cc: Jason Wang, Juan Quintela, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

[-- Attachment #1: Type: text/plain, Size: 672 bytes --]

On Mon, Jun 08, 2015 at 05:32:38PM +0200, Thibaut Collet wrote:
> > 3. The easiest solution - nop .receive()
> 
> The solution 3 is similar of my implementation and does not solve issue
> pointed by Jason: legacy guest do not send a gratuitous ARP.
> 
> > 2. Fail negotiation when VIRTIO_NET_F_GUEST_ANNOUNCE is not accepted
> > file:///home/stefanha/virtio/trunk/virtio-v1.0-cs02.html#x1-450001
> 
> Could you send me the virtio-v1.0-cs02.html#x1-450001 if you think it can
> help me ?

Sorry for the local link.  The link is just for background context:

http://docs.oasis-open.org/virtio/virtio/v1.0/csprd01/virtio-v1.0-csprd01.html#x1-230001

Stefan

[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user
  2015-06-08 15:32                           ` Thibaut Collet
  2015-06-09 10:38                             ` Stefan Hajnoczi
@ 2015-06-09 10:43                             ` Stefan Hajnoczi
  1 sibling, 0 replies; 33+ messages in thread
From: Stefan Hajnoczi @ 2015-06-09 10:43 UTC (permalink / raw)
  To: Thibaut Collet
  Cc: Jason Wang, Juan Quintela, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

[-- Attachment #1: Type: text/plain, Size: 762 bytes --]

On Mon, Jun 08, 2015 at 05:32:38PM +0200, Thibaut Collet wrote:
> > 3. The easiest solution - nop .receive()
> 
> The solution 3 is similar of my implementation and does not solve issue
> pointed by Jason: legacy guest do not send a gratuitous ARP.

Yes, but it prints a warning so the user is aware they are operating in
a degraded state.

Sending gratuitous ARP packets minimizes the time that network
infrastructure forwards packets to the old host.  But the network
infrastructure will learn about the new host as long as the guest
sends outgoing packets after migration.

Putting these two things together, it seems acceptable to do #3.  Most
users should be running a modern virtio-net that supports the announce
feature anyway.

Stefan

[-- Attachment #2: Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 33+ messages in thread

end of thread, other threads:[~2015-06-09 10:44 UTC | newest]

Thread overview: 33+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-05-28  8:03 [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop Thibaut Collet
2015-05-29 13:12 ` Stefan Hajnoczi
2015-05-29 14:28   ` Thibaut Collet
2015-06-02 10:34     ` Stefan Hajnoczi
2015-06-03  7:56       ` Thibaut Collet
2015-06-03  9:42         ` Jason Wang
2015-06-03 13:40           ` Stefan Hajnoczi
2015-06-04  9:35             ` Jason Wang
2015-06-05 13:24               ` [Qemu-devel] [PATCH v2] net: Add support of VIRTIO_NET_F_GUEST_ANNOUNCE for vhost-net/vhost-user Thibaut Collet
2015-06-08  5:55                 ` Jason Wang
2015-06-08  8:21                   ` Thibaut Collet
2015-06-08  9:14                     ` Jason Wang
2015-06-08 10:01                       ` Thibaut Collet
2015-06-08 10:09                         ` Michael S. Tsirkin
2015-06-08 10:22                         ` Jason Wang
2015-06-08 13:32                     ` Stefan Hajnoczi
2015-06-08 14:05                       ` Thibaut Collet
2015-06-08 15:13                         ` Stefan Hajnoczi
2015-06-08 15:32                           ` Thibaut Collet
2015-06-09 10:38                             ` Stefan Hajnoczi
2015-06-09 10:43                             ` Stefan Hajnoczi
2015-06-08 16:13                           ` Michael S. Tsirkin
2015-06-09  2:35                             ` Jason Wang
2015-06-08 10:12                 ` Michael S. Tsirkin
2015-06-08 11:29                   ` Thibaut Collet
2015-06-08 12:45                     ` Michael S. Tsirkin
2015-06-08 13:20                       ` Thibaut Collet
2015-06-08 15:48                         ` Michael S. Tsirkin
2015-06-08 13:25                 ` Stefan Hajnoczi
2015-06-03  9:42         ` [Qemu-devel] [PATCH 1/1] net: fix queue's purge on VM stop Stefan Hajnoczi
2015-06-01  9:17 ` Jason Wang
2015-06-01 10:14   ` Thibaut Collet
2015-06-03  9:40     ` Jason Wang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.