* [Qemu-devel] [PATCH V2] net: Fix dealing with packets when runstate changes
From: zhanghailiang @ 2014-08-21 12:39 UTC
  To: qemu-devel
  Cc: peter.maydell, zhanghailiang, mst, jasowang, luonengjun,
	peter.huangpeng, stefanha

For all NICs (except virtio-net) emulated by QEMU, such as e1000,
rtl8139, pcnet and ne2k_pci, QEMU can still receive packets when the
VM is not running. If this happens in *migration's* last PAUSE-VM
stage, the new dirty RAM associated with those packets will be missed,
and this will lead to a serious network fault in the VM.

To avoid this, we forbid receiving packets in generic net code when
VM is not running. Also, when the runstate changes back to running,
we definitely need to flush queues to get packets flowing again.

Here we implement this in the net layer:
(1) Check the VM runstate in qemu_can_send_packet.
(2) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
which will listen for VM runstate changes.
(3) Register a handler function for VM state changes;
when the VM changes back to running, we flush all queues in the callback.
(4) Remove the VM state check from virtio_net_can_receive.

Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
---
v2:
- remove the superfluous check of nc->receive_disabled
---
 hw/net/virtio-net.c |  4 ----
 include/net/net.h   |  2 ++
 net/net.c           | 31 +++++++++++++++++++++++++++++++
 3 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 268eff9..287d762 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -839,10 +839,6 @@ static int virtio_net_can_receive(NetClientState *nc)
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
 
-    if (!vdev->vm_running) {
-        return 0;
-    }
-
     if (nc->queue_index >= n->curr_queues) {
         return 0;
     }
diff --git a/include/net/net.h b/include/net/net.h
index ed594f9..a294277 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -8,6 +8,7 @@
 #include "net/queue.h"
 #include "migration/vmstate.h"
 #include "qapi-types.h"
+#include "sysemu/sysemu.h"
 
 #define MAX_QUEUE_NUM 1024
 
@@ -96,6 +97,7 @@ typedef struct NICState {
     NICConf *conf;
     void *opaque;
     bool peer_deleted;
+    VMChangeStateEntry *vmstate;
 } NICState;
 
 NetClientState *qemu_find_netdev(const char *id);
diff --git a/net/net.c b/net/net.c
index 6d930ea..113a37b 100644
--- a/net/net.c
+++ b/net/net.c
@@ -242,6 +242,28 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
     return nc;
 }
 
+static void nic_vmstate_change_handler(void *opaque,
+                                       int running,
+                                       RunState state)
+{
+    NICState *nic = opaque;
+    NetClientState *nc;
+    int i, queues;
+
+    if (!running) {
+        return;
+    }
+
+    queues = MAX(1, nic->conf->peers.queues);
+    for (i = 0; i < queues; i++) {
+        nc = &nic->ncs[i];
+        if (nc->info->can_receive && !nc->info->can_receive(nc)) {
+            continue;
+        }
+        qemu_flush_queued_packets(nc);
+    }
+}
+
 NICState *qemu_new_nic(NetClientInfo *info,
                        NICConf *conf,
                        const char *model,
@@ -259,6 +281,8 @@ NICState *qemu_new_nic(NetClientInfo *info,
     nic->ncs = (void *)nic + info->size;
     nic->conf = conf;
     nic->opaque = opaque;
+    nic->vmstate = qemu_add_vm_change_state_handler(nic_vmstate_change_handler,
+                                                    nic);
 
     for (i = 0; i < queues; i++) {
         qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
@@ -379,6 +403,7 @@ void qemu_del_nic(NICState *nic)
         qemu_free_net_client(nc);
     }
 
+    qemu_del_vm_change_state_handler(nic->vmstate);
     g_free(nic);
 }
 
@@ -452,6 +477,12 @@ void qemu_set_vnet_hdr_len(NetClientState *nc, int len)
 
 int qemu_can_send_packet(NetClientState *sender)
 {
+    int vmstat = runstate_is_running();
+
+    if (!vmstat) {
+        return 0;
+    }
+
     if (!sender->peer) {
         return 1;
     }
-- 
1.7.12.4


* Re: [Qemu-devel] [PATCH V2] net: Fix dealing with packets when runstate changes
From: Jason Wang @ 2014-08-22 7:40 UTC
  To: zhanghailiang, qemu-devel
  Cc: peter.maydell, luonengjun, peter.huangpeng, stefanha, mst

On 08/21/2014 08:39 PM, zhanghailiang wrote:
> For all NICs (except virtio-net) emulated by QEMU, such as e1000,
> rtl8139, pcnet and ne2k_pci, QEMU can still receive packets when the
> VM is not running. If this happens in *migration's* last PAUSE-VM
> stage, the new dirty RAM associated with those packets will be missed,
> and this will lead to a serious network fault in the VM.
>
> To avoid this, we forbid receiving packets in generic net code when
> VM is not running. Also, when the runstate changes back to running,
> we definitely need to flush queues to get packets flowing again.
Hi:

Can you describe what will happen if you don't flush the queues after
the VM is started? Btw, the notifier dependency and the impact on vhost
should be mentioned here as well.
>
> Here we implement this in the net layer:
> (1) Check the VM runstate in qemu_can_send_packet.
> (2) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
> which will listen for VM runstate changes.
> (3) Register a handler function for VM state changes;
> when the VM changes back to running, we flush all queues in the callback.
> (4) Remove the VM state check from virtio_net_can_receive.
>
> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
> ---
> v2:
> - remove the superfluous check of nc->receive_disabled
> ---
>  hw/net/virtio-net.c |  4 ----
>  include/net/net.h   |  2 ++
>  net/net.c           | 31 +++++++++++++++++++++++++++++++
>  3 files changed, 33 insertions(+), 4 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 268eff9..287d762 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -839,10 +839,6 @@ static int virtio_net_can_receive(NetClientState *nc)
>      VirtIODevice *vdev = VIRTIO_DEVICE(n);
>      VirtIONetQueue *q = virtio_net_get_subqueue(nc);
>  
> -    if (!vdev->vm_running) {
> -        return 0;
> -    }
> -
>      if (nc->queue_index >= n->curr_queues) {
>          return 0;
>      }
> diff --git a/include/net/net.h b/include/net/net.h
> index ed594f9..a294277 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -8,6 +8,7 @@
>  #include "net/queue.h"
>  #include "migration/vmstate.h"
>  #include "qapi-types.h"
> +#include "sysemu/sysemu.h"
>  
>  #define MAX_QUEUE_NUM 1024
>  
> @@ -96,6 +97,7 @@ typedef struct NICState {
>      NICConf *conf;
>      void *opaque;
>      bool peer_deleted;
> +    VMChangeStateEntry *vmstate;
>  } NICState;
>  
>  NetClientState *qemu_find_netdev(const char *id);
> diff --git a/net/net.c b/net/net.c
> index 6d930ea..113a37b 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -242,6 +242,28 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
>      return nc;
>  }
>  
> +static void nic_vmstate_change_handler(void *opaque,
> +                                       int running,
> +                                       RunState state)
> +{
> +    NICState *nic = opaque;
> +    NetClientState *nc;
> +    int i, queues;
> +
> +    if (!running) {
> +        return;
> +    }
> +
> +    queues = MAX(1, nic->conf->peers.queues);
> +    for (i = 0; i < queues; i++) {
> +        nc = &nic->ncs[i];
> +        if (nc->info->can_receive && !nc->info->can_receive(nc)) {
> +            continue;
> +        }

Why not just use qemu_can_send_packet() here?
> +        qemu_flush_queued_packets(nc);
> +    }
> +}
> +
>  NICState *qemu_new_nic(NetClientInfo *info,
>                         NICConf *conf,
>                         const char *model,
> @@ -259,6 +281,8 @@ NICState *qemu_new_nic(NetClientInfo *info,
>      nic->ncs = (void *)nic + info->size;
>      nic->conf = conf;
>      nic->opaque = opaque;
> +    nic->vmstate = qemu_add_vm_change_state_handler(nic_vmstate_change_handler,
> +                                                    nic);
>  
>      for (i = 0; i < queues; i++) {
>          qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
> @@ -379,6 +403,7 @@ void qemu_del_nic(NICState *nic)
>          qemu_free_net_client(nc);
>      }
>  
> +    qemu_del_vm_change_state_handler(nic->vmstate);
>      g_free(nic);
>  }
>  
> @@ -452,6 +477,12 @@ void qemu_set_vnet_hdr_len(NetClientState *nc, int len)
>  
>  int qemu_can_send_packet(NetClientState *sender)
>  {
> +    int vmstat = runstate_is_running();
> +
> +    if (!vmstat) {
> +        return 0;
> +    }

I think you want "vmstart" here?
> +
>      if (!sender->peer) {
>          return 1;
>      }


* Re: [Qemu-devel] [PATCH V2] net: Fix dealing with packets when runstate changes
From: zhanghailiang @ 2014-08-22 9:21 UTC
  To: Jason Wang
  Cc: peter.maydell, mst, luonengjun, peter.huangpeng, qemu-devel, stefanha

On 2014/8/22 15:40, Jason Wang wrote:
> On 08/21/2014 08:39 PM, zhanghailiang wrote:
>> For all NICs (except virtio-net) emulated by QEMU, such as e1000,
>> rtl8139, pcnet and ne2k_pci, QEMU can still receive packets when the
>> VM is not running. If this happens in *migration's* last PAUSE-VM
>> stage, the new dirty RAM associated with those packets will be missed,
>> and this will lead to a serious network fault in the VM.
>>
>> To avoid this, we forbid receiving packets in generic net code when
>> VM is not running. Also, when the runstate changes back to running,
>> we definitely need to flush queues to get packets flowing again.
> Hi:
>
> Can you describe what will happen if you don't flush the queues after
> the VM is started? Btw, the notifier dependency and the impact on vhost
> should be mentioned here as well.

Hi Jason,

There is no side effect for vhost-net, because in the
nic_vmstate_change_handler callback we check the return value of
nc->info->can_receive before flushing the queues. If vhost-net
(virtio-net) is not ready, it will return 0, and we will not do the
flush.

Also, I have tested this patch with vhost-net, including with
multiqueue on, and everything seems to be OK.

When nc->info->receive returns 0, receive_disabled is set to 1. If this
happens just before the VM pauses (is not running), then after the VM
runstate changes back to running, receive_disabled is still 1: if QEMU
wants to send packets to the VM, it will always fail until something
else clears the flag. So we had better clear it after the runstate
changes back to running.
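
Roughly, the flag gets latched like this (a minimal sketch of the
delivery path, paraphrased from the qemu_deliver_packet() logic in
net/net.c; the helper name deliver_to_nic is made up for illustration):

    static ssize_t deliver_to_nic(NetClientState *nc,
                                  const uint8_t *buf, size_t size)
    {
        ssize_t ret;

        if (nc->receive_disabled) {
            return 0;                /* packet stays queued */
        }

        ret = nc->info->receive(nc, buf, size);
        if (ret == 0) {
            /* NIC can't take the packet now; stop delivering until
             * qemu_flush_queued_packets() clears the flag again. */
            nc->receive_disabled = 1;
        }
        return ret;
    }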

>>
>> Here we implement this in the net layer:
>> (1) Check the VM runstate in qemu_can_send_packet.
>> (2) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
>> which will listen for VM runstate changes.
>> (3) Register a handler function for VM state changes;
>> when the VM changes back to running, we flush all queues in the callback.
>> (4) Remove the VM state check from virtio_net_can_receive.
>>
>> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
>> ---
>> v2:
>> - remove the superfluous check of nc->receive_disabled
>> ---
>>   hw/net/virtio-net.c |  4 ----
>>   include/net/net.h   |  2 ++
>>   net/net.c           | 31 +++++++++++++++++++++++++++++++
>>   3 files changed, 33 insertions(+), 4 deletions(-)
>>
>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>> index 268eff9..287d762 100644
>> --- a/hw/net/virtio-net.c
>> +++ b/hw/net/virtio-net.c
>> @@ -839,10 +839,6 @@ static int virtio_net_can_receive(NetClientState *nc)
>>       VirtIODevice *vdev = VIRTIO_DEVICE(n);
>>       VirtIONetQueue *q = virtio_net_get_subqueue(nc);
>>
>> -    if (!vdev->vm_running) {
>> -        return 0;
>> -    }
>> -
>>       if (nc->queue_index >= n->curr_queues) {
>>           return 0;
>>       }
>> diff --git a/include/net/net.h b/include/net/net.h
>> index ed594f9..a294277 100644
>> --- a/include/net/net.h
>> +++ b/include/net/net.h
>> @@ -8,6 +8,7 @@
>>   #include "net/queue.h"
>>   #include "migration/vmstate.h"
>>   #include "qapi-types.h"
>> +#include "sysemu/sysemu.h"
>>
>>   #define MAX_QUEUE_NUM 1024
>>
>> @@ -96,6 +97,7 @@ typedef struct NICState {
>>       NICConf *conf;
>>       void *opaque;
>>       bool peer_deleted;
>> +    VMChangeStateEntry *vmstate;
>>   } NICState;
>>
>>   NetClientState *qemu_find_netdev(const char *id);
>> diff --git a/net/net.c b/net/net.c
>> index 6d930ea..113a37b 100644
>> --- a/net/net.c
>> +++ b/net/net.c
>> @@ -242,6 +242,28 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
>>       return nc;
>>   }
>>
>> +static void nic_vmstate_change_handler(void *opaque,
>> +                                       int running,
>> +                                       RunState state)
>> +{
>> +    NICState *nic = opaque;
>> +    NetClientState *nc;
>> +    int i, queues;
>> +
>> +    if (!running) {
>> +        return;
>> +    }
>> +
>> +    queues = MAX(1, nic->conf->peers.queues);
>> +    for (i = 0; i < queues; i++) {
>> +        nc = &nic->ncs[i];
>> +        if (nc->info->can_receive && !nc->info->can_receive(nc)) {
>> +            continue;
>> +        }
>
> Why not just use qemu_can_send_packet() here?

qemu_can_send_packet checks the value of receive_disabled, but our
purpose here is precisely to clear it when it is 1.

Maybe we should check it as follows:

      if (!nc->receive_disabled ||
          (nc->info->can_receive && !nc->info->can_receive(nc))) {
          continue;
      }
      qemu_flush_queued_packets(nc);



>> +        qemu_flush_queued_packets(nc);
>> +    }
>> +}
>> +
>>   NICState *qemu_new_nic(NetClientInfo *info,
>>                          NICConf *conf,
>>                          const char *model,
>> @@ -259,6 +281,8 @@ NICState *qemu_new_nic(NetClientInfo *info,
>>       nic->ncs = (void *)nic + info->size;
>>       nic->conf = conf;
>>       nic->opaque = opaque;
>> +    nic->vmstate = qemu_add_vm_change_state_handler(nic_vmstate_change_handler,
>> +                                                    nic);
>>
>>       for (i = 0; i < queues; i++) {
>>           qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
>> @@ -379,6 +403,7 @@ void qemu_del_nic(NICState *nic)
>>           qemu_free_net_client(nc);
>>       }
>>
>> +    qemu_del_vm_change_state_handler(nic->vmstate);
>>       g_free(nic);
>>   }
>>
>> @@ -452,6 +477,12 @@ void qemu_set_vnet_hdr_len(NetClientState *nc, int len)
>>
>>   int qemu_can_send_packet(NetClientState *sender)
>>   {
>> +    int vmstat = runstate_is_running();
>> +
>> +    if (!vmstat) {
>> +        return 0;
>> +    }
>
> I think you want "vmstart" here?

No, we want to check the VM runstate in general, not just the *start* action. ;)

>> +
>>       if (!sender->peer) {
>>           return 1;
>>       }
>
>
> .
>


* Re: [Qemu-devel] [PATCH V2] net: Fix dealing with packets when runstate changes
From: Jason Wang @ 2014-08-22 9:35 UTC
  To: zhanghailiang
  Cc: peter.maydell, mst, luonengjun, peter.huangpeng, qemu-devel, stefanha

On 08/22/2014 05:21 PM, zhanghailiang wrote:
> On 2014/8/22 15:40, Jason Wang wrote:
>> On 08/21/2014 08:39 PM, zhanghailiang wrote:
>>> For all NICs (except virtio-net) emulated by QEMU, such as e1000,
>>> rtl8139, pcnet and ne2k_pci, QEMU can still receive packets when the
>>> VM is not running. If this happens in *migration's* last PAUSE-VM
>>> stage, the new dirty RAM associated with those packets will be missed,
>>> and this will lead to a serious network fault in the VM.
>>>
>>> To avoid this, we forbid receiving packets in generic net code when
>>> VM is not running. Also, when the runstate changes back to running,
>>> we definitely need to flush queues to get packets flowing again.
>> Hi:
>>
>> Can you describe what will happen if you don't flush the queues after
>> the VM is started? Btw, the notifier dependency and the impact on vhost
>> should be mentioned here as well.
>
> Hi Jason,
>
> There is no side effect for vhost-net, because in the
> nic_vmstate_change_handler callback we check the return value of
> nc->info->can_receive before flushing the queues. If vhost-net
> (virtio-net) is not ready, it will return 0, and we will not do the
> flush.

Thanks for the clarification. Then the vmstate handler does nothing for
virtio-net :)
>
> Also, I have tested this patch with vhost-net, including with
> multiqueue on, and everything seems to be OK.
>
> When nc->info->receive returns 0, receive_disabled is set to 1. If this
> happens just before the VM pauses (is not running), then after the VM
> runstate changes back to running, receive_disabled is still 1: if QEMU
> wants to send packets to the VM, it will always fail until something
> else clears the flag. So we had better clear it after the runstate
> changes back to running.

This is the same question I raised on V1. Take virtio-net as an
example: when its receive() returns zero, its rx ring needs refilling.
The guest will kick QEMU when it finishes the refill, and at that point
virtio_net_handle_rx() calls qemu_flush_queued_packets(), which clears
receive_disabled.

So it looks safe to keep its value?
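
For reference, the recovery path I mean looks roughly like this (a
simplified sketch paraphrased from hw/net/virtio-net.c and net/net.c,
not the exact source):

    static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
    {
        VirtIONet *n = VIRTIO_NET(vdev);
        int queue_index = vq2q(virtio_get_queue_index(vq));

        /* The guest kicked us after refilling the rx ring:
         * resume delivery on the matching subqueue. */
        qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
    }

    void qemu_flush_queued_packets(NetClientState *nc)
    {
        nc->receive_disabled = 0;    /* re-enable delivery */
        qemu_net_queue_flush(nc->incoming_queue);
    }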
>
>>>
>>> Here we implement this in the net layer:
>>> (1) Check the VM runstate in qemu_can_send_packet.
>>> (2) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
>>> which will listen for VM runstate changes.
>>> (3) Register a handler function for VM state changes;
>>> when the VM changes back to running, we flush all queues in the callback.
>>> (4) Remove the VM state check from virtio_net_can_receive.
>>>
>>> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
>>> ---
>>> v2:
>>> - remove the superfluous check of nc->receive_disabled
>>> ---
>>>   hw/net/virtio-net.c |  4 ----
>>>   include/net/net.h   |  2 ++
>>>   net/net.c           | 31 +++++++++++++++++++++++++++++++
>>>   3 files changed, 33 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>>> index 268eff9..287d762 100644
>>> --- a/hw/net/virtio-net.c
>>> +++ b/hw/net/virtio-net.c
>>> @@ -839,10 +839,6 @@ static int
>>> virtio_net_can_receive(NetClientState *nc)
>>>       VirtIODevice *vdev = VIRTIO_DEVICE(n);
>>>       VirtIONetQueue *q = virtio_net_get_subqueue(nc);
>>>
>>> -    if (!vdev->vm_running) {
>>> -        return 0;
>>> -    }
>>> -
>>>       if (nc->queue_index >= n->curr_queues) {
>>>           return 0;
>>>       }
>>> diff --git a/include/net/net.h b/include/net/net.h
>>> index ed594f9..a294277 100644
>>> --- a/include/net/net.h
>>> +++ b/include/net/net.h
>>> @@ -8,6 +8,7 @@
>>>   #include "net/queue.h"
>>>   #include "migration/vmstate.h"
>>>   #include "qapi-types.h"
>>> +#include "sysemu/sysemu.h"
>>>
>>>   #define MAX_QUEUE_NUM 1024
>>>
>>> @@ -96,6 +97,7 @@ typedef struct NICState {
>>>       NICConf *conf;
>>>       void *opaque;
>>>       bool peer_deleted;
>>> +    VMChangeStateEntry *vmstate;
>>>   } NICState;
>>>
>>>   NetClientState *qemu_find_netdev(const char *id);
>>> diff --git a/net/net.c b/net/net.c
>>> index 6d930ea..113a37b 100644
>>> --- a/net/net.c
>>> +++ b/net/net.c
>>> @@ -242,6 +242,28 @@ NetClientState
>>> *qemu_new_net_client(NetClientInfo *info,
>>>       return nc;
>>>   }
>>>
>>> +static void nic_vmstate_change_handler(void *opaque,
>>> +                                       int running,
>>> +                                       RunState state)
>>> +{
>>> +    NICState *nic = opaque;
>>> +    NetClientState *nc;
>>> +    int i, queues;
>>> +
>>> +    if (!running) {
>>> +        return;
>>> +    }
>>> +
>>> +    queues = MAX(1, nic->conf->peers.queues);
>>> +    for (i = 0; i < queues; i++) {
>>> +        nc = &nic->ncs[i];
>>> +        if (nc->info->can_receive && !nc->info->can_receive(nc)) {
>>> +            continue;
>>> +        }
>>
>> Why not just use qemu_can_send_packet() here?
>
> qemu_can_send_packet checks the value of receive_disabled, but our
> purpose here is precisely to clear it when it is 1.
>
> Maybe we should check it as follows:
>
>      if (!nc->receive_disabled ||
>          (nc->info->can_receive && !nc->info->can_receive(nc))) {
>          continue;
>      }
>      qemu_flush_queued_packets(nc);
>
>
>
>>> +        qemu_flush_queued_packets(nc);
>>> +    }
>>> +}
>>> +
>>>   NICState *qemu_new_nic(NetClientInfo *info,
>>>                          NICConf *conf,
>>>                          const char *model,
>>> @@ -259,6 +281,8 @@ NICState *qemu_new_nic(NetClientInfo *info,
>>>       nic->ncs = (void *)nic + info->size;
>>>       nic->conf = conf;
>>>       nic->opaque = opaque;
>>> +    nic->vmstate =
>>> qemu_add_vm_change_state_handler(nic_vmstate_change_handler,
>>> +                                                    nic);
>>>
>>>       for (i = 0; i < queues; i++) {
>>>           qemu_net_client_setup(&nic->ncs[i], info, peers[i], model,
>>> name,
>>> @@ -379,6 +403,7 @@ void qemu_del_nic(NICState *nic)
>>>           qemu_free_net_client(nc);
>>>       }
>>>
>>> +    qemu_del_vm_change_state_handler(nic->vmstate);
>>>       g_free(nic);
>>>   }
>>>
>>> @@ -452,6 +477,12 @@ void qemu_set_vnet_hdr_len(NetClientState *nc,
>>> int len)
>>>
>>>   int qemu_can_send_packet(NetClientState *sender)
>>>   {
>>> +    int vmstat = runstate_is_running();
>>> +
>>> +    if (!vmstat) {
>>> +        return 0;
>>> +    }
>>
>> I think you want "vmstart" here?
>
> No, we want to check the VM runstate in general, not just the *start*
> action. ;)
>

OK, but 'vmstat' does not sound like a boolean. You could just use
if (!runstate_is_running()) directly.
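
I.e. something like this (sketch only):

    int qemu_can_send_packet(NetClientState *sender)
    {
        if (!runstate_is_running()) {
            return 0;
        }

        if (!sender->peer) {
            return 1;
        }
        ...
    }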
>>> +
>>>       if (!sender->peer) {
>>>           return 1;
>>>       }
>>
>>
>> .
>>
>
>

