* [Qemu-devel] [PATCH v2 0/2] vhost: two fixes
@ 2017-12-15  8:45 Jay Zhou
  2017-12-15  8:45 ` [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately Jay Zhou
                   ` (3 more replies)
  0 siblings, 4 replies; 18+ messages in thread
From: Jay Zhou @ 2017-12-15  8:45 UTC (permalink / raw)
  To: qemu-devel, mst
  Cc: weidong.huang, arei.gonglei, wangxinxin.wang, jianjay.zhou,
	gary.liuzhe, dgilbert, imammedo

v1 -> v2:
  * delete the "used_memslots" global variable, and add it
    for vhost-user and vhost-kernel separately

Jay Zhou (2):
  vhost: add used memslot number for vhost-user and vhost-kernel    
    separately
  vhost: double check used memslots number

 hw/virtio/vhost-backend.c         | 14 ++++++++++++++
 hw/virtio/vhost-user.c            | 31 +++++++++++++++++++++++++++++++
 hw/virtio/vhost.c                 | 39 ++++++++++++++++++++++++++++++---------
 include/hw/virtio/vhost-backend.h |  4 ++++
 4 files changed, 79 insertions(+), 9 deletions(-)

-- 
1.8.3.1


* [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately
  2017-12-15  8:45 [Qemu-devel] [PATCH v2 0/2] vhost: two fixes Jay Zhou
@ 2017-12-15  8:45 ` Jay Zhou
  2017-12-22 16:25   ` Igor Mammedov
  2017-12-15  8:45 ` [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number Jay Zhou
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 18+ messages in thread
From: Jay Zhou @ 2017-12-15  8:45 UTC (permalink / raw)
  To: qemu-devel, mst
  Cc: weidong.huang, arei.gonglei, wangxinxin.wang, jianjay.zhou,
	gary.liuzhe, dgilbert, imammedo

Currently used_memslots is equal to dev->mem->nregions, which is
correct for vhost-kernel but not for vhost-user, since vhost-user
only uses the memory regions that are backed by a file descriptor,
and not all regions have one.
This matters in some scenarios: e.g. if used_memslots is 8 and only
5 memory slots are actually used by vhost-user, hotplugging new RAM
fails because vhost_has_free_slot() returns false, even though the
hotplug would in fact be safe.

Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
Signed-off-by: Zhe Liu <gary.liuzhe@huawei.com>
---
 hw/virtio/vhost-backend.c         | 14 ++++++++++++++
 hw/virtio/vhost-user.c            | 31 +++++++++++++++++++++++++++++++
 hw/virtio/vhost.c                 | 16 +++++++++-------
 include/hw/virtio/vhost-backend.h |  4 ++++
 4 files changed, 58 insertions(+), 7 deletions(-)

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 7f09efa..866718c 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -15,6 +15,8 @@
 #include "hw/virtio/vhost-backend.h"
 #include "qemu/error-report.h"
 
+static unsigned int vhost_kernel_used_memslots;
+
 static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                              void *arg)
 {
@@ -233,6 +235,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
         qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
 }
 
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{
+    vhost_kernel_used_memslots = dev->mem->nregions;
+}
+
+static unsigned int vhost_kernel_get_used_memslots(void)
+{
+    return vhost_kernel_used_memslots;
+}
+
 static const VhostOps kernel_ops = {
         .backend_type = VHOST_BACKEND_TYPE_KERNEL,
         .vhost_backend_init = vhost_kernel_init,
@@ -264,6 +276,8 @@ static const VhostOps kernel_ops = {
 #endif /* CONFIG_VHOST_VSOCK */
         .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
         .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
+        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
+        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
 };
 
 int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 093675e..58a4cec 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -122,6 +122,8 @@ static VhostUserMsg m __attribute__ ((unused));
 /* The version of the protocol we support */
 #define VHOST_USER_VERSION    (0x1)
 
+static unsigned int vhost_user_used_memslots;
+
 struct vhost_user {
     CharBackend *chr;
     int slave_fd;
@@ -922,6 +924,33 @@ static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
     /* No-op as the receive channel is not dedicated to IOTLB messages. */
 }
 
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{
+    int counter = 0;
+    int i;
+
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        ram_addr_t offset;
+        MemoryRegion *mr;
+        int fd;
+
+        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
+                                    &offset);
+        fd = memory_region_get_fd(mr);
+        if (fd > 0) {
+            counter++;
+        }
+    }
+    vhost_user_used_memslots = counter;
+}
+
+static unsigned int vhost_user_get_used_memslots(void)
+{
+    return vhost_user_used_memslots;
+}
+
 const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_backend_init = vhost_user_init,
@@ -948,4 +977,6 @@ const VhostOps user_ops = {
         .vhost_net_set_mtu = vhost_user_net_set_mtu,
         .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
         .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
+        .vhost_set_used_memslots = vhost_user_set_used_memslots,
+        .vhost_get_used_memslots = vhost_user_get_used_memslots,
 };
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index e4290ce..59a32e9 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -43,20 +43,21 @@
 static struct vhost_log *vhost_log;
 static struct vhost_log *vhost_log_shm;
 
-static unsigned int used_memslots;
 static QLIST_HEAD(, vhost_dev) vhost_devices =
     QLIST_HEAD_INITIALIZER(vhost_devices);
 
 bool vhost_has_free_slot(void)
 {
-    unsigned int slots_limit = ~0U;
     struct vhost_dev *hdev;
 
     QLIST_FOREACH(hdev, &vhost_devices, entry) {
-        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
-        slots_limit = MIN(slots_limit, r);
+        if (hdev->vhost_ops->vhost_get_used_memslots() >=
+            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+            return false;
+        }
     }
-    return slots_limit > used_memslots;
+
+    return true;
 }
 
 static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -606,7 +607,7 @@ static void vhost_set_memory(MemoryListener *listener,
     dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
     dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
     dev->memory_changed = true;
-    used_memslots = dev->mem->nregions;
+    dev->vhost_ops->vhost_set_used_memslots(dev);
 }
 
 static bool vhost_section(MemoryRegionSection *section)
@@ -1251,7 +1252,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
         goto fail;
     }
 
-    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+    if (hdev->vhost_ops->vhost_get_used_memslots() >
+        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
         error_report("vhost backend memory slots limit is less"
                 " than current number of present memory slots");
         r = -1;
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index a7a5f22..19961b5 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -84,6 +84,8 @@ typedef void (*vhost_set_iotlb_callback_op)(struct vhost_dev *dev,
                                            int enabled);
 typedef int (*vhost_send_device_iotlb_msg_op)(struct vhost_dev *dev,
                                               struct vhost_iotlb_msg *imsg);
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
+typedef unsigned int (*vhost_get_used_memslots_op)(void);
 
 typedef struct VhostOps {
     VhostBackendType backend_type;
@@ -118,6 +120,8 @@ typedef struct VhostOps {
     vhost_vsock_set_running_op vhost_vsock_set_running;
     vhost_set_iotlb_callback_op vhost_set_iotlb_callback;
     vhost_send_device_iotlb_msg_op vhost_send_device_iotlb_msg;
+    vhost_set_used_memslots_op vhost_set_used_memslots;
+    vhost_get_used_memslots_op vhost_get_used_memslots;
 } VhostOps;
 
 extern const VhostOps user_ops;
-- 
1.8.3.1


* [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number
  2017-12-15  8:45 [Qemu-devel] [PATCH v2 0/2] vhost: two fixes Jay Zhou
  2017-12-15  8:45 ` [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately Jay Zhou
@ 2017-12-15  8:45 ` Jay Zhou
  2017-12-22 18:48   ` Igor Mammedov
  2017-12-15  8:52 ` [Qemu-devel] [PATCH v2 0/2] vhost: two fixes no-reply
  2017-12-19 21:46 ` Michael S. Tsirkin
  3 siblings, 1 reply; 18+ messages in thread
From: Jay Zhou @ 2017-12-15  8:45 UTC (permalink / raw)
  To: qemu-devel, mst
  Cc: weidong.huang, arei.gonglei, wangxinxin.wang, jianjay.zhou,
	gary.liuzhe, dgilbert, imammedo

If the VM already has N (N > 8) memory slots that vhost-user would use,
QEMU crashes on an assertion in vhost_user_set_mem_table() when we try
to hotplug the first vhost-user NIC.
This patch checks whether the memslot limit is exceeded after
vhost_user_used_memslots has been updated.

Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
---
 hw/virtio/vhost.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 59a32e9..e45f5e2 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1234,6 +1234,18 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
     event_notifier_cleanup(&vq->masked_notifier);
 }
 
+static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
+{
+    if (hdev->vhost_ops->vhost_get_used_memslots() >
+        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+        error_report("vhost backend memory slots limit is less"
+                " than current number of present memory slots");
+        return true;
+    }
+
+    return false;
+}
+
 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                    VhostBackendType backend_type, uint32_t busyloop_timeout)
 {
@@ -1252,10 +1264,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
         goto fail;
     }
 
-    if (hdev->vhost_ops->vhost_get_used_memslots() >
-        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
-        error_report("vhost backend memory slots limit is less"
-                " than current number of present memory slots");
+    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
         r = -1;
         goto fail;
     }
@@ -1341,6 +1350,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->memory_changed = false;
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
+
+    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
+        r = -1;
+        if (busyloop_timeout) {
+            goto fail_busyloop;
+        } else {
+            goto fail;
+        }
+    }
+
     return 0;
 
 fail_busyloop:
-- 
1.8.3.1


* Re: [Qemu-devel] [PATCH v2 0/2] vhost: two fixes
  2017-12-15  8:45 [Qemu-devel] [PATCH v2 0/2] vhost: two fixes Jay Zhou
  2017-12-15  8:45 ` [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately Jay Zhou
  2017-12-15  8:45 ` [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number Jay Zhou
@ 2017-12-15  8:52 ` no-reply
  2017-12-19 21:46 ` Michael S. Tsirkin
  3 siblings, 0 replies; 18+ messages in thread
From: no-reply @ 2017-12-15  8:52 UTC (permalink / raw)
  To: jianjay.zhou
  Cc: famz, qemu-devel, mst, weidong.huang, wangxinxin.wang, dgilbert,
	gary.liuzhe, arei.gonglei, imammedo

Hi,

This series seems to have some coding style problems. See output below for
more information:

Message-id: 1513327555-17520-1-git-send-email-jianjay.zhou@huawei.com
Subject: [Qemu-devel] [PATCH v2 0/2] vhost: two fixes
Type: series

=== TEST SCRIPT BEGIN ===
#!/bin/bash

BASE=base
n=1
total=$(git log --oneline $BASE.. | wc -l)
failed=0

git config --local diff.renamelimit 0
git config --local diff.renames True

commits="$(git log --format=%H --reverse $BASE..)"
for c in $commits; do
    echo "Checking PATCH $n/$total: $(git log -n 1 --format=%s $c)..."
    if ! git show $c --format=email | ./scripts/checkpatch.pl --mailback -; then
        failed=1
        echo
    fi
    n=$((n+1))
done

exit $failed
=== TEST SCRIPT END ===

Updating 3c8cf5a9c21ff8782164d1def7f44bd888713384
error: RPC failed; curl 18 transfer closed with outstanding read data remaining
fatal: The remote end hung up unexpectedly
error: Could not fetch 3c8cf5a9c21ff8782164d1def7f44bd888713384
Traceback (most recent call last):
  File "/usr/bin/patchew", line 442, in test_one
    git_clone_repo(clone, r["repo"], r["head"], logf)
  File "/usr/bin/patchew", line 48, in git_clone_repo
    stdout=logf, stderr=logf)
  File "/usr/lib64/python3.6/subprocess.py", line 291, in check_call
    raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['git', 'remote', 'add', '-f', '--mirror=fetch', '3c8cf5a9c21ff8782164d1def7f44bd888713384', 'https://github.com/patchew-project/qemu']' returned non-zero exit status 1.



---
Email generated automatically by Patchew [http://patchew.org/].
Please send your feedback to patchew-devel@freelists.org


* Re: [Qemu-devel] [PATCH v2 0/2] vhost: two fixes
  2017-12-15  8:45 [Qemu-devel] [PATCH v2 0/2] vhost: two fixes Jay Zhou
                   ` (2 preceding siblings ...)
  2017-12-15  8:52 ` [Qemu-devel] [PATCH v2 0/2] vhost: two fixes no-reply
@ 2017-12-19 21:46 ` Michael S. Tsirkin
  3 siblings, 0 replies; 18+ messages in thread
From: Michael S. Tsirkin @ 2017-12-19 21:46 UTC (permalink / raw)
  To: Jay Zhou
  Cc: qemu-devel, weidong.huang, arei.gonglei, wangxinxin.wang,
	gary.liuzhe, dgilbert, imammedo

On Fri, Dec 15, 2017 at 04:45:53PM +0800, Jay Zhou wrote:
> v1 -> v2:
>   * delete the "used_memslots" global variable, and add it
>     for vhost-user and vhost-kernel separately

imammedo, any feedback on this?

> Jay Zhou (2):
>   vhost: add used memslot number for vhost-user and vhost-kernel    
>     separately
>   vhost: double check used memslots number
> 
>  hw/virtio/vhost-backend.c         | 14 ++++++++++++++
>  hw/virtio/vhost-user.c            | 31 +++++++++++++++++++++++++++++++
>  hw/virtio/vhost.c                 | 39 ++++++++++++++++++++++++++++++---------
>  include/hw/virtio/vhost-backend.h |  4 ++++
>  4 files changed, 79 insertions(+), 9 deletions(-)
> 
> -- 
> 1.8.3.1
> 


* Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately
  2017-12-15  8:45 ` [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately Jay Zhou
@ 2017-12-22 16:25   ` Igor Mammedov
  2017-12-22 19:03     ` Igor Mammedov
  2017-12-23  6:01     ` Zhoujian (jay)
  0 siblings, 2 replies; 18+ messages in thread
From: Igor Mammedov @ 2017-12-22 16:25 UTC (permalink / raw)
  To: Jay Zhou
  Cc: qemu-devel, mst, weidong.huang, arei.gonglei, wangxinxin.wang,
	gary.liuzhe, dgilbert

On Fri, 15 Dec 2017 16:45:54 +0800
Jay Zhou <jianjay.zhou@huawei.com> wrote:

> Currently used_memslots is equal to dev->mem->nregions, which is
> correct for vhost-kernel but not for vhost-user, since vhost-user
> only uses the memory regions that are backed by a file descriptor,
> and not all regions have one.
> This matters in some scenarios: e.g. if used_memslots is 8 and only
> 5 memory slots are actually used by vhost-user, hotplugging new RAM
> fails because vhost_has_free_slot() returns false, even though the
> hotplug would in fact be safe.

a bit of a rant:
it seems vhost is chasing its tail here (1-2 regions that are not
actually RAM); while this approach will allow hotplugging 1-2 extra DIMMs,
vhost-user will still be limited to 8 regions max.


The vhost-user protocol should be extended to allow QEMU to query
the limit value from the backend (where it in turn should be tunable),
so that the backend can be configured to meet user needs.
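
Roughly like this on the QEMU side (a sketch only: the request name/number
and the helper are made up here, they are not part of the protocol today,
and a real extension would also need a protocol feature bit so old
backends are not confused by an unknown request):

#define VHOST_USER_GET_MEMSLOTS_LIMIT 0x23 /* hypothetical request */

static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_GET_MEMSLOTS_LIMIT,
        .flags = VHOST_USER_VERSION,
    };

    /* fall back to the current built-in limit if the backend
     * cannot be queried */
    if (vhost_user_write(dev, &msg, NULL, 0) < 0 ||
        vhost_user_read(dev, &msg) < 0 ||
        msg.size != sizeof(msg.payload.u64)) {
        return VHOST_MEMORY_MAX_NREGIONS;
    }

    return msg.payload.u64;
}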

Jay,
would you like to look into it?

Otherwise it looks like the patch will do its job;
see below for a minor suggestion.
 
> Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
> Signed-off-by: Zhe Liu <gary.liuzhe@huawei.com>
> ---
>  hw/virtio/vhost-backend.c         | 14 ++++++++++++++
>  hw/virtio/vhost-user.c            | 31 +++++++++++++++++++++++++++++++
>  hw/virtio/vhost.c                 | 16 +++++++++-------
>  include/hw/virtio/vhost-backend.h |  4 ++++
>  4 files changed, 58 insertions(+), 7 deletions(-)
> 
> diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> index 7f09efa..866718c 100644
> --- a/hw/virtio/vhost-backend.c
> +++ b/hw/virtio/vhost-backend.c
> @@ -15,6 +15,8 @@
>  #include "hw/virtio/vhost-backend.h"
>  #include "qemu/error-report.h"
>  
> +static unsigned int vhost_kernel_used_memslots;
> +
>  static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
>                               void *arg)
>  {
> @@ -233,6 +235,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
>          qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
>  }
>  
> +static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
> +{
> +    vhost_kernel_used_memslots = dev->mem->nregions;
> +}
> +
> +static unsigned int vhost_kernel_get_used_memslots(void)
> +{
> +    return vhost_kernel_used_memslots;
> +}
> +
>  static const VhostOps kernel_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_KERNEL,
>          .vhost_backend_init = vhost_kernel_init,
> @@ -264,6 +276,8 @@ static const VhostOps kernel_ops = {
>  #endif /* CONFIG_VHOST_VSOCK */
>          .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
>          .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
> +        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
>  };
>  
>  int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 093675e..58a4cec 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -122,6 +122,8 @@ static VhostUserMsg m __attribute__ ((unused));
>  /* The version of the protocol we support */
>  #define VHOST_USER_VERSION    (0x1)
>  
> +static unsigned int vhost_user_used_memslots;
> +
>  struct vhost_user {
>      CharBackend *chr;
>      int slave_fd;
> @@ -922,6 +924,33 @@ static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
>      /* No-op as the receive channel is not dedicated to IOTLB messages. */
>  }
>  
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> +    int counter = 0;
> +    int i;
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +        int fd;
> +
> +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> +        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> +                                    &offset);
> +        fd = memory_region_get_fd(mr);
> +        if (fd > 0) {
> +            counter++;
> +        }
> +    }
vhost_user_set_mem_table() already does the same counting,
there is no point in duplicating it, just drop vhost_set_used_memslots callback

> +    vhost_user_used_memslots = counter;
and update this value from vhost_user_set_mem_table()

> +}
> +
> +static unsigned int vhost_user_get_used_memslots(void)
> +{
> +    return vhost_user_used_memslots;
> +}
> +
>  const VhostOps user_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_USER,
>          .vhost_backend_init = vhost_user_init,
> @@ -948,4 +977,6 @@ const VhostOps user_ops = {
>          .vhost_net_set_mtu = vhost_user_net_set_mtu,
>          .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
>          .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
> +        .vhost_set_used_memslots = vhost_user_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_user_get_used_memslots,
>  };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index e4290ce..59a32e9 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -43,20 +43,21 @@
>  static struct vhost_log *vhost_log;
>  static struct vhost_log *vhost_log_shm;
>  
> -static unsigned int used_memslots;
>  static QLIST_HEAD(, vhost_dev) vhost_devices =
>      QLIST_HEAD_INITIALIZER(vhost_devices);
>  
>  bool vhost_has_free_slot(void)
>  {
> -    unsigned int slots_limit = ~0U;
>      struct vhost_dev *hdev;
>  
>      QLIST_FOREACH(hdev, &vhost_devices, entry) {
> -        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> -        slots_limit = MIN(slots_limit, r);
> +        if (hdev->vhost_ops->vhost_get_used_memslots() >=
> +            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +            return false;
> +        }
>      }
> -    return slots_limit > used_memslots;
> +
> +    return true;
>  }
>  
>  static void vhost_dev_sync_region(struct vhost_dev *dev,
> @@ -606,7 +607,7 @@ static void vhost_set_memory(MemoryListener *listener,
>      dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
>      dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
>      dev->memory_changed = true;
> -    used_memslots = dev->mem->nregions;
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
>  }
>  
>  static bool vhost_section(MemoryRegionSection *section)
> @@ -1251,7 +1252,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>          goto fail;
>      }
>  
> -    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
>          error_report("vhost backend memory slots limit is less"
>                  " than current number of present memory slots");
>          r = -1;
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index a7a5f22..19961b5 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -84,6 +84,8 @@ typedef void (*vhost_set_iotlb_callback_op)(struct vhost_dev *dev,
>                                             int enabled);
>  typedef int (*vhost_send_device_iotlb_msg_op)(struct vhost_dev *dev,
>                                                struct vhost_iotlb_msg *imsg);
> +typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
> +typedef unsigned int (*vhost_get_used_memslots_op)(void);
>  
>  typedef struct VhostOps {
>      VhostBackendType backend_type;
> @@ -118,6 +120,8 @@ typedef struct VhostOps {
>      vhost_vsock_set_running_op vhost_vsock_set_running;
>      vhost_set_iotlb_callback_op vhost_set_iotlb_callback;
>      vhost_send_device_iotlb_msg_op vhost_send_device_iotlb_msg;
> +    vhost_set_used_memslots_op vhost_set_used_memslots;
> +    vhost_get_used_memslots_op vhost_get_used_memslots;
>  } VhostOps;
>  
>  extern const VhostOps user_ops;


* Re: [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number
  2017-12-15  8:45 ` [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number Jay Zhou
@ 2017-12-22 18:48   ` Igor Mammedov
  2017-12-22 21:15     ` Michael S. Tsirkin
                       ` (2 more replies)
  0 siblings, 3 replies; 18+ messages in thread
From: Igor Mammedov @ 2017-12-22 18:48 UTC (permalink / raw)
  To: Jay Zhou
  Cc: qemu-devel, mst, weidong.huang, arei.gonglei, wangxinxin.wang,
	gary.liuzhe, dgilbert

On Fri, 15 Dec 2017 16:45:55 +0800
Jay Zhou <jianjay.zhou@huawei.com> wrote:

> If the VM already has N (N > 8) memory slots that vhost-user would use,
> QEMU crashes on an assertion in vhost_user_set_mem_table() when we try
> to hotplug the first vhost-user NIC.
> This patch checks whether the memslot limit is exceeded after
> vhost_user_used_memslots has been updated.
Can't understand the commit message, pls rephrase (what is being fixed, and how it's fixed);
also include reproducing steps for the crash and maybe describe the call flow/backtrace
that triggers it.

PS:
I wasn't able to reproduce crash

> 
> Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
> ---
>  hw/virtio/vhost.c | 27 +++++++++++++++++++++++----
>  1 file changed, 23 insertions(+), 4 deletions(-)
> 
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 59a32e9..e45f5e2 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -1234,6 +1234,18 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
>      event_notifier_cleanup(&vq->masked_notifier);
>  }
>  
> +static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
> +{
> +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +        error_report("vhost backend memory slots limit is less"
> +                " than current number of present memory slots");
> +        return true;
> +    }
> +
> +    return false;
> +}
> +
>  int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>                     VhostBackendType backend_type, uint32_t busyloop_timeout)
>  {
> @@ -1252,10 +1264,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>          goto fail;
>      }
>  
> -    if (hdev->vhost_ops->vhost_get_used_memslots() >
> -        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> -        error_report("vhost backend memory slots limit is less"
> -                " than current number of present memory slots");
> +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
why do you keep this check?
it seems to always be false



>          r = -1;
>          goto fail;
>      }
> @@ -1341,6 +1350,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      hdev->memory_changed = false;
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
> +
> +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> +        r = -1;
> +        if (busyloop_timeout) {
> +            goto fail_busyloop;
> +        } else {
> +            goto fail;
> +        }
> +    }
seems to be the right thing to do, since after registering the listener for the first time
used_memslots will be updated to its actual value.


I did some testing and without this hunk/patch

on 'device_add  virtio-net-pci,netdev=net0' qemu prints:

qemu-system-x86_64: vhost_set_mem_table failed: Argument list too long (7)
qemu-system-x86_64: unable to start vhost net: 7: falling back on userspace virtio

and network is operational in guest, but with this patch

"netdev_add ...,vhost-on" prints:

vhost backend memory slots limit is less than current number of present memory slots
vhost-net requested but could not be initialized

and following "device_add  virtio-net-pci,netdev=net0" prints:

TUNSETOFFLOAD ioctl() failed: Bad file descriptor
TUNSETOFFLOAD ioctl() failed: Bad file descriptor

adapter is still hot-plugged but guest networking is broken (can't get IP address via DHCP)

so the patch seems to introduce a regression, or something is broken elsewhere and this exposes the issue;
not sure what qemu's reaction should be in this case,
i.e. when netdev_add fails:
    1: should we fail the followed-up device_add, or
    2: make it fall back to userspace virtio?

I'd go for #2,
Michael what's your take on it?

> +
>      return 0;
>  
>  fail_busyloop:


* Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately
  2017-12-22 16:25   ` Igor Mammedov
@ 2017-12-22 19:03     ` Igor Mammedov
  2017-12-23  7:09       ` Zhoujian (jay)
  2017-12-23  6:01     ` Zhoujian (jay)
  1 sibling, 1 reply; 18+ messages in thread
From: Igor Mammedov @ 2017-12-22 19:03 UTC (permalink / raw)
  To: Jay Zhou
  Cc: weidong.huang, mst, wangxinxin.wang, qemu-devel, gary.liuzhe,
	dgilbert, arei.gonglei

On Fri, 22 Dec 2017 17:25:13 +0100
Igor Mammedov <imammedo@redhat.com> wrote:

> On Fri, 15 Dec 2017 16:45:54 +0800
> Jay Zhou <jianjay.zhou@huawei.com> wrote:
[...]

> > +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> > +{
> > +    int counter = 0;
> > +    int i;
> > +
> > +    for (i = 0; i < dev->mem->nregions; ++i) {
> > +        struct vhost_memory_region *reg = dev->mem->regions + i;
> > +        ram_addr_t offset;
> > +        MemoryRegion *mr;
> > +        int fd;
> > +
> > +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> > +        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> > +                                    &offset);
> > +        fd = memory_region_get_fd(mr);
> > +        if (fd > 0) {
> > +            counter++;
> > +        }
> > +    }
> vhost_user_set_mem_table() already does the same counting,
> there is no point in duplicating it, just drop vhost_set_used_memslots callback
> 
> > +    vhost_user_used_memslots = counter;
> and update this value from vhost_user_set_mem_table()
never mind; updating it from vhost_user_set_mem_table() won't do, as it's
called only when the device is started, not when the backend is initialized,
so the way you did it should work for both cases


> 
> > +}
> > +
[...]


* Re: [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number
  2017-12-22 18:48   ` Igor Mammedov
@ 2017-12-22 21:15     ` Michael S. Tsirkin
  2017-12-28 11:29       ` Igor Mammedov
  2017-12-23  8:27     ` Zhoujian (jay)
  2017-12-23  8:49     ` Zhoujian (jay)
  2 siblings, 1 reply; 18+ messages in thread
From: Michael S. Tsirkin @ 2017-12-22 21:15 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: Jay Zhou, qemu-devel, weidong.huang, arei.gonglei,
	wangxinxin.wang, gary.liuzhe, dgilbert

On Fri, Dec 22, 2017 at 07:48:55PM +0100, Igor Mammedov wrote:
> On Fri, 15 Dec 2017 16:45:55 +0800
> Jay Zhou <jianjay.zhou@huawei.com> wrote:
> 
> > If the VM already has N (N > 8) memory slots that vhost-user would use,
> > QEMU crashes on an assertion in vhost_user_set_mem_table() when we try
> > to hotplug the first vhost-user NIC.
> > This patch checks whether the memslot limit is exceeded after
> > vhost_user_used_memslots has been updated.
> Can't understand the commit message, pls rephrase (what is being fixed, and how it's fixed);
> also include reproducing steps for the crash and maybe describe the call flow/backtrace
> that triggers it.
> 
> PS:
> I wasn't able to reproduce crash
> 
> > 
> > Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
> > ---
> >  hw/virtio/vhost.c | 27 +++++++++++++++++++++++----
> >  1 file changed, 23 insertions(+), 4 deletions(-)
> > 
> > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> > index 59a32e9..e45f5e2 100644
> > --- a/hw/virtio/vhost.c
> > +++ b/hw/virtio/vhost.c
> > @@ -1234,6 +1234,18 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
> >      event_notifier_cleanup(&vq->masked_notifier);
> >  }
> >  
> > +static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
> > +{
> > +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > +        error_report("vhost backend memory slots limit is less"
> > +                " than current number of present memory slots");
> > +        return true;
> > +    }
> > +
> > +    return false;
> > +}
> > +
> >  int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >                     VhostBackendType backend_type, uint32_t busyloop_timeout)
> >  {
> > @@ -1252,10 +1264,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >          goto fail;
> >      }
> >  
> > -    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > -        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > -        error_report("vhost backend memory slots limit is less"
> > -                " than current number of present memory slots");
> > +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> why do you keep this check?
> it seems to always be false
> 
> 
> 
> >          r = -1;
> >          goto fail;
> >      }
> > @@ -1341,6 +1350,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >      hdev->memory_changed = false;
> >      memory_listener_register(&hdev->memory_listener, &address_space_memory);
> >      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
> > +
> > +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> > +        r = -1;
> > +        if (busyloop_timeout) {
> > +            goto fail_busyloop;
> > +        } else {
> > +            goto fail;
> > +        }
> > +    }
> seems to be the right thing to do, since after registering the listener for the first time
> used_memslots will be updated to its actual value.
> 
> 
> I did some testing and without this hunk/patch
> 
> on 'device_add  virtio-net-pci,netdev=net0' qemu prints:
> 
> qemu-system-x86_64: vhost_set_mem_table failed: Argument list too long (7)
> qemu-system-x86_64: unable to start vhost net: 7: falling back on userspace virtio
> 
> and network is operational in guest, but with this patch
> 
> "netdev_add ...,vhost-on" prints:
> 
> vhost backend memory slots limit is less than current number of present memory slots
> vhost-net requested but could not be initialized
> 
> and following "device_add  virtio-net-pci,netdev=net0" prints:
> 
> TUNSETOFFLOAD ioctl() failed: Bad file descriptor
> TUNSETOFFLOAD ioctl() failed: Bad file descriptor
> 
> adapter is still hot-plugged but guest networking is broken (can't get IP address via DHCP)
> 
> so the patch seems to introduce a regression, or something is broken elsewhere and this exposes the issue;
> not sure what qemu's reaction should be in this case,
> i.e. when netdev_add fails:
>     1: should we fail the followed-up device_add, or
>     2: make it fall back to userspace virtio?
> 
> I'd go for #2,
> Michael what's your take on it?

OK, but there's a vhost force flag; if that is set we definitely should
fail device_add.

Also, hotplug can follow device_add; it should be handled similarly.
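
To spell the policy out as a standalone model (function and variable
names here are illustrative; the real decision sits in the netdev and
vhost-net init paths):

#include <stdbool.h>
#include <stdio.h>

/* Model: with vhostforce=on a vhost init failure is fatal to
 * netdev_add/device_add; without it QEMU keeps the device and
 * falls back to userspace virtio. */
static int vhost_net_init_policy(bool init_ok, bool vhostforce)
{
    if (init_ok) {
        return 0;                 /* vhost dataplane in use */
    }
    if (vhostforce) {
        fprintf(stderr, "vhost-net requested but could not be initialized\n");
        return -1;                /* hard failure */
    }
    fprintf(stderr, "falling back on userspace virtio\n");
    return 0;                     /* device stays, userspace virtio used */
}

int main(void)
{
    /* memslot limit exceeded (init_ok = false), vhostforce off */
    return vhost_net_init_policy(false, false);
}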

> > +
> >      return 0;
> >  
> >  fail_busyloop:


* Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately
  2017-12-22 16:25   ` Igor Mammedov
  2017-12-22 19:03     ` Igor Mammedov
@ 2017-12-23  6:01     ` Zhoujian (jay)
  1 sibling, 0 replies; 18+ messages in thread
From: Zhoujian (jay) @ 2017-12-23  6:01 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: qemu-devel, mst, Huangweidong (C), Gonglei (Arei), wangxin (U),
	Liuzhe (Cloud Open Labs, NFV),
	dgilbert

> -----Original Message-----
> From: Igor Mammedov [mailto:imammedo@redhat.com]
> Sent: Saturday, December 23, 2017 12:25 AM
> To: Zhoujian (jay) <jianjay.zhou@huawei.com>
> Cc: qemu-devel@nongnu.org; mst@redhat.com; Huangweidong (C)
> <weidong.huang@huawei.com>; Gonglei (Arei) <arei.gonglei@huawei.com>;
> wangxin (U) <wangxinxin.wang@huawei.com>; Liuzhe (Cloud Open Labs, NFV)
> <gary.liuzhe@huawei.com>; dgilbert@redhat.com
> Subject: Re: [PATCH v2 1/2] vhost: add used memslot number for vhost-user
> and vhost-kernel separately
> 
> On Fri, 15 Dec 2017 16:45:54 +0800
> Jay Zhou <jianjay.zhou@huawei.com> wrote:
> 
> > Currently used_memslots is equal to dev->mem->nregions, which is
> > correct for vhost-kernel but not for vhost-user, since vhost-user
> > only uses the memory regions that are backed by a file descriptor,
> > and not all regions have one.
> > This matters in some scenarios: e.g. if used_memslots is 8 and only
> > 5 memory slots are actually used by vhost-user, hotplugging new RAM
> > fails because vhost_has_free_slot() returns false, even though the
> > hotplug would in fact be safe.
> 
> a bit of a rant:
> it seems vhost is chasing its tail here (1-2 regions that are not actually
> RAM); while this approach will allow hotplugging 1-2 extra DIMMs, vhost-user
> will still be limited to 8 regions max.
> 
> 
> The vhost-user protocol should be extended to allow QEMU to query the limit
> value from the backend (where it in turn should be tunable), so that the
> backend can be configured to meet user needs.

Michael mentioned it can be extended with a protocol flag; here is the discussion
thread:
https://lists.gnu.org/archive/html/qemu-devel/2017-11/msg04656.html

> 
> Jay,
> would you like to look into it?

Yes, will look into it.

> 
> Otherwise it looks like the patch will do its job; see below for a minor
> suggestion.
> 
> > Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
> > Signed-off-by: Zhe Liu <gary.liuzhe@huawei.com>
> > ---
> >  hw/virtio/vhost-backend.c         | 14 ++++++++++++++
> >  hw/virtio/vhost-user.c            | 31 +++++++++++++++++++++++++++++++
> >  hw/virtio/vhost.c                 | 16 +++++++++-------
> >  include/hw/virtio/vhost-backend.h |  4 ++++
> >  4 files changed, 58 insertions(+), 7 deletions(-)
> >
> > diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> > index 7f09efa..866718c 100644
> > --- a/hw/virtio/vhost-backend.c
> > +++ b/hw/virtio/vhost-backend.c
> > @@ -15,6 +15,8 @@
> >  #include "hw/virtio/vhost-backend.h"
> >  #include "qemu/error-report.h"
> >
> > +static unsigned int vhost_kernel_used_memslots;
> > +
> >  static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int
> request,
> >                               void *arg)  { @@ -233,6 +235,16 @@
> > static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
> >          qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL,
> > NULL);  }
> >
> > +static void vhost_kernel_set_used_memslots(struct vhost_dev *dev) {
> > +    vhost_kernel_used_memslots = dev->mem->nregions; }
> > +
> > +static unsigned int vhost_kernel_get_used_memslots(void)
> > +{
> > +    return vhost_kernel_used_memslots; }
> > +
> >  static const VhostOps kernel_ops = {
> >          .backend_type = VHOST_BACKEND_TYPE_KERNEL,
> >          .vhost_backend_init = vhost_kernel_init, @@ -264,6 +276,8 @@
> > static const VhostOps kernel_ops = {  #endif /* CONFIG_VHOST_VSOCK */
> >          .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
> >          .vhost_send_device_iotlb_msg =
> > vhost_kernel_send_device_iotlb_msg,
> > +        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
> > +        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
> >  };
> >
> >  int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType
> > backend_type) diff --git a/hw/virtio/vhost-user.c
> > b/hw/virtio/vhost-user.c index 093675e..58a4cec 100644
> > --- a/hw/virtio/vhost-user.c
> > +++ b/hw/virtio/vhost-user.c
> > @@ -122,6 +122,8 @@ static VhostUserMsg m __attribute__ ((unused));
> >  /* The version of the protocol we support */
> >  #define VHOST_USER_VERSION    (0x1)
> >
> > +static unsigned int vhost_user_used_memslots;
> > +
> >  struct vhost_user {
> >      CharBackend *chr;
> >      int slave_fd;
> > @@ -922,6 +924,33 @@ static void vhost_user_set_iotlb_callback(struct
> vhost_dev *dev, int enabled)
> >      /* No-op as the receive channel is not dedicated to IOTLB
> > messages. */  }
> >
> > +static void vhost_user_set_used_memslots(struct vhost_dev *dev) {
> > +    int counter = 0;
> > +    int i;
> > +
> > +    for (i = 0; i < dev->mem->nregions; ++i) {
> > +        struct vhost_memory_region *reg = dev->mem->regions + i;
> > +        ram_addr_t offset;
> > +        MemoryRegion *mr;
> > +        int fd;
> > +
> > +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> > +        mr = memory_region_from_host((void *)(uintptr_t)reg-
> >userspace_addr,
> > +                                    &offset);
> > +        fd = memory_region_get_fd(mr);
> > +        if (fd > 0) {
> > +            counter++;
> > +        }
> > +    }
> vhost_user_set_mem_table() already does the same counting, there is no
> point in duplicating it, just drop vhost_set_used_memslots callback
> 
> > +    vhost_user_used_memslots = counter;
> and update this value from vhost_user_set_mem_table()
> 

Answered in another thread.

Regards,
Jay

> > +}
> > +
> > +static unsigned int vhost_user_get_used_memslots(void)
> > +{
> > +    return vhost_user_used_memslots;
> > +}
> > +
> >  const VhostOps user_ops = {
> >          .backend_type = VHOST_BACKEND_TYPE_USER,
> >          .vhost_backend_init = vhost_user_init, @@ -948,4 +977,6 @@
> > const VhostOps user_ops = {
> >          .vhost_net_set_mtu = vhost_user_net_set_mtu,
> >          .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
> >          .vhost_send_device_iotlb_msg =
> > vhost_user_send_device_iotlb_msg,
> > +        .vhost_set_used_memslots = vhost_user_set_used_memslots,
> > +        .vhost_get_used_memslots = vhost_user_get_used_memslots,
> >  };
> > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index
> > e4290ce..59a32e9 100644
> > --- a/hw/virtio/vhost.c
> > +++ b/hw/virtio/vhost.c
> > @@ -43,20 +43,21 @@
> >  static struct vhost_log *vhost_log;
> >  static struct vhost_log *vhost_log_shm;
> >
> > -static unsigned int used_memslots;
> >  static QLIST_HEAD(, vhost_dev) vhost_devices =
> >      QLIST_HEAD_INITIALIZER(vhost_devices);
> >
> >  bool vhost_has_free_slot(void)
> >  {
> > -    unsigned int slots_limit = ~0U;
> >      struct vhost_dev *hdev;
> >
> >      QLIST_FOREACH(hdev, &vhost_devices, entry) {
> > -        unsigned int r = hdev->vhost_ops-
> >vhost_backend_memslots_limit(hdev);
> > -        slots_limit = MIN(slots_limit, r);
> > +        if (hdev->vhost_ops->vhost_get_used_memslots() >=
> > +            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > +            return false;
> > +        }
> >      }
> > -    return slots_limit > used_memslots;
> > +
> > +    return true;
> >  }
> >
> >  static void vhost_dev_sync_region(struct vhost_dev *dev, @@ -606,7
> > +607,7 @@ static void vhost_set_memory(MemoryListener *listener,
> >      dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr,
> start_addr);
> >      dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr,
> start_addr + size - 1);
> >      dev->memory_changed = true;
> > -    used_memslots = dev->mem->nregions;
> > +    dev->vhost_ops->vhost_set_used_memslots(dev);
> >  }
> >
> >  static bool vhost_section(MemoryRegionSection *section) @@ -1251,7
> > +1252,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >          goto fail;
> >      }
> >
> > -    if (used_memslots > hdev->vhost_ops-
> >vhost_backend_memslots_limit(hdev)) {
> > +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> >          error_report("vhost backend memory slots limit is less"
> >                  " than current number of present memory slots");
> >          r = -1;
> > diff --git a/include/hw/virtio/vhost-backend.h
> > b/include/hw/virtio/vhost-backend.h
> > index a7a5f22..19961b5 100644
> > --- a/include/hw/virtio/vhost-backend.h
> > +++ b/include/hw/virtio/vhost-backend.h
> > @@ -84,6 +84,8 @@ typedef void (*vhost_set_iotlb_callback_op)(struct
> vhost_dev *dev,
> >                                             int enabled);  typedef int
> > (*vhost_send_device_iotlb_msg_op)(struct vhost_dev *dev,
> >                                                struct vhost_iotlb_msg
> > *imsg);
> > +typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
> > +typedef unsigned int (*vhost_get_used_memslots_op)(void);
> >
> >  typedef struct VhostOps {
> >      VhostBackendType backend_type;
> > @@ -118,6 +120,8 @@ typedef struct VhostOps {
> >      vhost_vsock_set_running_op vhost_vsock_set_running;
> >      vhost_set_iotlb_callback_op vhost_set_iotlb_callback;
> >      vhost_send_device_iotlb_msg_op vhost_send_device_iotlb_msg;
> > +    vhost_set_used_memslots_op vhost_set_used_memslots;
> > +    vhost_get_used_memslots_op vhost_get_used_memslots;
> >  } VhostOps;
> >
> >  extern const VhostOps user_ops;


* Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately
  2017-12-22 19:03     ` Igor Mammedov
@ 2017-12-23  7:09       ` Zhoujian (jay)
  2017-12-28 11:11         ` Igor Mammedov
  0 siblings, 1 reply; 18+ messages in thread
From: Zhoujian (jay) @ 2017-12-23  7:09 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: Huangweidong (C), mst, wangxin (U),
	qemu-devel, Liuzhe (Cloud Open Labs, NFV),
	dgilbert, Gonglei (Arei)

Hi Igor,

> -----Original Message-----
> From: Igor Mammedov [mailto:imammedo@redhat.com]
> Sent: Saturday, December 23, 2017 3:03 AM
> To: Zhoujian (jay) <jianjay.zhou@huawei.com>
> Cc: Huangweidong (C) <weidong.huang@huawei.com>; mst@redhat.com; wangxin
> (U) <wangxinxin.wang@huawei.com>; qemu-devel@nongnu.org; Liuzhe (Cloud
> Open Labs, NFV) <gary.liuzhe@huawei.com>; dgilbert@redhat.com; Gonglei
> (Arei) <arei.gonglei@huawei.com>
> Subject: Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number
> for vhost-user and vhost-kernel separately
> 
> On Fri, 22 Dec 2017 17:25:13 +0100
> Igor Mammedov <imammedo@redhat.com> wrote:
> 
> > On Fri, 15 Dec 2017 16:45:54 +0800
> > Jay Zhou <jianjay.zhou@huawei.com> wrote:
> [...]
> 
> > > +static void vhost_user_set_used_memslots(struct vhost_dev *dev) {
> > > +    int counter = 0;
> > > +    int i;
> > > +
> > > +    for (i = 0; i < dev->mem->nregions; ++i) {
> > > +        struct vhost_memory_region *reg = dev->mem->regions + i;
> > > +        ram_addr_t offset;
> > > +        MemoryRegion *mr;
> > > +        int fd;
> > > +
> > > +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> > > +        mr = memory_region_from_host((void *)(uintptr_t)reg-
> >userspace_addr,
> > > +                                    &offset);
> > > +        fd = memory_region_get_fd(mr);
> > > +        if (fd > 0) {
> > > +            counter++;
> > > +        }
> > > +    }
> > vhost_user_set_mem_table() already does the same counting, there is no
> > point in duplicating it, just drop vhost_set_used_memslots callback
> >
> > > +    vhost_user_used_memslots = counter;
> > and update this value from vhost_user_set_mem_table()
> never mind; updating it from vhost_user_set_mem_table() won't do, as it's
> called only when the device is started, not when the backend is initialized,
> so the way you did it should work for both cases
> 

How about doing it like this (not sure whether it's the best way; any ideas?):

Define a new function, e.g.

static void vhost_user_set_used_memslots_and_msgs(struct vhost_dev *dev,
                            VhostUserMsg *msg, size_t *fd_num, int *fds)
{
    int num = 0;
    int i;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        ram_addr_t offset;
        MemoryRegion *mr;
        int fd;

        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
                                &offset);
        fd = memory_region_get_fd(mr);
        if (fd > 0) {
            /* only fill in the message when the caller asked for one */
            if (msg && fd_num && fds) {
                assert(num < VHOST_MEMORY_MAX_NREGIONS);
                msg->payload.memory.regions[num].userspace_addr
                                        = reg->userspace_addr;
                msg->payload.memory.regions[num].memory_size
                                        = reg->memory_size;
                msg->payload.memory.regions[num].guest_phys_addr
                                        = reg->guest_phys_addr;
                msg->payload.memory.regions[num].mmap_offset = offset;
                fds[num] = fd;
            }
            /* count every fd-backed region, with or without a message */
            num++;
        }
    }
    vhost_user_used_memslots = num;
    if (fd_num) {
        *fd_num = num;
    }
}

And call it when the backend is initialized and the device is started
respectively,

static void vhost_user_set_used_memslots(struct vhost_dev *dev)
{
	vhost_user_set_used_memslots_and_msgs(dev, NULL, NULL, NULL);
}

static int vhost_user_set_mem_table(struct vhost_dev *dev,
						struct vhost_memory *mem)
{
	[...]

	vhost_user_set_used_memslots_and_msgs(dev, &msg, &fd_num, fds);

	[...]
}


Regards,
Jay

> 
> >
> > > +}
> > > +
> [...]


* Re: [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number
  2017-12-22 18:48   ` Igor Mammedov
  2017-12-22 21:15     ` Michael S. Tsirkin
@ 2017-12-23  8:27     ` Zhoujian (jay)
  2017-12-28 11:19       ` Igor Mammedov
  2017-12-23  8:49     ` Zhoujian (jay)
  2 siblings, 1 reply; 18+ messages in thread
From: Zhoujian (jay) @ 2017-12-23  8:27 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: qemu-devel, mst, Huangweidong (C), Gonglei (Arei), wangxin (U),
	Liuzhe (Cloud Open Labs, NFV),
	dgilbert



> -----Original Message-----
> From: Igor Mammedov [mailto:imammedo@redhat.com]
> Sent: Saturday, December 23, 2017 2:49 AM
> To: Zhoujian (jay) <jianjay.zhou@huawei.com>
> Cc: qemu-devel@nongnu.org; mst@redhat.com; Huangweidong (C)
> <weidong.huang@huawei.com>; Gonglei (Arei) <arei.gonglei@huawei.com>;
> wangxin (U) <wangxinxin.wang@huawei.com>; Liuzhe (Cloud Open Labs, NFV)
> <gary.liuzhe@huawei.com>; dgilbert@redhat.com
> Subject: Re: [PATCH v2 2/2] vhost: double check used memslots number
> 
> On Fri, 15 Dec 2017 16:45:55 +0800
> Jay Zhou <jianjay.zhou@huawei.com> wrote:
> 
> > If the VM already has N (N > 8) memory slots that vhost-user would use,
> > QEMU crashes on an assertion in vhost_user_set_mem_table() when we try
> > to hotplug the first vhost-user NIC.
> > This patch checks whether the memslot limit is exceeded after
> > vhost_user_used_memslots has been updated.
> Can't understand the commit message, pls rephrase (what is being fixed,
> and how it's fixed); also include reproducing steps for the crash and maybe
> describe the call flow/backtrace that triggers it.

Sorry about my poor English

> 
> PS:
> I wasn't able to reproduce crash

Steps to reproduce:
(1) start up a VM successfully without any vhost device
(2) hotplug 8 DIMMs successfully
(3) hotplug a vhost-user NIC; the VM crashes, asserting
    at the line
        assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
    in vhost_user_set_mem_table()
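
For reference, a standalone model of that abort (VHOST_MEMORY_MAX_NREGIONS
is 8 in hw/virtio/vhost-user.c; everything else below is illustrative):

#include <assert.h>
#include <stdio.h>

#define VHOST_MEMORY_MAX_NREGIONS 8

/* Model of the fd-collection loop in vhost_user_set_mem_table():
 * every fd-backed region lands in a fixed 8-entry array, so a 9th
 * region (boot RAM + 8 hotplugged DIMMs) trips the assert. */
int main(void)
{
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    int nregions = 9;   /* 1 boot-RAM region + 8 DIMMs */
    int fd_num = 0;
    int i;

    for (i = 0; i < nregions; ++i) {
        assert(fd_num < VHOST_MEMORY_MAX_NREGIONS); /* aborts at i == 8 */
        fds[fd_num++] = i;   /* stand-in for a real memfd */
    }
    printf("collected %d fds\n", fd_num);
    return 0;
}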

Regards,
Jay

> >
> > Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
> > ---
> >  hw/virtio/vhost.c | 27 +++++++++++++++++++++++----
> >  1 file changed, 23 insertions(+), 4 deletions(-)
> >
> > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index
> > 59a32e9..e45f5e2 100644
> > --- a/hw/virtio/vhost.c
> > +++ b/hw/virtio/vhost.c
> > @@ -1234,6 +1234,18 @@ static void vhost_virtqueue_cleanup(struct
> vhost_virtqueue *vq)
> >      event_notifier_cleanup(&vq->masked_notifier);
> >  }
> >
> > +static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev
> > +*hdev) {
> > +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > +        error_report("vhost backend memory slots limit is less"
> > +                " than current number of present memory slots");
> > +        return true;
> > +    }
> > +
> > +    return false;
> > +}
> > +
> >  int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >                     VhostBackendType backend_type, uint32_t
> > busyloop_timeout)  { @@ -1252,10 +1264,7 @@ int vhost_dev_init(struct
> > vhost_dev *hdev, void *opaque,
> >          goto fail;
> >      }
> >
> > -    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > -        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > -        error_report("vhost backend memory slots limit is less"
> > -                " than current number of present memory slots");
> > +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> why do you keep this check?
> it seems to always be false
> 
> 
> 
> >          r = -1;
> >          goto fail;
> >      }
> > @@ -1341,6 +1350,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void
> *opaque,
> >      hdev->memory_changed = false;
> >      memory_listener_register(&hdev->memory_listener,
> &address_space_memory);
> >      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
> > +
> > +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> > +        r = -1;
> > +        if (busyloop_timeout) {
> > +            goto fail_busyloop;
> > +        } else {
> > +            goto fail;
> > +        }
> > +    }
> seems to be the right thing to do, since after registering the listener
> for the first time, used_memslots will be updated to its actual value.
> 
> 
> I did some testing and without this hunk/patch
> 
> on 'device_add  virtio-net-pci,netdev=net0' qemu prints:
> 
> qemu-system-x86_64: vhost_set_mem_table failed: Argument list too long (7)
> qemu-system-x86_64: unable to start vhost net: 7: falling back on
> userspace virtio
> 
> and network is operational in guest, but with this patch
> 
> "netdev_add ...,vhost-on" prints:
> 
> vhost backend memory slots limit is less than current number of present
> memory slots vhost-net requested but could not be initialized
> 
> and following "device_add  virtio-net-pci,netdev=net0" prints:
> 
> TUNSETOFFLOAD ioctl() failed: Bad file descriptor TUNSETOFFLOAD ioctl()
> failed: Bad file descriptor
> 
> adapter is still hot-plugged but guest networking is broken (can't get IP
> address via DHCP)
> 
> so the patch seems to introduce a regression, or something is broken
> elsewhere and this exposes the issue; not sure what qemu's reaction
> should be in this case, i.e. when netdev_add fails:
>     1: should we fail the followed-up device_add, or
>     2: make it fall back to userspace virtio?
> 
> I'd go for #2,
> Michael what's your take on it?
> 
> > +
> >      return 0;
> >
> >  fail_busyloop:


* Re: [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number
  2017-12-22 18:48   ` Igor Mammedov
  2017-12-22 21:15     ` Michael S. Tsirkin
  2017-12-23  8:27     ` Zhoujian (jay)
@ 2017-12-23  8:49     ` Zhoujian (jay)
  2 siblings, 0 replies; 18+ messages in thread
From: Zhoujian (jay) @ 2017-12-23  8:49 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: qemu-devel, mst, Huangweidong (C), Gonglei (Arei), wangxin (U),
	Liuzhe (Cloud Open Labs, NFV),
	dgilbert



[...]

> > ---
> >  hw/virtio/vhost.c | 27 +++++++++++++++++++++++----
> >  1 file changed, 23 insertions(+), 4 deletions(-)
> >
> > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index
> > 59a32e9..e45f5e2 100644
> > --- a/hw/virtio/vhost.c
> > +++ b/hw/virtio/vhost.c
> > @@ -1234,6 +1234,18 @@ static void vhost_virtqueue_cleanup(struct
> vhost_virtqueue *vq)
> >      event_notifier_cleanup(&vq->masked_notifier);
> >  }
> >
> > +static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev
> > +*hdev) {
> > +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > +        error_report("vhost backend memory slots limit is less"
> > +                " than current number of present memory slots");
> > +        return true;
> > +    }
> > +
> > +    return false;
> > +}
> > +
> >  int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >                     VhostBackendType backend_type, uint32_t
> > busyloop_timeout)  { @@ -1252,10 +1264,7 @@ int vhost_dev_init(struct
> > vhost_dev *hdev, void *opaque,
> >          goto fail;
> >      }
> >
> > -    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > -        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > -        error_report("vhost backend memory slots limit is less"
> > -                " than current number of present memory slots");
> > +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> why do you keep this check?
> it seems to always be false
> 

If a vhost device has already been added successfully, i.e. its memory
listener has been registered and therefore
hdev->vhost_ops->vhost_set_used_memslots() has been called (used_memslots
is updated there), then when we hotplug another vhost device of the same
backend type, hdev->vhost_ops->vhost_get_used_memslots() will not be 0
(used_memslots is shared by all vhost devices of the same backend type),
so the check will not always be false.
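
Roughly, the sequence is (a sketch of the call flow as I understand it,
using the names from this series):

    /* 1st vhost device of a given backend type */
    vhost_dev_init()
        vhost_get_used_memslots() == 0            /* early check passes */
        memory_listener_register()
            -> vhost_set_used_memslots()          /* used_memslots updated */
        vhost_dev_used_memslots_is_exceeded()     /* the new double check */

    /* 2nd vhost device of the same backend type */
    vhost_dev_init()
        vhost_get_used_memslots() != 0            /* early check can fail now */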

Regards,
Jay

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately
  2017-12-23  7:09       ` Zhoujian (jay)
@ 2017-12-28 11:11         ` Igor Mammedov
  2017-12-28 13:42           ` Zhoujian (jay)
  0 siblings, 1 reply; 18+ messages in thread
From: Igor Mammedov @ 2017-12-28 11:11 UTC (permalink / raw)
  To: Zhoujian (jay)
  Cc: Huangweidong (C), mst, wangxin (U),
	dgilbert, Liuzhe (Cloud Open Labs, NFV),
	qemu-devel, Gonglei (Arei)

On Sat, 23 Dec 2017 07:09:51 +0000
"Zhoujian (jay)" <jianjay.zhou@huawei.com> wrote:

> Hi Igor,
> 
> > -----Original Message-----
> > From: Igor Mammedov [mailto:imammedo@redhat.com]
> > Sent: Saturday, December 23, 2017 3:03 AM
> > To: Zhoujian (jay) <jianjay.zhou@huawei.com>
> > Cc: Huangweidong (C) <weidong.huang@huawei.com>; mst@redhat.com; wangxin
> > (U) <wangxinxin.wang@huawei.com>; qemu-devel@nongnu.org; Liuzhe (Cloud
> > Open Labs, NFV) <gary.liuzhe@huawei.com>; dgilbert@redhat.com; Gonglei
> > (Arei) <arei.gonglei@huawei.com>
> > Subject: Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number
> > for vhost-user and vhost-kernel separately
> > 
> > On Fri, 22 Dec 2017 17:25:13 +0100
> > Igor Mammedov <imammedo@redhat.com> wrote:
> > 
> > > On Fri, 15 Dec 2017 16:45:54 +0800
> > > Jay Zhou <jianjay.zhou@huawei.com> wrote:
> > [...]
> > 
> > > > +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> > > > +{
> > > > +    int counter = 0;
> > > > +    int i;
> > > > +
> > > > +    for (i = 0; i < dev->mem->nregions; ++i) {
> > > > +        struct vhost_memory_region *reg = dev->mem->regions + i;
> > > > +        ram_addr_t offset;
> > > > +        MemoryRegion *mr;
> > > > +        int fd;
> > > > +
> > > > +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> > > > +        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> > > > +                                    &offset);
> > > > +        fd = memory_region_get_fd(mr);
> > > > +        if (fd > 0) {
> > > > +            counter++;
> > > > +        }
> > > > +    }
> > > vhost_user_set_mem_table() already does the same counting, there is no
> > > point in duplicating it, just drop vhost_set_used_memslots callback
> > >
> > > > +    vhost_user_used_memslots = counter;
> > > and update this value from vhost_user_set_mem_table()
> > never mind updating it from vhost_user_set_mem_table(), as it's called
> > only when the device is started, which is not the case when the backend
> > is initialized; so the way you did it should work for both cases
> > 
> 
> How about doing it like this (not sure whether it's the best way, any ideas?):
> 
> Define a new function, e.g.
> 
> static void vhost_user_set_used_memslots_and_msgs(struct vhost_dev *dev,
>                             VhostUserMsg *msg, size_t *fd_num, int *fds)
s/vhost_user_set_used_memslots_and_msgs/vhost_user_prepare_msg/

> {
>     int num = 0;
>     int i;
> 
>     for (i = 0; i < dev->mem->nregions; ++i) {
>         struct vhost_memory_region *reg = dev->mem->regions + i;
>         ram_addr_t offset;
>         MemoryRegion *mr;
>         int fd;
> 
>         assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
>         mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
>                                 &offset);
>         fd = memory_region_get_fd(mr);
>         if (fd > 0) {
>             if (msg && fd_num && fds) {
>                 msg->payload.memory.regions[num].userspace_addr
>                                         = reg->userspace_addr;
>                 msg->payload.memory.regions[num].memory_size
>                                         = reg->memory_size;
>                 msg->payload.memory.regions[num].guest_phys_addr
>                                         = reg->guest_phys_addr;
>                 msg->payload.memory.regions[num].mmap_offset = offset;
>                 assert(num < VHOST_MEMORY_MAX_NREGIONS);
there is no need to assert here; the function can return an error.
 
>                 fds[num++] = fd;
Isn't 'num' counted in the wrong place?

   if (msg && fd_num && fds) => false
is the case for vhost_user_set_used_memslots(), so it won't count 'num',
which is later used to initialize vhost_user_used_memslots.

>             }
>         }
>     }
>     vhost_user_used_memslots = num;
add a comment here, as it won't be obvious to the reader why vhost_user_used_memslots is set here


>     if (fd_num)
>         *fd_num = num;
msg.payload.memory.nregions can be used directly instead of num and fd_num,
and it's one less argument to pass out of the function.

> }
> 
> And call it when the backend is initialized and the device is started
> respectively,
> 
> static void vhost_user_set_used_memslots(struct vhost_dev *dev)
static int

for the return value

> {
> 	vhost_user_set_used_memslots_and_msgs(dev, NULL, NULL, NULL);
we can call
        vhost_user_set_used_memslots_and_msgs(dev, &msg, &fd_num, fds);
here and discard/ignore the results; that way there will be fewer
conditionals inside the called function

> }
> 
> static int vhost_user_set_mem_table(struct vhost_dev *dev,
> 						struct vhost_memory *mem)
> {
> 	[...]
> 
> 	vhost_user_set_used_memslots_and_msgs(dev, &msg, &fd_num, fds);
   rc = vhost_user_set_used_memslots_and_msgs(dev, &msg, &fd_num, fds);

and return the error from vhost_user_set_mem_table(); the caller should
be able to cope with it.

> 
> 	[...]
> }
> 
> 
> Regards,
> Jay
> 
> > 
> > >
> > > > +}
> > > > +
> > [...]
> 
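
For completeness, the rough shape I have in mind, with the comments above
folded in (an untested sketch, not a final patch; the -1 error value and
exact naming are placeholders):

static int vhost_user_prepare_msg(struct vhost_dev *dev, VhostUserMsg *msg,
                                  int *fds)
{
    uint32_t n = 0;
    int i;

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        ram_addr_t offset;
        MemoryRegion *mr;
        int fd;

        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
                                     &offset);
        fd = memory_region_get_fd(mr);
        if (fd > 0) {
            if (n < VHOST_MEMORY_MAX_NREGIONS) {
                msg->payload.memory.regions[n].userspace_addr =
                    reg->userspace_addr;
                msg->payload.memory.regions[n].memory_size = reg->memory_size;
                msg->payload.memory.regions[n].guest_phys_addr =
                    reg->guest_phys_addr;
                msg->payload.memory.regions[n].mmap_offset = offset;
                fds[n] = fd;
            }
            /* count every fd-backed region, even past the limit */
            n++;
        }
    }

    /* updated unconditionally, so vhost_user_get_used_memslots() reports
     * the real number of fd-backed regions for the double check */
    vhost_user_used_memslots = n;

    if (n > VHOST_MEMORY_MAX_NREGIONS) {
        return -1;
    }
    msg->payload.memory.nregions = n;
    return 0;
}

then vhost_user_set_mem_table() can use msg.payload.memory.nregions as the
fd count for the write, and vhost_user_set_used_memslots() just discards
the filled msg/fds.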

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number
  2017-12-23  8:27     ` Zhoujian (jay)
@ 2017-12-28 11:19       ` Igor Mammedov
  0 siblings, 0 replies; 18+ messages in thread
From: Igor Mammedov @ 2017-12-28 11:19 UTC (permalink / raw)
  To: Zhoujian (jay)
  Cc: Huangweidong (C), mst, wangxin (U),
	qemu-devel, Liuzhe (Cloud Open Labs, NFV),
	dgilbert, Gonglei (Arei)

On Sat, 23 Dec 2017 08:27:25 +0000
"Zhoujian (jay)" <jianjay.zhou@huawei.com> wrote:

> 
> 
> > -----Original Message-----
> > From: Igor Mammedov [mailto:imammedo@redhat.com]
> > Sent: Saturday, December 23, 2017 2:49 AM
> > To: Zhoujian (jay) <jianjay.zhou@huawei.com>
> > Cc: qemu-devel@nongnu.org; mst@redhat.com; Huangweidong (C)
> > <weidong.huang@huawei.com>; Gonglei (Arei) <arei.gonglei@huawei.com>;
> > wangxin (U) <wangxinxin.wang@huawei.com>; Liuzhe (Cloud Open Labs, NFV)
> > <gary.liuzhe@huawei.com>; dgilbert@redhat.com
> > Subject: Re: [PATCH v2 2/2] vhost: double check used memslots number
> > 
> > On Fri, 15 Dec 2017 16:45:55 +0800
> > Jay Zhou <jianjay.zhou@huawei.com> wrote:
> > 
> > > If the VM already has N(N>8) available memory slots for vhost user,
> > > the VM will be crashed in vhost_user_set_mem_table if we try to
> > > hotplug the first vhost user NIC.
> > > This patch checks if memslots number exceeded or not after updating
> > > vhost_user_used_memslots.
> > Can't understand commit message, pls rephrase (what is being fixed, and
> > how it's fixed) also include reproducing steps for crash and maybe
> > describe call flow/backtrace that triggers crash.
> 
> Sorry about my poor English
> 
> > 
> > PS:
> > I wasn't able to reproduce crash
> 
> Steps to reproduce:
> (1) start up a VM successfully without any vhost device
> (2) hotplug 8 memory DIMMs successfully
> (3) hotplug a vhost-user NIC, the VM crashed; it asserted
>     at the line
>         assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
a quick fix for this crash could be:

diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 093675ed98..07a37537dd 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -321,7 +321,9 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
             msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
             msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
             msg.payload.memory.regions[fd_num].mmap_offset = offset;
-            assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
+            if (fd_num == VHOST_MEMORY_MAX_NREGIONS) {
+                return -1;
+            }
             fds[fd_num++] = fd;
         }
     }

it should gracefully prevent the device from starting.
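
Strictly speaking, the check probably wants to go before the
regions[fd_num] writes too, since storing into index
VHOST_MEMORY_MAX_NREGIONS is already out of bounds; roughly:

            fd = memory_region_get_fd(mr);
            if (fd > 0) {
                /* refuse before touching regions[fd_num] */
                if (fd_num == VHOST_MEMORY_MAX_NREGIONS) {
                    return -1;
                }
                msg.payload.memory.regions[fd_num].userspace_addr =
                    reg->userspace_addr;
                msg.payload.memory.regions[fd_num].memory_size =
                    reg->memory_size;
                msg.payload.memory.regions[fd_num].guest_phys_addr =
                    reg->guest_phys_addr;
                msg.payload.memory.regions[fd_num].mmap_offset = offset;
                fds[fd_num++] = fd;
            }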

>     in vhost_user_set_mem_table()
> 
> Regards,
> Jay
[...]

^ permalink raw reply related	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number
  2017-12-22 21:15     ` Michael S. Tsirkin
@ 2017-12-28 11:29       ` Igor Mammedov
  2018-01-03 14:19         ` Zhoujian (jay)
  0 siblings, 1 reply; 18+ messages in thread
From: Igor Mammedov @ 2017-12-28 11:29 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: weidong.huang, wangxinxin.wang, qemu-devel, gary.liuzhe,
	dgilbert, arei.gonglei, Jay Zhou

On Fri, 22 Dec 2017 23:15:09 +0200
"Michael S. Tsirkin" <mst@redhat.com> wrote:

> On Fri, Dec 22, 2017 at 07:48:55PM +0100, Igor Mammedov wrote:
> > On Fri, 15 Dec 2017 16:45:55 +0800
> > Jay Zhou <jianjay.zhou@huawei.com> wrote:
> > 
> > > If the VM already has N(N>8) available memory slots for vhost user,
> > > the VM will be crashed in vhost_user_set_mem_table if we try to
> > > hotplug the first vhost user NIC.
> > > This patch checks if memslots number exceeded or not after updating
> > > vhost_user_used_memslots.
> > Can't understand commit message, pls rephrase (what is being fixed, and how it's fixed)
> > also include reproducing steps for crash and maybe describe call flow/backtrace
> > that triggers crash.
> > 
> > PS:
> > I wasn't able to reproduce crash
> > 
> > > 
> > > Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
> > > ---
> > >  hw/virtio/vhost.c | 27 +++++++++++++++++++++++----
> > >  1 file changed, 23 insertions(+), 4 deletions(-)
> > > 
> > > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> > > index 59a32e9..e45f5e2 100644
> > > --- a/hw/virtio/vhost.c
> > > +++ b/hw/virtio/vhost.c
> > > @@ -1234,6 +1234,18 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
> > >      event_notifier_cleanup(&vq->masked_notifier);
> > >  }
> > >  
> > > +static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
> > > +{
> > > +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > > +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > > +        error_report("vhost backend memory slots limit is less"
> > > +                " than current number of present memory slots");
> > > +        return true;
> > > +    }
> > > +
> > > +    return false;
> > > +}
> > > +
> > >  int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> > >                     VhostBackendType backend_type, uint32_t busyloop_timeout)
> > >  {
> > > @@ -1252,10 +1264,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> > >          goto fail;
> > >      }
> > >  
> > > -    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > > -        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > > -        error_report("vhost backend memory slots limit is less"
> > > -                " than current number of present memory slots");
> > > +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> > why do you keep this check?
> > it seems to always be false
> > 
> > 
> > 
> > >          r = -1;
> > >          goto fail;
> > >      }
> > > @@ -1341,6 +1350,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> > >      hdev->memory_changed = false;
> > >      memory_listener_register(&hdev->memory_listener, &address_space_memory);
> > >      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
> > > +
> > > +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> > > +        r = -1;
> > > +        if (busyloop_timeout) {
> > > +            goto fail_busyloop;
> > > +        } else {
> > > +            goto fail;
> > > +        }
> > > +    }
> > seems to be the right thing to do, since after registering the listener for the
> > first time used_memslots will be updated to its actual value.
> > 
> > 
> > I did some testing and without this hunk/patch
> > 
> > on 'device_add  virtio-net-pci,netdev=net0' qemu prints:
> > 
> > qemu-system-x86_64: vhost_set_mem_table failed: Argument list too long (7)
> > qemu-system-x86_64: unable to start vhost net: 7: falling back on userspace virtio
> > 
> > and network is operational in the guest, but with this patch
> > 
> > "netdev_add ...,vhost=on" prints:
> > 
> > vhost backend memory slots limit is less than current number of present memory slots
> > vhost-net requested but could not be initialized
> > 
> > and the following "device_add  virtio-net-pci,netdev=net0" prints:
> > 
> > TUNSETOFFLOAD ioctl() failed: Bad file descriptor
> > TUNSETOFFLOAD ioctl() failed: Bad file descriptor
> > 
> > the adapter is still hot-plugged but guest networking is broken (can't get an IP address via DHCP)
> > 
> > so the patch seems to introduce a regression, or something is broken elsewhere and this exposes
> > the issue; not sure what qemu's reaction should be in this case, i.e. when netdev_add fails:
> >     1: should we fail the follow-up device_add, or
> >     2: make it fall back to userspace virtio
> > 
> > I'd go for #2,
> > Michael what's your take on it?
> 
> OK but there's a vhost force flag, if that is set we definitely should
> fail device_add.
> 
> Also, hotplug can follow device_add, should be handled similarly.
I was testing with vhost-kernel (as it doesn't need an extra environment to
set up) and it's able to fall back to the virtio transport.

However, in the case of vhost-user, is there even an option to fall back to?
Perhaps our only choice here is to fail backend creation cleanly,
so no one would be able to add a frontend referring to a non-existent backend.


PS:
even if we have to fail on error for vhost-user, this patch shouldn't
change the current vhost-kernel behavior (the fallback should still work)

> 
> > > +
> > >      return 0;
> > >  
> > >  fail_busyloop:
> 

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately
  2017-12-28 11:11         ` Igor Mammedov
@ 2017-12-28 13:42           ` Zhoujian (jay)
  0 siblings, 0 replies; 18+ messages in thread
From: Zhoujian (jay) @ 2017-12-28 13:42 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: Huangweidong (C), mst, wangxin (U),
	dgilbert, Liuzhe (Cloud Open Labs, NFV),
	qemu-devel, Gonglei (Arei)



> -----Original Message-----
> From: Igor Mammedov [mailto:imammedo@redhat.com]
> Sent: Thursday, December 28, 2017 7:12 PM
> To: Zhoujian (jay) <jianjay.zhou@huawei.com>
> Cc: Huangweidong (C) <weidong.huang@huawei.com>; mst@redhat.com; wangxin
> (U) <wangxinxin.wang@huawei.com>; dgilbert@redhat.com; Liuzhe (Cloud Open
> Labs, NFV) <gary.liuzhe@huawei.com>; qemu-devel@nongnu.org; Gonglei (Arei)
> <arei.gonglei@huawei.com>
> Subject: Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number
> for vhost-user and vhost-kernel separately
> 
> On Sat, 23 Dec 2017 07:09:51 +0000
> "Zhoujian (jay)" <jianjay.zhou@huawei.com> wrote:
> 
> > Hi Igor,
> >
> > > -----Original Message-----
> > > From: Igor Mammedov [mailto:imammedo@redhat.com]
> > > Sent: Saturday, December 23, 2017 3:03 AM
> > > To: Zhoujian (jay) <jianjay.zhou@huawei.com>
> > > Cc: Huangweidong (C) <weidong.huang@huawei.com>; mst@redhat.com;
> > > wangxin
> > > (U) <wangxinxin.wang@huawei.com>; qemu-devel@nongnu.org; Liuzhe
> > > (Cloud Open Labs, NFV) <gary.liuzhe@huawei.com>;
> > > dgilbert@redhat.com; Gonglei
> > > (Arei) <arei.gonglei@huawei.com>
> > > Subject: Re: [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot
> > > number for vhost-user and vhost-kernel separately
> > >
> > > On Fri, 22 Dec 2017 17:25:13 +0100
> > > Igor Mammedov <imammedo@redhat.com> wrote:
> > >
> > > > On Fri, 15 Dec 2017 16:45:54 +0800 Jay Zhou
> > > > <jianjay.zhou@huawei.com> wrote:
> > > [...]
> > >
> > > > > +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> > > > > +{
> > > > > +    int counter = 0;
> > > > > +    int i;
> > > > > +
> > > > > +    for (i = 0; i < dev->mem->nregions; ++i) {
> > > > > +        struct vhost_memory_region *reg = dev->mem->regions + i;
> > > > > +        ram_addr_t offset;
> > > > > +        MemoryRegion *mr;
> > > > > +        int fd;
> > > > > +
> > > > > +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> > > > > +        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> > > > > +                                    &offset);
> > > > > +        fd = memory_region_get_fd(mr);
> > > > > +        if (fd > 0) {
> > > > > +            counter++;
> > > > > +        }
> > > > > +    }
> > > > vhost_user_set_mem_table() already does the same counting, there
> > > > is no point in duplicating it, just drop vhost_set_used_memslots
> > > > callback
> > > >
> > > > > +    vhost_user_used_memslots = counter;
> > > > and update this value from vhost_user_set_mem_table()
> > > never mind updating it from vhost_user_set_mem_table(), as it's
> > > called only when the device is started, which is not the case when
> > > the backend is initialized; so the way you did it should work for
> > > both cases
> > >
> >
> > How about doing it like this (not sure whether it's the best way, any ideas?):
> >
> > Define a new function, e.g.
> >
> > static void vhost_user_set_used_memslots_and_msgs(struct vhost_dev *dev,
> >                             VhostUserMsg *msg, size_t *fd_num, int *fds)
> s/vhost_user_set_used_memslots_and_msgs/vhost_user_prepare_msg/

Ok

> 
> > {
> >     int num = 0;
> >     int i;
> >
> >     for (i = 0; i < dev->mem->nregions; ++i) {
> >         struct vhost_memory_region *reg = dev->mem->regions + i;
> >         ram_addr_t offset;
> >         MemoryRegion *mr;
> >         int fd;
> >
> >         assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> >         mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> >                                 &offset);
> >         fd = memory_region_get_fd(mr);
> >         if (fd > 0) {
> >             if (msg && fd_num && fds) {
> >                 msg->payload.memory.regions[num].userspace_addr
> >                                         = reg->userspace_addr;
> >                 msg->payload.memory.regions[num].memory_size
> >                                         = reg->memory_size;
> >                 msg->payload.memory.regions[num].guest_phys_addr
> >                                         = reg->guest_phys_addr;
> >                 msg->payload.memory.regions[num].mmap_offset = offset;
> >                 assert(num < VHOST_MEMORY_MAX_NREGIONS);
> there is no need to assert here; the function can return an error.

Ok

> 
> >                 fds[num++] = fd;
> Isn't 'num' counted in the wrong place?
> 
>    if (msg && fd_num && fds) => false
> is the case for vhost_user_set_used_memslots(), so it won't count 'num',
> which is later used to initialize vhost_user_used_memslots.

My bad, will fix.

> >             }
> >         }
> >     }
> >     vhost_user_used_memslots = num;
> add a comment here, as it won't be obvious to the reader why
> vhost_user_used_memslots is set here

Will do.

> 
> >     if (fd_num)
> >         *fd_num = num;
> msg.payload.memory.nregions can be used directly instead of num and fd_num,
> and it's one less argument to pass out of the function.
> 
> > }
> >
> > And call it when the backend is initialized and the device is started
> > respectively,
> >
> > static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> static int
> 
> for the return value
> 
> > {
> > 	vhost_user_set_used_memslots_and_msgs(dev, NULL, NULL, NULL);
> we can call
>         vhost_user_set_used_memslots_and_msgs(dev, &msg, &fd_num, fds);
> here and discard/ignore the results; that way there will be fewer
> conditionals inside the called function
> 
> > }
> >
> > static int vhost_user_set_mem_table(struct vhost_dev *dev,
> > 						struct vhost_memory *mem)
> > {
> > 	[...]
> >
> > 	vhost_user_set_used_memslots_and_msgs(dev, &msg, &fd_num, fds);
>    rc = vhost_user_set_used_memslots_and_msgs(dev, &msg, &fd_num, fds);
> 
> and return the error from vhost_user_set_mem_table(); the caller should
> be able to cope with it.
> 

Will fix all of them in the next version.

Regards,
Jay

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number
  2017-12-28 11:29       ` Igor Mammedov
@ 2018-01-03 14:19         ` Zhoujian (jay)
  0 siblings, 0 replies; 18+ messages in thread
From: Zhoujian (jay) @ 2018-01-03 14:19 UTC (permalink / raw)
  To: Igor Mammedov, Michael S. Tsirkin
  Cc: Huangweidong (C), wangxin (U),
	qemu-devel, Liuzhe (Cloud Open Labs, NFV),
	dgilbert, Gonglei (Arei)

> -----Original Message-----
> From: Igor Mammedov [mailto:imammedo@redhat.com]
> Sent: Thursday, December 28, 2017 7:29 PM
> To: Michael S. Tsirkin <mst@redhat.com>
> Cc: Huangweidong (C) <weidong.huang@huawei.com>; wangxin (U)
> <wangxinxin.wang@huawei.com>; qemu-devel@nongnu.org; Liuzhe (Cloud Open
> Labs, NFV) <gary.liuzhe@huawei.com>; dgilbert@redhat.com; Gonglei (Arei)
> <arei.gonglei@huawei.com>; Zhoujian (jay) <jianjay.zhou@huawei.com>
> Subject: Re: [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots
> number
> 
> On Fri, 22 Dec 2017 23:15:09 +0200
> "Michael S. Tsirkin" <mst@redhat.com> wrote:
> 
> > On Fri, Dec 22, 2017 at 07:48:55PM +0100, Igor Mammedov wrote:
> > > On Fri, 15 Dec 2017 16:45:55 +0800
> > > Jay Zhou <jianjay.zhou@huawei.com> wrote:
> > >
> > > > If the VM already has N(N>8) available memory slots for vhost
> > > > user, the VM will be crashed in vhost_user_set_mem_table if we try
> > > > to hotplug the first vhost user NIC.
> > > > This patch checks if memslots number exceeded or not after
> > > > updating vhost_user_used_memslots.
> > > Can't understand commit message, pls rephrase (what is being fixed,
> > > and how it's fixed) also include reproducing steps for crash and
> > > maybe describe call flow/backtrace that triggers crash.
> > >
> > > PS:
> > > I wasn't able to reproduce crash
> > >
> > > >
> > > > Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
> > > > ---
> > > >  hw/virtio/vhost.c | 27 +++++++++++++++++++++++----
> > > >  1 file changed, 23 insertions(+), 4 deletions(-)
> > > >
> > > > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index
> > > > 59a32e9..e45f5e2 100644
> > > > --- a/hw/virtio/vhost.c
> > > > +++ b/hw/virtio/vhost.c
> > > > @@ -1234,6 +1234,18 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
> > > >      event_notifier_cleanup(&vq->masked_notifier);
> > > >  }
> > > >
> > > > +static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
> > > > +{
> > > > +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > > > +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > > > +        error_report("vhost backend memory slots limit is less"
> > > > +                " than current number of present memory slots");
> > > > +        return true;
> > > > +    }
> > > > +
> > > > +    return false;
> > > > +}
> > > > +
> > > >  int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> > > >                     VhostBackendType backend_type, uint32_t busyloop_timeout)
> > > >  {
> > > > @@ -1252,10 +1264,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> > > >          goto fail;
> > > >      }
> > > >
> > > > -    if (hdev->vhost_ops->vhost_get_used_memslots() >
> > > > -        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> > > > -        error_report("vhost backend memory slots limit is less"
> > > > -                " than current number of present memory slots");
> > > > +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> > > why do you keep this check?
> > > it seems to always be false
> > >
> > >
> > >
> > > >          r = -1;
> > > >          goto fail;
> > > >      }
> > > > @@ -1341,6 +1350,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> > > >      hdev->memory_changed = false;
> > > >      memory_listener_register(&hdev->memory_listener, &address_space_memory);
> > > >      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
> > > > +
> > > > +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
> > > > +        r = -1;
> > > > +        if (busyloop_timeout) {
> > > > +            goto fail_busyloop;
> > > > +        } else {
> > > > +            goto fail;
> > > > +        }
> > > > +    }
> > > seems to be the right thing to do, since after registering the
> > > listener for the first time used_memslots will be updated to its
> > > actual value.
> > >
> > >
> > > I did some testing and without this hunk/patch
> > >
> > > on 'device_add  virtio-net-pci,netdev=net0' qemu prints:
> > >
> > > qemu-system-x86_64: vhost_set_mem_table failed: Argument list too long (7)
> > > qemu-system-x86_64: unable to start vhost net: 7: falling back on userspace virtio

Error code 7 is E2BIG, which means that

    if (mem.nregions > max_mem_regions)
        return -E2BIG;

was hit in the kernel's vhost driver (vhost_set_memory() in
drivers/vhost/vhost.c; max_mem_regions is a vhost module parameter).

> > >
> > > and network is operational in the guest, but with this patch
> > >
> > > "netdev_add ...,vhost=on" prints:
> > >
> > > vhost backend memory slots limit is less than current number of present memory slots
> > > vhost-net requested but could not be initialized
> > >
> > > and the following "device_add  virtio-net-pci,netdev=net0" prints:
> > >
> > > TUNSETOFFLOAD ioctl() failed: Bad file descriptor
> > > TUNSETOFFLOAD ioctl() failed: Bad file descriptor
> > >
> > > the adapter is still hot-plugged but guest networking is broken (can't
> > > get an IP address via DHCP)
> > >
> > > so the patch seems to introduce a regression, or something is broken
> > > elsewhere and this exposes the issue; not sure what qemu's reaction
> > > should be in this case, i.e. when netdev_add fails:
> > >     1: should we fail the follow-up device_add, or
> > >     2: make it fall back to userspace virtio
> > >
> > > I'd go for #2,
> > > Michael what's your take on it?
> >
> > OK but there's a vhost force flag, if that is set we definitely should
> > fail device_add.
> >
> > Also, hotplug can follow device_add, should be handled similarly.
> I was testing with vhost-kernel (as it doesn't need an extra environment to
> set up) and it's able to fall back to the virtio transport.
> 
> However, in the case of vhost-user, is there even an option to fall back to?

Using an error code (doing it like vhost-kernel) instead of asserting in
vhost_user_set_mem_table(), I have tested:
"netdev_add vhost-user,chardev=charnet0,id=hostnet0" is successful,
following "device_add virtio-net-pci,netdev=hostnet0,id=net0,bus=pci.0" prints:

"qemu-system-x86_64: vhost_set_mem_table failed: Interrupted system call (4)
qemu-system-x86_64: unable to start vhost net: 4: falling back on userspace virtio"

or

"qemu-system-x86_64: vhost_set_mem_table failed: Resource temporarily unavailable (11)
qemu-system-x86_64: unable to start vhost net: 11: falling back on userspace virtio"

the adapter is still hot-plugged but guest networking is broken (it can't
get an IP address via DHCP). Does this mean it makes no sense for
vhost-user to fall back?

> Perhaps our only choice here is to fail backend creation cleanly, so no
> one would be able to add a frontend referring to a non-existent backend.

Not sure what to do.

> 
> 
> PS:
> even if we have to fail on error for vhost-user, this patch shouldn't
> change the current vhost-kernel behavior (the fallback should still work)

Does this mean vhost-kernel doesn't need to care about the value of
used_memslots (because it's able to fall back to userspace virtio)?

Is it enough to return an error code from vhost_user_set_mem_table() and
vhost_kernel_set_mem_table()?
  1. If yes, how about removing the check of used_memslots entirely?
  2. If no, is it enough to check used_memslots for vhost-user only after
     the memory listener is registered?


Regards,
Jay

> 
> >
> > > > +
> > > >      return 0;
> > > >
> > > >  fail_busyloop:
> >

^ permalink raw reply	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2018-01-03 14:19 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-12-15  8:45 [Qemu-devel] [PATCH v2 0/2] vhost: two fixes Jay Zhou
2017-12-15  8:45 ` [Qemu-devel] [PATCH v2 1/2] vhost: add used memslot number for vhost-user and vhost-kernel separately Jay Zhou
2017-12-22 16:25   ` Igor Mammedov
2017-12-22 19:03     ` Igor Mammedov
2017-12-23  7:09       ` Zhoujian (jay)
2017-12-28 11:11         ` Igor Mammedov
2017-12-28 13:42           ` Zhoujian (jay)
2017-12-23  6:01     ` Zhoujian (jay)
2017-12-15  8:45 ` [Qemu-devel] [PATCH v2 2/2] vhost: double check used memslots number Jay Zhou
2017-12-22 18:48   ` Igor Mammedov
2017-12-22 21:15     ` Michael S. Tsirkin
2017-12-28 11:29       ` Igor Mammedov
2018-01-03 14:19         ` Zhoujian (jay)
2017-12-23  8:27     ` Zhoujian (jay)
2017-12-28 11:19       ` Igor Mammedov
2017-12-23  8:49     ` Zhoujian (jay)
2017-12-15  8:52 ` [Qemu-devel] [PATCH v2 0/2] vhost: two fixes no-reply
2017-12-19 21:46 ` Michael S. Tsirkin
