* [PATCH] vhost-user: add separate memslot counter for vhost-user
@ 2020-09-28 13:17 Jiajun Chen
  2020-10-02  2:05 ` Raphael Norwitz
                   ` (2 more replies)
  0 siblings, 3 replies; 23+ messages in thread
From: Jiajun Chen @ 2020-09-28 13:17 UTC (permalink / raw)
  To: raphael.s.norwitz
  Cc: zhang.zhanghailiang, mst, jasowang, qemu-devel, xiexiangyou,
	imammedo, marcandre.lureau

Currently used_memslots is equal to dev->mem->nregions; this is correct
for vhost-kernel, but not for vhost-user, which only uses the memory
regions that are backed by a file descriptor. In fact, not all of the
memory regions have a file descriptor.
This matters in some scenarios, e.g. when used_memslots is 8 but only
5 memory slots can actually be used by vhost-user: hot-plugging new
RAM fails because vhost_has_free_slot() returns false, even though the
hot plug is in fact safe.
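
To make the numbers concrete (hypothetical values, assuming a backend
limit of 8 slots):

    guest memory regions (dev->mem->nregions):   8
    regions backed by a file descriptor:         5

    old check: used_memslots = nregions  = 8 -> 8 >= limit, hot plug refused
    new check: used_memslots = fd-backed = 5 -> 5 <  limit, hot plug allowed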

--
ChangeList:
v3:
-make used_memslots a member of struct vhost_dev instead of a global static value

v2:
-eliminate the useless used_memslots_exceeded variable and used_memslots_is_exceeded() API

v1:
-vhost-user: add separate memslot counter for vhost-user

Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
---
 hw/virtio/vhost-backend.c         | 12 ++++++++++
 hw/virtio/vhost-user.c            | 25 +++++++++++++++++++++
 hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
 include/hw/virtio/vhost-backend.h |  5 +++++
 include/hw/virtio/vhost.h         |  1 +
 net/vhost-user.c                  |  7 ++++++
 6 files changed, 78 insertions(+), 9 deletions(-)

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 782b1d67d9..7016f23ec5 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -238,6 +238,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
         qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
 }
 
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{
+    dev->used_memslots = dev->mem->nregions;
+}
+
+static unsigned int vhost_kernel_get_used_memslots(struct vhost_dev *dev)
+{
+    return dev->used_memslots;
+}
+
 static const VhostOps kernel_ops = {
         .backend_type = VHOST_BACKEND_TYPE_KERNEL,
         .vhost_backend_init = vhost_kernel_init,
@@ -269,6 +279,8 @@ static const VhostOps kernel_ops = {
 #endif /* CONFIG_VHOST_VSOCK */
         .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
         .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
+        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
+        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
 };
 #endif
 
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 31231218dc..5dea64d8a8 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -2354,6 +2354,29 @@ void vhost_user_cleanup(VhostUserState *user)
     user->chr = NULL;
 }
 
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{
+    int i;
+    dev->used_memslots = 0;
+
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        ram_addr_t offset;
+        MemoryRegion *mr;
+        int fd;
+
+        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
+        if (mr && fd > 0) {
+            dev->used_memslots++;
+        }
+    }
+}
+
+static unsigned int vhost_user_get_used_memslots(struct vhost_dev *dev)
+{
+    return dev->used_memslots;
+}
+
 const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_backend_init = vhost_user_backend_init,
@@ -2387,4 +2410,6 @@ const VhostOps user_ops = {
         .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
         .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
         .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
+        .vhost_set_used_memslots = vhost_user_set_used_memslots,
+        .vhost_get_used_memslots = vhost_user_get_used_memslots,
 };
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 1a1384e7a6..98b967669b 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -45,20 +45,20 @@
 static struct vhost_log *vhost_log;
 static struct vhost_log *vhost_log_shm;
 
-static unsigned int used_memslots;
 static QLIST_HEAD(, vhost_dev) vhost_devices =
     QLIST_HEAD_INITIALIZER(vhost_devices);
 
 bool vhost_has_free_slot(void)
 {
-    unsigned int slots_limit = ~0U;
     struct vhost_dev *hdev;
 
     QLIST_FOREACH(hdev, &vhost_devices, entry) {
-        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
-        slots_limit = MIN(slots_limit, r);
+        if (hdev->vhost_ops->vhost_get_used_memslots(hdev) >=
+            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+            return false;
+        }
     }
-    return slots_limit > used_memslots;
+    return true;
 }
 
 static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -502,7 +502,6 @@ static void vhost_commit(MemoryListener *listener)
                        dev->n_mem_sections * sizeof dev->mem->regions[0];
     dev->mem = g_realloc(dev->mem, regions_size);
     dev->mem->nregions = dev->n_mem_sections;
-    used_memslots = dev->mem->nregions;
     for (i = 0; i < dev->n_mem_sections; i++) {
         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
         struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -678,6 +677,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
         memory_region_ref(section->mr);
     }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
 }
 
 /* Used for both add and nop callbacks */
@@ -693,6 +693,17 @@ static void vhost_region_addnop(MemoryListener *listener,
     vhost_region_add_section(dev, section);
 }
 
+static void vhost_region_del(MemoryListener *listener,
+                             MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    if (!vhost_section(dev, section)) {
+        return;
+    }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
+}
+
 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 {
     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
@@ -1300,6 +1311,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->memory_listener = (MemoryListener) {
         .begin = vhost_begin,
         .commit = vhost_commit,
+        .region_del = vhost_region_del,
         .region_add = vhost_region_addnop,
         .region_nop = vhost_region_addnop,
         .log_start = vhost_log_start,
@@ -1346,9 +1358,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
 
-    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
-        error_report("vhost backend memory slots limit is less"
-                " than current number of present memory slots");
+    /*
+     * If the VM was started without any vhost device, then on the
+     * first vhost device hot-plug there was no earlier check
+     * (vhost_get_used_memslots() was still 0), so the limit needs
+     * to be double-checked here.
+     */
+    if (hdev->vhost_ops->vhost_get_used_memslots(hdev) >
+        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+        error_report("vhost backend memory slots limit is less than"
+                     " current number of present memory slots");
         r = -1;
         if (busyloop_timeout) {
             goto fail_busyloop;
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 8825bd278f..6569c95a43 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
 
 typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
 
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
+typedef unsigned int (*vhost_get_used_memslots_op)(struct vhost_dev *dev);
+
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_backend_init vhost_backend_init;
@@ -168,6 +171,8 @@ typedef struct VhostOps {
     vhost_vq_get_addr_op  vhost_vq_get_addr;
     vhost_get_device_id_op vhost_get_device_id;
     vhost_force_iommu_op vhost_force_iommu;
+    vhost_set_used_memslots_op vhost_set_used_memslots;
+    vhost_get_used_memslots_op vhost_get_used_memslots;
 } VhostOps;
 
 extern const VhostOps user_ops;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 767a95ec0b..5ded21f86d 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -90,6 +90,7 @@ struct vhost_dev {
     QLIST_HEAD(, vhost_iommu) iommu_list;
     IOMMUNotifier n;
     const VhostDevConfigOps *config_ops;
+    unsigned int used_memslots;
 };
 
 struct vhost_net {
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 17532daaf3..7e93955537 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -20,6 +20,7 @@
 #include "qemu/error-report.h"
 #include "qemu/option.h"
 #include "trace.h"
+#include "include/hw/virtio/vhost.h"
 
 typedef struct NetVhostUserState {
     NetClientState nc;
@@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
         qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                  net_vhost_user_event, NULL, nc0->name, NULL,
                                  true);
+
+        if (!vhost_has_free_slot()) {
+            error_report("used memslots exceeded the backend limit, quit "
+                         "loop");
+            goto err;
+        }
     } while (!s->started);
 
     assert(s->vhost_net);
-- 
2.27.0.dirty




* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-09-28 13:17 [PATCH] vhost-user: add separate memslot counter for vhost-user Jiajun Chen
@ 2020-10-02  2:05 ` Raphael Norwitz
  2020-10-12 11:12   ` chenjiajun
  2020-10-06  9:48 ` Igor Mammedov
  2020-10-30  8:39 ` Michael S. Tsirkin
  2 siblings, 1 reply; 23+ messages in thread
From: Raphael Norwitz @ 2020-10-02  2:05 UTC (permalink / raw)
  To: Jiajun Chen
  Cc: zhang.zhanghailiang, Michael S. Tsirkin, jasowang, QEMU,
	xiexiangyou, imammedo, Marc-André Lureau

On Mon, Sep 28, 2020 at 9:17 AM Jiajun Chen <chenjiajun8@huawei.com> wrote:
>
> Used_memslots is equal to dev->mem->nregions now, it is true for
> vhost kernel, but not for vhost user, which uses the memory regions
> that have file descriptor. In fact, not all of the memory regions
> have file descriptor.
> It is usefully in some scenarios, e.g. used_memslots is 8, and only
> 5 memory slots can be used by vhost user, it is failed to hot plug
> a new memory RAM because vhost_has_free_slot just returned false,
> but we can hot plug it safely in fact.
>
> --
> ChangeList:
> v3:
> -make used_memslots a member of struct vhost_dev instead of a global static value
>
> v2:
> -eliminating useless used_memslots_exceeded variable and used_memslots_is_exceeded() API
>
> v1:
> -vhost-user: add separate memslot counter for vhost-user
>
> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>

I'm happy with this from a vhost/vhost-user perspective. vhost-backend
change looks good too. I'm a little confused by what's going on with
net/vhost-user.c.

> ---
>  hw/virtio/vhost-backend.c         | 12 ++++++++++
>  hw/virtio/vhost-user.c            | 25 +++++++++++++++++++++
>  hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
>  include/hw/virtio/vhost-backend.h |  5 +++++
>  include/hw/virtio/vhost.h         |  1 +
>  net/vhost-user.c                  |  7 ++++++
>  6 files changed, 78 insertions(+), 9 deletions(-)
>

> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 17532daaf3..7e93955537 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -20,6 +20,7 @@
>  #include "qemu/error-report.h"
>  #include "qemu/option.h"
>  #include "trace.h"
> +#include "include/hw/virtio/vhost.h"
>
>  typedef struct NetVhostUserState {
>      NetClientState nc;
> @@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>          qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
>                                   net_vhost_user_event, NULL, nc0->name, NULL,
>                                   true);

Can you elaborate on this check here? What does it have to do with
fixing memslots accounting? Maybe it should be in a separate change?

> +
> +        if (!vhost_has_free_slot()) {
> +            error_report("used memslots exceeded the backend limit, quit "
> +                         "loop");
> +            goto err;
> +        }
>      } while (!s->started);
>
>      assert(s->vhost_net);
> --
> 2.27.0.dirty
>



* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-09-28 13:17 [PATCH] vhost-user: add separate memslot counter for vhost-user Jiajun Chen
  2020-10-02  2:05 ` Raphael Norwitz
@ 2020-10-06  9:48 ` Igor Mammedov
  2020-10-14  0:58   ` Raphael Norwitz
  2020-10-30  8:39 ` Michael S. Tsirkin
  2 siblings, 1 reply; 23+ messages in thread
From: Igor Mammedov @ 2020-10-06  9:48 UTC (permalink / raw)
  To: Jiajun Chen
  Cc: raphael.s.norwitz, zhang.zhanghailiang, mst, jasowang,
	qemu-devel, xiexiangyou, marcandre.lureau

On Mon, 28 Sep 2020 21:17:31 +0800
Jiajun Chen <chenjiajun8@huawei.com> wrote:

> Used_memslots is equal to dev->mem->nregions now, it is true for
> vhost kernel, but not for vhost user, which uses the memory regions
> that have file descriptor. In fact, not all of the memory regions
> have file descriptor.
> It is usefully in some scenarios, e.g. used_memslots is 8, and only
> 5 memory slots can be used by vhost user, it is failed to hot plug
> a new memory RAM because vhost_has_free_slot just returned false,
> but we can hot plug it safely in fact.

I had the impression that all guest RAM has to be shared with vhost,
so a combination of anon and fd-based RAM couldn't work.
Am I wrong?

> 
> --
> ChangeList:
> v3:
> -make used_memslots a member of struct vhost_dev instead of a global static value
it's a global resource, so why?

> 
> v2:
> -eliminating useless used_memslots_exceeded variable and used_memslots_is_exceeded() API
> 
> v1:
> -vhost-user: add separate memslot counter for vhost-user
> 
> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
> ---
>  hw/virtio/vhost-backend.c         | 12 ++++++++++
>  hw/virtio/vhost-user.c            | 25 +++++++++++++++++++++
>  hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
>  include/hw/virtio/vhost-backend.h |  5 +++++
>  include/hw/virtio/vhost.h         |  1 +
>  net/vhost-user.c                  |  7 ++++++
>  6 files changed, 78 insertions(+), 9 deletions(-)
> 
> diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> index 782b1d67d9..7016f23ec5 100644
> --- a/hw/virtio/vhost-backend.c
> +++ b/hw/virtio/vhost-backend.c
> @@ -238,6 +238,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
>          qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
>  }
>  
> +static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
> +{
> +    dev->used_memslots = dev->mem->nregions;
> +}
> +
> +static unsigned int vhost_kernel_get_used_memslots(struct vhost_dev *dev)
> +{
> +    return dev->used_memslots;
> +}
> +
>  static const VhostOps kernel_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_KERNEL,
>          .vhost_backend_init = vhost_kernel_init,
> @@ -269,6 +279,8 @@ static const VhostOps kernel_ops = {
>  #endif /* CONFIG_VHOST_VSOCK */
>          .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
>          .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
> +        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
>  };
>  #endif
>  
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 31231218dc..5dea64d8a8 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -2354,6 +2354,29 @@ void vhost_user_cleanup(VhostUserState *user)
>      user->chr = NULL;
>  }
>  
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> +    int i;
> +    dev->used_memslots = 0;
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +        int fd;
> +
> +        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
> +        if (mr && fd > 0) {
> +            dev->used_memslots++;
> +        }
> +    }
> +}
> +
> +static unsigned int vhost_user_get_used_memslots(struct vhost_dev *dev)
> +{
> +    return dev->used_memslots;
> +}
> +
>  const VhostOps user_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_USER,
>          .vhost_backend_init = vhost_user_backend_init,
> @@ -2387,4 +2410,6 @@ const VhostOps user_ops = {
>          .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
>          .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
>          .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
> +        .vhost_set_used_memslots = vhost_user_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_user_get_used_memslots,
>  };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 1a1384e7a6..98b967669b 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -45,20 +45,20 @@
>  static struct vhost_log *vhost_log;
>  static struct vhost_log *vhost_log_shm;
>  
> -static unsigned int used_memslots;
>  static QLIST_HEAD(, vhost_dev) vhost_devices =
>      QLIST_HEAD_INITIALIZER(vhost_devices);
>  
>  bool vhost_has_free_slot(void)
>  {
> -    unsigned int slots_limit = ~0U;
>      struct vhost_dev *hdev;
>  
>      QLIST_FOREACH(hdev, &vhost_devices, entry) {
> -        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> -        slots_limit = MIN(slots_limit, r);
> +        if (hdev->vhost_ops->vhost_get_used_memslots(hdev) >=
> +            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +            return false;
> +        }
>      }
> -    return slots_limit > used_memslots;
> +    return true;
>  }
>  
>  static void vhost_dev_sync_region(struct vhost_dev *dev,
> @@ -502,7 +502,6 @@ static void vhost_commit(MemoryListener *listener)
>                         dev->n_mem_sections * sizeof dev->mem->regions[0];
>      dev->mem = g_realloc(dev->mem, regions_size);
>      dev->mem->nregions = dev->n_mem_sections;
> -    used_memslots = dev->mem->nregions;
>      for (i = 0; i < dev->n_mem_sections; i++) {
>          struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
>          struct MemoryRegionSection *mrs = dev->mem_sections + i;
> @@ -678,6 +677,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
>          dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
>          memory_region_ref(section->mr);
>      }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
>  }
>  
>  /* Used for both add and nop callbacks */
> @@ -693,6 +693,17 @@ static void vhost_region_addnop(MemoryListener *listener,
>      vhost_region_add_section(dev, section);
>  }
>  
> +static void vhost_region_del(MemoryListener *listener,
> +                             MemoryRegionSection *section)
> +{
> +    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
> +                                         memory_listener);
> +    if (!vhost_section(dev, section)) {
> +        return;
> +    }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
> +}
> +
>  static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>  {
>      struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
> @@ -1300,6 +1311,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      hdev->memory_listener = (MemoryListener) {
>          .begin = vhost_begin,
>          .commit = vhost_commit,
> +        .region_del = vhost_region_del,
>          .region_add = vhost_region_addnop,
>          .region_nop = vhost_region_addnop,
>          .log_start = vhost_log_start,
> @@ -1346,9 +1358,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>  
> -    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> -        error_report("vhost backend memory slots limit is less"
> -                " than current number of present memory slots");
> +    /*
> +     * If we started a VM without any vhost device,
> +     * for the first time vhost device hot-plug
> +     * (vhost_get_used_memslots is always 0),
> +     * so it needs to double check here.
> +     */
> +    if (hdev->vhost_ops->vhost_get_used_memslots(hdev) >
> +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +        error_report("vhost backend memory slots limit is less than"
> +                     " current number of present memory slots");
>          r = -1;
>          if (busyloop_timeout) {
>              goto fail_busyloop;
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index 8825bd278f..6569c95a43 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
>  
>  typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
>  
> +typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
> +typedef unsigned int (*vhost_get_used_memslots_op)(struct vhost_dev *dev);
> +
>  typedef struct VhostOps {
>      VhostBackendType backend_type;
>      vhost_backend_init vhost_backend_init;
> @@ -168,6 +171,8 @@ typedef struct VhostOps {
>      vhost_vq_get_addr_op  vhost_vq_get_addr;
>      vhost_get_device_id_op vhost_get_device_id;
>      vhost_force_iommu_op vhost_force_iommu;
> +    vhost_set_used_memslots_op vhost_set_used_memslots;
> +    vhost_get_used_memslots_op vhost_get_used_memslots;
>  } VhostOps;
>  
>  extern const VhostOps user_ops;
> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> index 767a95ec0b..5ded21f86d 100644
> --- a/include/hw/virtio/vhost.h
> +++ b/include/hw/virtio/vhost.h
> @@ -90,6 +90,7 @@ struct vhost_dev {
>      QLIST_HEAD(, vhost_iommu) iommu_list;
>      IOMMUNotifier n;
>      const VhostDevConfigOps *config_ops;
> +    unsigned int used_memslots;
>  };
>  
>  struct vhost_net {
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 17532daaf3..7e93955537 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -20,6 +20,7 @@
>  #include "qemu/error-report.h"
>  #include "qemu/option.h"
>  #include "trace.h"
> +#include "include/hw/virtio/vhost.h"
>  
>  typedef struct NetVhostUserState {
>      NetClientState nc;
> @@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>          qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
>                                   net_vhost_user_event, NULL, nc0->name, NULL,
>                                   true);
> +
> +        if (!vhost_has_free_slot()) {
> +            error_report("used memslots exceeded the backend limit, quit "
> +                         "loop");
> +            goto err;
> +        }
>      } while (!s->started);
>  
>      assert(s->vhost_net);




* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-10-02  2:05 ` Raphael Norwitz
@ 2020-10-12 11:12   ` chenjiajun
  2020-10-14  1:22     ` Raphael Norwitz
  0 siblings, 1 reply; 23+ messages in thread
From: chenjiajun @ 2020-10-12 11:12 UTC (permalink / raw)
  To: Raphael Norwitz
  Cc: zhang.zhanghailiang, Michael S. Tsirkin, jasowang, QEMU,
	xiexiangyou, imammedo, Marc-André Lureau



On 2020/10/2 10:05, Raphael Norwitz wrote:
> On Mon, Sep 28, 2020 at 9:17 AM Jiajun Chen <chenjiajun8@huawei.com> wrote:
>>
>> Used_memslots is equal to dev->mem->nregions now, it is true for
>> vhost kernel, but not for vhost user, which uses the memory regions
>> that have file descriptor. In fact, not all of the memory regions
>> have file descriptor.
>> It is usefully in some scenarios, e.g. used_memslots is 8, and only
>> 5 memory slots can be used by vhost user, it is failed to hot plug
>> a new memory RAM because vhost_has_free_slot just returned false,
>> but we can hot plug it safely in fact.
>>
>> --
>> ChangeList:
>> v3:
>> -make used_memslots a member of struct vhost_dev instead of a global static value
>>
>> v2:
>> -eliminating useless used_memslots_exceeded variable and used_memslots_is_exceeded() API
>>
>> v1:
>> -vhost-user: add separate memslot counter for vhost-user
>>
>> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
>> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
> 
> I'm happy with this from a vhost/vhost-user perspective. vhost-backend
> change looks good too. I'm a little confused by what's going on with
> net/vhost-user.c.
> 
>> ---
>>  hw/virtio/vhost-backend.c         | 12 ++++++++++
>>  hw/virtio/vhost-user.c            | 25 +++++++++++++++++++++
>>  hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
>>  include/hw/virtio/vhost-backend.h |  5 +++++
>>  include/hw/virtio/vhost.h         |  1 +
>>  net/vhost-user.c                  |  7 ++++++
>>  6 files changed, 78 insertions(+), 9 deletions(-)
>>
> 
>> diff --git a/net/vhost-user.c b/net/vhost-user.c
>> index 17532daaf3..7e93955537 100644
>> --- a/net/vhost-user.c
>> +++ b/net/vhost-user.c
>> @@ -20,6 +20,7 @@
>>  #include "qemu/error-report.h"
>>  #include "qemu/option.h"
>>  #include "trace.h"
>> +#include "include/hw/virtio/vhost.h"
>>
>>  typedef struct NetVhostUserState {
>>      NetClientState nc;
>> @@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>>          qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
>>                                   net_vhost_user_event, NULL, nc0->name, NULL,
>>                                   true);
> 
> Can you elaborate on this check here? What does it have to do with
> fixing memslots accounting? Maybe it should be in a separate change?
> 
When the number of virtual machine memslots exceeds the upper limit the
backend supports, the QEMU main thread may enter an endless loop and
become unable to process other requests. Since the number of memslots
will not decrease by itself, a check is added here to break out of the
loop in that scenario: a newly started virtual machine fails to boot,
and a hot-plugged network card fails to hot-plug.
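
Roughly, the loop in question (a simplified sketch of the retry loop in
net_vhost_user_init(), with error handling trimmed; it follows the
patched code quoted below):

    do {
        /* wait for the vhost-user backend to (re)connect */
        if (qemu_chr_fe_wait_connected(&s->chr, &err) < 0) {
            goto err;
        }
        qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                 net_vhost_user_event, NULL, nc0->name,
                                 NULL, true);
        /*
         * The added escape hatch: if the backend keeps rejecting the
         * memory table because its slot limit is exceeded, s->started
         * never becomes true, and without this check the main thread
         * would spin here forever.
         */
        if (!vhost_has_free_slot()) {
            error_report("used memslots exceeded the backend limit");
            goto err;
        }
    } while (!s->started);
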
>> +
>> +        if (!vhost_has_free_slot()) {
>> +            error_report("used memslots exceeded the backend limit, quit "
>> +                         "loop");
>> +            goto err;
>> +        }
>>      } while (!s->started);
>>
>>      assert(s->vhost_net);
>> --
>> 2.27.0.dirty
>>
> .
> 



* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-10-06  9:48 ` Igor Mammedov
@ 2020-10-14  0:58   ` Raphael Norwitz
  2020-10-14  7:08     ` Michael S. Tsirkin
  0 siblings, 1 reply; 23+ messages in thread
From: Raphael Norwitz @ 2020-10-14  0:58 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: zhang.zhanghailiang, Michael S. Tsirkin, jasowang, QEMU,
	xiexiangyou, Jiajun Chen, Marc-André Lureau

On Tue, Oct 6, 2020 at 5:48 AM Igor Mammedov <imammedo@redhat.com> wrote:
>
> On Mon, 28 Sep 2020 21:17:31 +0800
> Jiajun Chen <chenjiajun8@huawei.com> wrote:
>
> > Used_memslots is equal to dev->mem->nregions now, it is true for
> > vhost kernel, but not for vhost user, which uses the memory regions
> > that have file descriptor. In fact, not all of the memory regions
> > have file descriptor.
> > It is usefully in some scenarios, e.g. used_memslots is 8, and only
> > 5 memory slots can be used by vhost user, it is failed to hot plug
> > a new memory RAM because vhost_has_free_slot just returned false,
> > but we can hot plug it safely in fact.
>
> I had an impression that all guest RAM has to be shared with vhost,
> so combination of anon and fd based RAM couldn't work.
> Am I wrong?

I'm not sure about the kernel backend, but I've tested adding anon
memory to a VM with a vhost-user-scsi device and it works (eventually
the VM crashed, but I could see the guest recognized the anon RAM).
The vhost-user code is designed to work with both. I'm not sure I see
a use case, but if there is one, this would be a valid issue. Maybe
Jiajun or Jianjay can elaborate.
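
For reference, one way such a test might be driven (illustrative
monitor commands, assuming pc-dimm hotplug; not taken from this
thread):

    (qemu) object_add memory-backend-ram,id=mem1,size=1G
    (qemu) device_add pc-dimm,id=dimm1,memdev=mem1

memory-backend-ram is anonymous, so the resulting region reaches
vhost-user without an fd; an fd-backed equivalent would use
memory-backend-file with share=on (or memory-backend-memfd) instead.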

>
> >
> > --
> > ChangeList:
> > v3:
> > -make used_memslots a member of struct vhost_dev instead of a global static value
> it's global resource, so why?

I suggested it because I thought it made the code a little cleaner.
I'm not opposed to changing it back, or having it stored at the
vhost_user level.



* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-10-12 11:12   ` chenjiajun
@ 2020-10-14  1:22     ` Raphael Norwitz
  0 siblings, 0 replies; 23+ messages in thread
From: Raphael Norwitz @ 2020-10-14  1:22 UTC (permalink / raw)
  To: chenjiajun
  Cc: zhang.zhanghailiang, Michael S. Tsirkin, jasowang, QEMU,
	xiexiangyou, Igor Mammedov, Marc-André Lureau

On Mon, Oct 12, 2020 at 7:12 AM chenjiajun <chenjiajun8@huawei.com> wrote:
>
>
>
> On 2020/10/2 10:05, Raphael Norwitz wrote:
> > On Mon, Sep 28, 2020 at 9:17 AM Jiajun Chen <chenjiajun8@huawei.com> wrote:
> >>
> >> Used_memslots is equal to dev->mem->nregions now, it is true for
> >> vhost kernel, but not for vhost user, which uses the memory regions
> >> that have file descriptor. In fact, not all of the memory regions
> >> have file descriptor.
> >> It is usefully in some scenarios, e.g. used_memslots is 8, and only
> >> 5 memory slots can be used by vhost user, it is failed to hot plug
> >> a new memory RAM because vhost_has_free_slot just returned false,
> >> but we can hot plug it safely in fact.
> >>
> >> --
> >> ChangeList:
> >> v3:
> >> -make used_memslots a member of struct vhost_dev instead of a global static value
> >>
> >> v2:
> >> -eliminating useless used_memslots_exceeded variable and used_memslots_is_exceeded() API
> >>
> >> v1:
> >> -vhost-user: add separate memslot counter for vhost-user
> >>
> >> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
> >> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
> >
> > I'm happy with this from a vhost/vhost-user perspective. vhost-backend
> > change looks good too. I'm a little confused by what's going on with
> > net/vhost-user.c.
> >
> >> ---
> >>  hw/virtio/vhost-backend.c         | 12 ++++++++++
> >>  hw/virtio/vhost-user.c            | 25 +++++++++++++++++++++
> >>  hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
> >>  include/hw/virtio/vhost-backend.h |  5 +++++
> >>  include/hw/virtio/vhost.h         |  1 +
> >>  net/vhost-user.c                  |  7 ++++++
> >>  6 files changed, 78 insertions(+), 9 deletions(-)
> >>
> >
> >> diff --git a/net/vhost-user.c b/net/vhost-user.c
> >> index 17532daaf3..7e93955537 100644
> >> --- a/net/vhost-user.c
> >> +++ b/net/vhost-user.c
> >> @@ -20,6 +20,7 @@
> >>  #include "qemu/error-report.h"
> >>  #include "qemu/option.h"
> >>  #include "trace.h"
> >> +#include "include/hw/virtio/vhost.h"
> >>
> >>  typedef struct NetVhostUserState {
> >>      NetClientState nc;
> >> @@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
> >>          qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
> >>                                   net_vhost_user_event, NULL, nc0->name, NULL,
> >>                                   true);
> >
> > Can you elaborate on this check here? What does it have to do with
> > fixing memslots accounting? Maybe it should be in a separate change?
> >
> When the number of virtual machine memslots exceeds the upper limit of the back-end support,
> QEMU main thread may enters an endless loop and cannot process other requests.
> And number of memslots will not automatically decrease, so add a check here to exit from loop
> in this scenario. For the newly started virtual machine, boot fails; for the hot plug network card,
> hot plug fails.

I don't understand what you mean by "number of memslots will not
automatically decrease". Where did this happen before and what changes
when the new memslots counter is introduced?

> >> +
> >> +        if (!vhost_has_free_slot()) {
> >> +            error_report("used memslots exceeded the backend limit, quit "
> >> +                         "loop");
> >> +            goto err;
> >> +        }
> >>      } while (!s->started);
> >>
> >>      assert(s->vhost_net);
> >> --
> >> 2.27.0.dirty
> >>
> > .
> >



* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-10-14  0:58   ` Raphael Norwitz
@ 2020-10-14  7:08     ` Michael S. Tsirkin
  2020-10-14 16:11       ` Raphael Norwitz
  0 siblings, 1 reply; 23+ messages in thread
From: Michael S. Tsirkin @ 2020-10-14  7:08 UTC (permalink / raw)
  To: Raphael Norwitz
  Cc: zhang.zhanghailiang, jasowang, QEMU, xiexiangyou,
	Marc-André Lureau, Jiajun Chen, Igor Mammedov

On Tue, Oct 13, 2020 at 08:58:59PM -0400, Raphael Norwitz wrote:
> On Tue, Oct 6, 2020 at 5:48 AM Igor Mammedov <imammedo@redhat.com> wrote:
> >
> > On Mon, 28 Sep 2020 21:17:31 +0800
> > Jiajun Chen <chenjiajun8@huawei.com> wrote:
> >
> > > Used_memslots is equal to dev->mem->nregions now, it is true for
> > > vhost kernel, but not for vhost user, which uses the memory regions
> > > that have file descriptor. In fact, not all of the memory regions
> > > have file descriptor.
> > > It is usefully in some scenarios, e.g. used_memslots is 8, and only
> > > 5 memory slots can be used by vhost user, it is failed to hot plug
> > > a new memory RAM because vhost_has_free_slot just returned false,
> > > but we can hot plug it safely in fact.
> >
> > I had an impression that all guest RAM has to be shared with vhost,
> > so combination of anon and fd based RAM couldn't work.
> > Am I wrong?
> 
> I'm not sure about the kernel backend, but I've tested adding anon
> memory to a VM with a vhost-user-scsi device and it works (eventually
> the VM crashed, but I could see the guest recognized the anon RAM).
> The vhost-user code is designed to work with both. I'm not sure I see
> a use case, but if there is one, this would be a valid issue. Maybe
> Jiajun or Jianjay can elaborate.

Hmm does not vhost-user skip all regions that do not have an fd?


        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -1;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }



In your test, is it possible that you were lucky and guest did not send
any data from anon memory to the device?



> >
> > >
> > > --
> > > ChangeList:
> > > v3:
> > > -make used_memslots a member of struct vhost_dev instead of a global static value
> > it's global resource, so why?
> 
> I suggested it because I thought it made the code a little cleaner.
> I'm not opposed to changing it back, or having it stored at the
> vhost_user level.




* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-10-14  7:08     ` Michael S. Tsirkin
@ 2020-10-14 16:11       ` Raphael Norwitz
  2020-10-14 16:26         ` Michael S. Tsirkin
  2020-10-21 14:34         ` Igor Mammedov
  0 siblings, 2 replies; 23+ messages in thread
From: Raphael Norwitz @ 2020-10-14 16:11 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: zhang.zhanghailiang, jasowang, QEMU, xiexiangyou,
	Marc-André Lureau, Jiajun Chen, Igor Mammedov

On Wed, Oct 14, 2020 at 3:08 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, Oct 13, 2020 at 08:58:59PM -0400, Raphael Norwitz wrote:
> > On Tue, Oct 6, 2020 at 5:48 AM Igor Mammedov <imammedo@redhat.com> wrote:
> > >
> > > On Mon, 28 Sep 2020 21:17:31 +0800
> > > Jiajun Chen <chenjiajun8@huawei.com> wrote:
> > >
> > > > Used_memslots is equal to dev->mem->nregions now, it is true for
> > > > vhost kernel, but not for vhost user, which uses the memory regions
> > > > that have file descriptor. In fact, not all of the memory regions
> > > > have file descriptor.
> > > > It is usefully in some scenarios, e.g. used_memslots is 8, and only
> > > > 5 memory slots can be used by vhost user, it is failed to hot plug
> > > > a new memory RAM because vhost_has_free_slot just returned false,
> > > > but we can hot plug it safely in fact.
> > >
> > > I had an impression that all guest RAM has to be shared with vhost,
> > > so combination of anon and fd based RAM couldn't work.
> > > Am I wrong?
> >
> > I'm not sure about the kernel backend, but I've tested adding anon
> > memory to a VM with a vhost-user-scsi device and it works (eventually
> > the VM crashed, but I could see the guest recognized the anon RAM).
> > The vhost-user code is designed to work with both. I'm not sure I see
> > a use case, but if there is one, this would be a valid issue. Maybe
> > Jiajun or Jianjay can elaborate.
>
> Hmm does not vhost-user skip all regions that do not have an fd?
>
>
>         mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
>         if (fd > 0) {
>             if (track_ramblocks) {
>                 assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
>                 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
>                                                       reg->memory_size,
>                                                       reg->guest_phys_addr,
>                                                       reg->userspace_addr,
>                                                       offset);
>                 u->region_rb_offset[i] = offset;
>                 u->region_rb[i] = mr->ram_block;
>             } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
>                 error_report("Failed preparing vhost-user memory table msg");
>                 return -1;
>             }
>             vhost_user_fill_msg_region(&region_buffer, reg, offset);
>             msg->payload.memory.regions[*fd_num] = region_buffer;
>             fds[(*fd_num)++] = fd;
>         } else if (track_ramblocks) {
>             u->region_rb_offset[i] = 0;
>             u->region_rb[i] = NULL;
>         }
>
>
>
> In your test, is it possible that you were lucky and guest did not send
> any data from anon memory to the device?

Yes - vhost-user skips the region and does not send anon memory to the
device, but it does not fail the hot-add operation.

In my test the fd > 0 check definitely failed, and QEMU went on to add
the memory without sending it to the backend. I understand why this can be
problematic (it did eventually crash the VM), but it seems like we
allow it as of today. I can't think of a valid reason why you would
want anon and FD ram together, but I figured there may be a reason
since the vhost-user code allows for it. Should we maybe block that
path altogether instead of patching it up?
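
To sketch what "blocking that path" might look like (hypothetical, not
something this patch does), vhost_user_set_mem_table() could refuse
fd-less RAM regions instead of silently skipping them:

    mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
    if (fd <= 0) {
        /* hypothetical: fail the update rather than drop the region */
        error_report("vhost-user: region at 0x%" PRIx64
                     " has no backing fd", reg->guest_phys_addr);
        return -1;
    }

though regions that legitimately lack an fd (IO, ROMs) would still need
to be exempted.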

>
>
>
> > >
> > > >
> > > > --
> > > > ChangeList:
> > > > v3:
> > > > -make used_memslots a member of struct vhost_dev instead of a global static value
> > > it's global resource, so why?
> >
> > I suggested it because I thought it made the code a little cleaner.
> > I'm not opposed to changing it back, or having it stored at the
> > vhost_user level.
>



* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-10-14 16:11       ` Raphael Norwitz
@ 2020-10-14 16:26         ` Michael S. Tsirkin
  2020-10-14 17:21           ` Raphael Norwitz
  2020-10-21 14:34         ` Igor Mammedov
  1 sibling, 1 reply; 23+ messages in thread
From: Michael S. Tsirkin @ 2020-10-14 16:26 UTC (permalink / raw)
  To: Raphael Norwitz
  Cc: zhang.zhanghailiang, jasowang, QEMU, xiexiangyou,
	Marc-André Lureau, Jiajun Chen, Igor Mammedov

On Wed, Oct 14, 2020 at 12:11:34PM -0400, Raphael Norwitz wrote:
> On Wed, Oct 14, 2020 at 3:08 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Tue, Oct 13, 2020 at 08:58:59PM -0400, Raphael Norwitz wrote:
> > > On Tue, Oct 6, 2020 at 5:48 AM Igor Mammedov <imammedo@redhat.com> wrote:
> > > >
> > > > On Mon, 28 Sep 2020 21:17:31 +0800
> > > > Jiajun Chen <chenjiajun8@huawei.com> wrote:
> > > >
> > > > > Used_memslots is equal to dev->mem->nregions now, it is true for
> > > > > vhost kernel, but not for vhost user, which uses the memory regions
> > > > > that have file descriptor. In fact, not all of the memory regions
> > > > > have file descriptor.
> > > > > It is usefully in some scenarios, e.g. used_memslots is 8, and only
> > > > > 5 memory slots can be used by vhost user, it is failed to hot plug
> > > > > a new memory RAM because vhost_has_free_slot just returned false,
> > > > > but we can hot plug it safely in fact.
> > > >
> > > > I had an impression that all guest RAM has to be shared with vhost,
> > > > so combination of anon and fd based RAM couldn't work.
> > > > Am I wrong?
> > >
> > > I'm not sure about the kernel backend, but I've tested adding anon
> > > memory to a VM with a vhost-user-scsi device and it works (eventually
> > > the VM crashed, but I could see the guest recognized the anon RAM).
> > > The vhost-user code is designed to work with both. I'm not sure I see
> > > a use case, but if there is one, this would be a valid issue. Maybe
> > > Jiajun or Jianjay can elaborate.
> >
> > Hmm does not vhost-user skip all regions that do not have an fd?
> >
> >
> >         mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
> >         if (fd > 0) {
> >             if (track_ramblocks) {
> >                 assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
> >                 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
> >                                                       reg->memory_size,
> >                                                       reg->guest_phys_addr,
> >                                                       reg->userspace_addr,
> >                                                       offset);
> >                 u->region_rb_offset[i] = offset;
> >                 u->region_rb[i] = mr->ram_block;
> >             } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
> >                 error_report("Failed preparing vhost-user memory table msg");
> >                 return -1;
> >             }
> >             vhost_user_fill_msg_region(&region_buffer, reg, offset);
> >             msg->payload.memory.regions[*fd_num] = region_buffer;
> >             fds[(*fd_num)++] = fd;
> >         } else if (track_ramblocks) {
> >             u->region_rb_offset[i] = 0;
> >             u->region_rb[i] = NULL;
> >         }
> >
> >
> >
> > In your test, is it possible that you were lucky and guest did not send
> > any data from anon memory to the device?
> 
> Yes - vhost-user skips the region and does not send anon memory to the
> device, but it does not fail the hot-add operation.
> 
> In my test the fd > 0 check definitely failed and went on to add the
> memory without sending it to the backend. I understand why this can be
> problematic (it did eventually crash the VM), but it seems like we
> allow it as of today. I can't think of a valid reason why you would
> want anon and FD ram together, but I figured there may be a reason
> since the vhost-user code allows for it. Should we maybe block that
> path altogether instead of patching it up?


Hmm where do we patch it up? Reason we might have non FD MRs is IIUC
due to things like IO regions...


> >
> >
> >
> > > >
> > > > >
> > > > > --
> > > > > ChangeList:
> > > > > v3:
> > > > > -make used_memslots a member of struct vhost_dev instead of a global static value
> > > > it's global resource, so why?
> > >
> > > I suggested it because I thought it made the code a little cleaner.
> > > I'm not opposed to changing it back, or having it stored at the
> > > vhost_user level.
> >




* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-10-14 16:26         ` Michael S. Tsirkin
@ 2020-10-14 17:21           ` Raphael Norwitz
  2020-10-14 17:54             ` Michael S. Tsirkin
  0 siblings, 1 reply; 23+ messages in thread
From: Raphael Norwitz @ 2020-10-14 17:21 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: zhang.zhanghailiang, jasowang, QEMU, xiexiangyou,
	Marc-André Lureau, Jiajun Chen, Igor Mammedov

On Wed, Oct 14, 2020 at 12:26 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Wed, Oct 14, 2020 at 12:11:34PM -0400, Raphael Norwitz wrote:
> > On Wed, Oct 14, 2020 at 3:08 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Tue, Oct 13, 2020 at 08:58:59PM -0400, Raphael Norwitz wrote:
> > > > On Tue, Oct 6, 2020 at 5:48 AM Igor Mammedov <imammedo@redhat.com> wrote:
> > > > >
> > > > > On Mon, 28 Sep 2020 21:17:31 +0800
> > > > > Jiajun Chen <chenjiajun8@huawei.com> wrote:
> > > > >
> > > > > > Used_memslots is equal to dev->mem->nregions now, it is true for
> > > > > > vhost kernel, but not for vhost user, which uses the memory regions
> > > > > > that have file descriptor. In fact, not all of the memory regions
> > > > > > have file descriptor.
> > > > > > It is usefully in some scenarios, e.g. used_memslots is 8, and only
> > > > > > 5 memory slots can be used by vhost user, it is failed to hot plug
> > > > > > a new memory RAM because vhost_has_free_slot just returned false,
> > > > > > but we can hot plug it safely in fact.
> > > > >
> > > > > I had an impression that all guest RAM has to be shared with vhost,
> > > > > so combination of anon and fd based RAM couldn't work.
> > > > > Am I wrong?
> > > >
> > > > I'm not sure about the kernel backend, but I've tested adding anon
> > > > memory to a VM with a vhost-user-scsi device and it works (eventually
> > > > the VM crashed, but I could see the guest recognized the anon RAM).
> > > > The vhost-user code is designed to work with both. I'm not sure I see
> > > > a use case, but if there is one, this would be a valid issue. Maybe
> > > > Jiajun or Jianjay can elaborate.
> > >
> > > Hmm does not vhost-user skip all regions that do not have an fd?
> > >
> > >
> > >         mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
> > >         if (fd > 0) {
> > >             if (track_ramblocks) {
> > >                 assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
> > >                 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
> > >                                                       reg->memory_size,
> > >                                                       reg->guest_phys_addr,
> > >                                                       reg->userspace_addr,
> > >                                                       offset);
> > >                 u->region_rb_offset[i] = offset;
> > >                 u->region_rb[i] = mr->ram_block;
> > >             } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
> > >                 error_report("Failed preparing vhost-user memory table msg");
> > >                 return -1;
> > >             }
> > >             vhost_user_fill_msg_region(&region_buffer, reg, offset);
> > >             msg->payload.memory.regions[*fd_num] = region_buffer;
> > >             fds[(*fd_num)++] = fd;
> > >         } else if (track_ramblocks) {
> > >             u->region_rb_offset[i] = 0;
> > >             u->region_rb[i] = NULL;
> > >         }
> > >
> > >
> > >
> > > In your test, is it possible that you were lucky and guest did not send
> > > any data from anon memory to the device?
> >
> > Yes - vhost-user skips the region and does not send anon memory to the
> > device, but it does not fail the hot-add operation.
> >
> > In my test the fd > 0 check definitely failed and went on to add the
> > memory without sending it to the backend. I understand why this can be
> > problematic (it did eventually crash the VM), but it seems like we
> > allow it as of today. I can't think of a valid reason why you would
> > want anon and FD ram together, but I figured there may be a reason
> > since the vhost-user code allows for it. Should we maybe block that
> > path altogether instead of patching it up?
>
>
> Hmm where do we patch it up? Reason we might have non FD MRs is IIUC
> due to things like IO regions...

The issue is that today such non FD MRs count towards the vhost-user
max ramslots limit even though there is no good reason for them to. By
"patching it up", I mean accepting this change, which makes it so that
the vhost-user max ramslots limit only applies to FD RAM regions.
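
Concretely, with this change the per-backend check in
vhost_has_free_slot() becomes (taken from the patch):

    if (hdev->vhost_ops->vhost_get_used_memslots(hdev) >=
        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        return false;  /* this backend is out of slots */
    }

so for vhost-user only fd-backed regions count against the limit.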

>
>
> > >
> > >
> > >
> > > > >
> > > > > >
> > > > > > --
> > > > > > ChangeList:
> > > > > > v3:
> > > > > > -make used_memslots a member of struct vhost_dev instead of a global static value
> > > > > it's global resource, so why?
> > > >
> > > > I suggested it because I thought it made the code a little cleaner.
> > > > I'm not opposed to changing it back, or having it stored at the
> > > > vhost_user level.
> > >
>



* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-10-14 17:21           ` Raphael Norwitz
@ 2020-10-14 17:54             ` Michael S. Tsirkin
  0 siblings, 0 replies; 23+ messages in thread
From: Michael S. Tsirkin @ 2020-10-14 17:54 UTC (permalink / raw)
  To: Raphael Norwitz
  Cc: zhang.zhanghailiang, jasowang, QEMU, xiexiangyou,
	Marc-André Lureau, Jiajun Chen, Igor Mammedov

On Wed, Oct 14, 2020 at 01:21:39PM -0400, Raphael Norwitz wrote:
> On Wed, Oct 14, 2020 at 12:26 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Wed, Oct 14, 2020 at 12:11:34PM -0400, Raphael Norwitz wrote:
> > > On Wed, Oct 14, 2020 at 3:08 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > >
> > > > On Tue, Oct 13, 2020 at 08:58:59PM -0400, Raphael Norwitz wrote:
> > > > > On Tue, Oct 6, 2020 at 5:48 AM Igor Mammedov <imammedo@redhat.com> wrote:
> > > > > >
> > > > > > On Mon, 28 Sep 2020 21:17:31 +0800
> > > > > > Jiajun Chen <chenjiajun8@huawei.com> wrote:
> > > > > >
> > > > > > > Used_memslots is equal to dev->mem->nregions now, it is true for
> > > > > > > vhost kernel, but not for vhost user, which uses the memory regions
> > > > > > > that have file descriptor. In fact, not all of the memory regions
> > > > > > > have file descriptor.
> > > > > > > It is usefully in some scenarios, e.g. used_memslots is 8, and only
> > > > > > > 5 memory slots can be used by vhost user, it is failed to hot plug
> > > > > > > a new memory RAM because vhost_has_free_slot just returned false,
> > > > > > > but we can hot plug it safely in fact.
> > > > > >
> > > > > > I had an impression that all guest RAM has to be shared with vhost,
> > > > > > so combination of anon and fd based RAM couldn't work.
> > > > > > Am I wrong?
> > > > >
> > > > > I'm not sure about the kernel backend, but I've tested adding anon
> > > > > memory to a VM with a vhost-user-scsi device and it works (eventually
> > > > > the VM crashed, but I could see the guest recognized the anon RAM).
> > > > > The vhost-user code is designed to work with both. I'm not sure I see
> > > > > a use case, but if there is one, this would be a valid issue. Maybe
> > > > > Jiajun or Jianjay can elaborate.
> > > >
> > > > Hmm does not vhost-user skip all regions that do not have an fd?
> > > >
> > > >
> > > >         mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
> > > >         if (fd > 0) {
> > > >             if (track_ramblocks) {
> > > >                 assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
> > > >                 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
> > > >                                                       reg->memory_size,
> > > >                                                       reg->guest_phys_addr,
> > > >                                                       reg->userspace_addr,
> > > >                                                       offset);
> > > >                 u->region_rb_offset[i] = offset;
> > > >                 u->region_rb[i] = mr->ram_block;
> > > >             } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
> > > >                 error_report("Failed preparing vhost-user memory table msg");
> > > >                 return -1;
> > > >             }
> > > >             vhost_user_fill_msg_region(&region_buffer, reg, offset);
> > > >             msg->payload.memory.regions[*fd_num] = region_buffer;
> > > >             fds[(*fd_num)++] = fd;
> > > >         } else if (track_ramblocks) {
> > > >             u->region_rb_offset[i] = 0;
> > > >             u->region_rb[i] = NULL;
> > > >         }
> > > >
> > > >
> > > >
> > > > In your test, is it possible that you were lucky and guest did not send
> > > > any data from anon memory to the device?
> > >
> > > Yes - vhost-user skips the region and does not send anon memory to the
> > > device, but it does not fail the hot-add operation.
> > >
> > > In my test the fd > 0 check definitely failed and went on to add the
> > > memory without sending it to the backend. I understand why this can be
> > > problematic (it did eventually crash the VM), but it seems like we
> > > allow it as of today. I can't think of a valid reason why you would
> > > want anon and FD ram together, but I figured there may be a reason
> > > since the vhost-user code allows for it. Should we maybe block that
> > > path altogether instead of patching it up?
> >
> >
> > Hmm where do we patch it up? Reason we might have non FD MRs is IIUC
> > due to things like IO regions...
> 
> The issue is that today such non FD MRs count towards the vhost-user
> max ramslots limit even though there is no good reason for them to. By
> "patching it up", I mean accepting this change, which makes it so that
> the vhost-user max ramslots limit only applies to FD RAM regions.

I don't really remember, maybe one can get these things with things
like ROMs ...

> >
> >
> > > >
> > > >
> > > >
> > > > > >
> > > > > > >
> > > > > > > --
> > > > > > > ChangeList:
> > > > > > > v3:
> > > > > > > -make used_memslots a member of struct vhost_dev instead of a global static value
> > > > > > it's global resource, so why?
> > > > >
> > > > > I suggested it because I thought it made the code a little cleaner.
> > > > > I'm not opposed to changing it back, or having it stored at the
> > > > > vhost_user level.
> > > >
> >




* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-10-14 16:11       ` Raphael Norwitz
  2020-10-14 16:26         ` Michael S. Tsirkin
@ 2020-10-21 14:34         ` Igor Mammedov
  1 sibling, 0 replies; 23+ messages in thread
From: Igor Mammedov @ 2020-10-21 14:34 UTC (permalink / raw)
  To: Raphael Norwitz
  Cc: zhang.zhanghailiang, Michael S. Tsirkin, jasowang, QEMU,
	xiexiangyou, Jiajun Chen, Marc-André Lureau

On Wed, 14 Oct 2020 12:11:34 -0400
Raphael Norwitz <raphael.s.norwitz@gmail.com> wrote:

> On Wed, Oct 14, 2020 at 3:08 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Tue, Oct 13, 2020 at 08:58:59PM -0400, Raphael Norwitz wrote:  
> > > On Tue, Oct 6, 2020 at 5:48 AM Igor Mammedov <imammedo@redhat.com> wrote:  
> > > >
> > > > On Mon, 28 Sep 2020 21:17:31 +0800
> > > > Jiajun Chen <chenjiajun8@huawei.com> wrote:
> > > >  
> > > > > Used_memslots is equal to dev->mem->nregions now; this is true for
> > > > > vhost-kernel, but not for vhost-user, which only uses the memory
> > > > > regions that have a file descriptor. In fact, not all of the memory
> > > > > regions have a file descriptor.

> > > > > This matters in some scenarios: e.g. used_memslots is 8 but only
> > > > > 5 memory slots are actually used by vhost-user, so hot-plugging new
> > > > > RAM fails because vhost_has_free_slot() returns false, even though
> > > > > it could in fact be hot-plugged safely.
Can you find out what these extra 3 memory regions are, and why they are
filtered out from the regions that are passed to vhost-user?

> > > >
> > > > I had the impression that all guest RAM has to be shared with vhost,
> > > > so a combination of anon and fd-based RAM couldn't work.
> > > > Am I wrong?
> > >
> > > I'm not sure about the kernel backend, but I've tested adding anon
> > > memory to a VM with a vhost-user-scsi device and the hot-add works
> > > (eventually the VM crashed, but I could see that the guest recognized
> > > the anon RAM). The vhost-user code is designed to work with both. I'm
> > > not sure I see a use case, but if there is one, this would be a valid
> > > issue. Maybe Jiajun or Jianjay can elaborate.
> >
> > Hmm, doesn't vhost-user skip all regions that do not have an fd?
> >
> >
> >         mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
> >         if (fd > 0) {
> >             if (track_ramblocks) {
> >                 assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
> >                 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
> >                                                       reg->memory_size,
> >                                                       reg->guest_phys_addr,
> >                                                       reg->userspace_addr,
> >                                                       offset);
> >                 u->region_rb_offset[i] = offset;
> >                 u->region_rb[i] = mr->ram_block;
> >             } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
> >                 error_report("Failed preparing vhost-user memory table msg");
> >                 return -1;
> >             }
> >             vhost_user_fill_msg_region(&region_buffer, reg, offset);
> >             msg->payload.memory.regions[*fd_num] = region_buffer;
> >             fds[(*fd_num)++] = fd;
> >         } else if (track_ramblocks) {
> >             u->region_rb_offset[i] = 0;
> >             u->region_rb[i] = NULL;
> >         }
> >
> >
> >
> > In your test, is it possible that you were lucky and the guest did not
> > send any data from anon memory to the device?
> 
> Yes - vhost-user skips the region and does not send anon memory to the
> device, but it does not fail the hot-add operation.
> 
> In my test the fd > 0 check definitely failed, and QEMU went on to add
> the memory without sending it to the backend. I understand why this can
> be problematic (it did eventually crash the VM), but it seems like we
> allow it as of today. I can't think of a valid reason why you would
> want anon and FD RAM together, but I figured there may be a reason,
> since the vhost-user code allows for it. Should we maybe block that
> path altogether instead of patching it up?

I'm more inclined to disable mixed anon and FD RAM (provided that's
really the case) whenever vhost(-user) is used, or to disable
hot-plugging a vhost-user device in case the machine has mixed RAM.
Otherwise it's just a time bomb, waiting until the guest OS tries to
transmit data that it has just allocated from anon RAM.
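
Such a guard could look roughly like this (only a sketch of the idea,
reusing vhost_user_get_mr_data() from this patch's context; untested):

    /* Hypothetical: refuse vhost-user init/hot-plug when any vhost RAM
     * region lacks a backing fd, i.e. when anon and FD RAM are mixed. */
    static bool vhost_user_all_ram_is_fd_backed(struct vhost_dev *dev)
    {
        int i;

        for (i = 0; i < dev->mem->nregions; ++i) {
            struct vhost_memory_region *reg = dev->mem->regions + i;
            ram_addr_t offset;
            MemoryRegion *mr;
            int fd;

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
            if (!mr || fd <= 0) {
                return false;   /* fd-less (anon) RAM present */
            }
        }
        return true;
    }

vhost-user device init (or hot-plug) could then fail cleanly when this
returns false, instead of leaving the time bomb armed.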


> > > >  
> > > > >
> > > > > --
> > > > > ChangeList:
> > > > > v3:
> > > > > -make used_memslots a member of struct vhost_dev instead of a global static value  
> > > > it's a global resource, so why?
> > >
> > > I suggested it because I thought it made the code a little cleaner.
> > > I'm not opposed to changing it back, or having it stored at the
> > > vhost_user level.  
> >  
> 



^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-09-28 13:17 [PATCH] vhost-user: add separate memslot counter for vhost-user Jiajun Chen
  2020-10-02  2:05 ` Raphael Norwitz
  2020-10-06  9:48 ` Igor Mammedov
@ 2020-10-30  8:39 ` Michael S. Tsirkin
  2 siblings, 0 replies; 23+ messages in thread
From: Michael S. Tsirkin @ 2020-10-30  8:39 UTC (permalink / raw)
  To: Jiajun Chen
  Cc: raphael.s.norwitz, zhang.zhanghailiang, jasowang, qemu-devel,
	xiexiangyou, imammedo, marcandre.lureau

On Mon, Sep 28, 2020 at 09:17:31PM +0800, Jiajun Chen wrote:
> Used_memslots is equal to dev->mem->nregions now; this is true for
> vhost-kernel, but not for vhost-user, which only uses the memory
> regions that have a file descriptor. In fact, not all of the memory
> regions have a file descriptor.
> This matters in some scenarios: e.g. used_memslots is 8 but only
> 5 memory slots are actually used by vhost-user, so hot-plugging new
> RAM fails because vhost_has_free_slot() returns false, even though
> it could in fact be hot-plugged safely.


At this point I have dropped this; if you are going to resubmit, please
include the QEMU invocation that manifests the problem.
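
For reference, something along these lines should manifest it -- the
exact reproducer was never posted, so the invocation below is only an
illustration (standard QEMU options: a vhost-user netdev plus
file-backed, shared boot RAM, then fd-less RAM hot-plugged on top):

    qemu-system-x86_64 -m 1G,slots=8,maxmem=8G \
        -object memory-backend-file,id=mem0,size=1G,share=on,mem-path=/dev/hugepages \
        -numa node,memdev=mem0 \
        -chardev socket,id=char0,path=/tmp/vhost-user.sock \
        -netdev vhost-user,id=net0,chardev=char0 \
        -device virtio-net-pci,netdev=net0 ...

    # hot-plug anonymous (fd-less) RAM from the monitor:
    (qemu) object_add memory-backend-ram,id=mem1,size=1G
    (qemu) device_add pc-dimm,id=dimm1,memdev=mem1

With enough such fd-less slots, the old used_memslots accounting makes
vhost_has_free_slot() return false even though the vhost-user backend
still has slots to spare.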

> --
> ChangeList:
> v3:
> -make used_memslots a member of struct vhost_dev instead of a global static value
> 
> v2:
> -eliminating useless used_memslots_exceeded variable and used_memslots_is_exceeded() API
> 
> v1:
> -vhost-user: add separate memslot counter for vhost-user
> 
> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
> ---
>  hw/virtio/vhost-backend.c         | 12 ++++++++++
>  hw/virtio/vhost-user.c            | 25 +++++++++++++++++++++
>  hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
>  include/hw/virtio/vhost-backend.h |  5 +++++
>  include/hw/virtio/vhost.h         |  1 +
>  net/vhost-user.c                  |  7 ++++++
>  6 files changed, 78 insertions(+), 9 deletions(-)
> 
> diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> index 782b1d67d9..7016f23ec5 100644
> --- a/hw/virtio/vhost-backend.c
> +++ b/hw/virtio/vhost-backend.c
> @@ -238,6 +238,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
>          qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
>  }
>  
> +static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
> +{
> +    dev->used_memslots = dev->mem->nregions;
> +}
> +
> +static unsigned int vhost_kernel_get_used_memslots(struct vhost_dev *dev)
> +{
> +    return dev->used_memslots;
> +}
> +
>  static const VhostOps kernel_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_KERNEL,
>          .vhost_backend_init = vhost_kernel_init,
> @@ -269,6 +279,8 @@ static const VhostOps kernel_ops = {
>  #endif /* CONFIG_VHOST_VSOCK */
>          .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
>          .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
> +        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
>  };
>  #endif
>  
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 31231218dc..5dea64d8a8 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -2354,6 +2354,29 @@ void vhost_user_cleanup(VhostUserState *user)
>      user->chr = NULL;
>  }
>  
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> +    int i;
> +    dev->used_memslots = 0;
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +        int fd;
> +
> +        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
> +        if (mr && fd > 0) {
> +            dev->used_memslots++;
> +        }
> +    }
> +}
> +
> +static unsigned int vhost_user_get_used_memslots(struct vhost_dev *dev)
> +{
> +    return dev->used_memslots;
> +}
> +
>  const VhostOps user_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_USER,
>          .vhost_backend_init = vhost_user_backend_init,
> @@ -2387,4 +2410,6 @@ const VhostOps user_ops = {
>          .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
>          .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
>          .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
> +        .vhost_set_used_memslots = vhost_user_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_user_get_used_memslots,
>  };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 1a1384e7a6..98b967669b 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -45,20 +45,20 @@
>  static struct vhost_log *vhost_log;
>  static struct vhost_log *vhost_log_shm;
>  
> -static unsigned int used_memslots;
>  static QLIST_HEAD(, vhost_dev) vhost_devices =
>      QLIST_HEAD_INITIALIZER(vhost_devices);
>  
>  bool vhost_has_free_slot(void)
>  {
> -    unsigned int slots_limit = ~0U;
>      struct vhost_dev *hdev;
>  
>      QLIST_FOREACH(hdev, &vhost_devices, entry) {
> -        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> -        slots_limit = MIN(slots_limit, r);
> +        if (hdev->vhost_ops->vhost_get_used_memslots(hdev) >=
> +            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +            return false;
> +        }
>      }
> -    return slots_limit > used_memslots;
> +    return true;
>  }
>  
>  static void vhost_dev_sync_region(struct vhost_dev *dev,
> @@ -502,7 +502,6 @@ static void vhost_commit(MemoryListener *listener)
>                         dev->n_mem_sections * sizeof dev->mem->regions[0];
>      dev->mem = g_realloc(dev->mem, regions_size);
>      dev->mem->nregions = dev->n_mem_sections;
> -    used_memslots = dev->mem->nregions;
>      for (i = 0; i < dev->n_mem_sections; i++) {
>          struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
>          struct MemoryRegionSection *mrs = dev->mem_sections + i;
> @@ -678,6 +677,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
>          dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
>          memory_region_ref(section->mr);
>      }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
>  }
>  
>  /* Used for both add and nop callbacks */
> @@ -693,6 +693,17 @@ static void vhost_region_addnop(MemoryListener *listener,
>      vhost_region_add_section(dev, section);
>  }
>  
> +static void vhost_region_del(MemoryListener *listener,
> +                             MemoryRegionSection *section)
> +{
> +    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
> +                                         memory_listener);
> +    if (!vhost_section(dev, section)) {
> +        return;
> +    }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
> +}
> +
>  static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>  {
>      struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
> @@ -1300,6 +1311,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      hdev->memory_listener = (MemoryListener) {
>          .begin = vhost_begin,
>          .commit = vhost_commit,
> +        .region_del = vhost_region_del,
>          .region_add = vhost_region_addnop,
>          .region_nop = vhost_region_addnop,
>          .log_start = vhost_log_start,
> @@ -1346,9 +1358,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>  
> -    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> -        error_report("vhost backend memory slots limit is less"
> -                " than current number of present memory slots");
> +    /*
> +     * If the VM was started without any vhost device, then on the
> +     * first vhost device hot-plug vhost_get_used_memslots() starts
> +     * out as 0 and is only updated by the listener registered
> +     * above, so the limit needs to be double-checked here.
> +     */
> +    if (hdev->vhost_ops->vhost_get_used_memslots(hdev) >
> +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +        error_report("vhost backend memory slots limit is less than"
> +                     " current number of present memory slots");
>          r = -1;
>          if (busyloop_timeout) {
>              goto fail_busyloop;
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index 8825bd278f..6569c95a43 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
>  
>  typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
>  
> +typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
> +typedef unsigned int (*vhost_get_used_memslots_op)(struct vhost_dev *dev);
> +
>  typedef struct VhostOps {
>      VhostBackendType backend_type;
>      vhost_backend_init vhost_backend_init;
> @@ -168,6 +171,8 @@ typedef struct VhostOps {
>      vhost_vq_get_addr_op  vhost_vq_get_addr;
>      vhost_get_device_id_op vhost_get_device_id;
>      vhost_force_iommu_op vhost_force_iommu;
> +    vhost_set_used_memslots_op vhost_set_used_memslots;
> +    vhost_get_used_memslots_op vhost_get_used_memslots;
>  } VhostOps;
>  
>  extern const VhostOps user_ops;
> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> index 767a95ec0b..5ded21f86d 100644
> --- a/include/hw/virtio/vhost.h
> +++ b/include/hw/virtio/vhost.h
> @@ -90,6 +90,7 @@ struct vhost_dev {
>      QLIST_HEAD(, vhost_iommu) iommu_list;
>      IOMMUNotifier n;
>      const VhostDevConfigOps *config_ops;
> +    unsigned int used_memslots;
>  };
>  
>  struct vhost_net {
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 17532daaf3..7e93955537 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -20,6 +20,7 @@
>  #include "qemu/error-report.h"
>  #include "qemu/option.h"
>  #include "trace.h"
> +#include "include/hw/virtio/vhost.h"
>  
>  typedef struct NetVhostUserState {
>      NetClientState nc;
> @@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>          qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
>                                   net_vhost_user_event, NULL, nc0->name, NULL,
>                                   true);
> +
> +        if (!vhost_has_free_slot()) {
> +            error_report("used memslots exceeded the backend limit, quit "
> +                         "loop");
> +            goto err;
> +        }
>      } while (!s->started);
>  
>      assert(s->vhost_net);
> -- 
> 2.27.0.dirty



^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-09-08  6:11 Jiajun Chen
  2020-09-08  8:37 ` Michael S. Tsirkin
@ 2020-09-15  1:27 ` Raphael Norwitz
  1 sibling, 0 replies; 23+ messages in thread
From: Raphael Norwitz @ 2020-09-15  1:27 UTC (permalink / raw)
  To: Jiajun Chen
  Cc: zhang.zhanghailiang, Michael S. Tsirkin, jasowang, QEMU,
	xiexiangyou, imammedo, Marc-André Lureau

A few more comments mixed in below.

Once these are addressed I'd support this change from the vhost-user
side. We should get opinions from other maintainers though,
particularly for the kernel backend and vhost-user-net changes.

On Tue, Sep 8, 2020 at 2:11 AM Jiajun Chen <chenjiajun8@huawei.com> wrote:
>
> Used_memslots is equal to dev->mem->nregions now; this is true for
> vhost-kernel, but not for vhost-user, which only uses the memory
> regions that have a file descriptor. In fact, not all of the memory
> regions have a file descriptor.
> This matters in some scenarios: e.g. used_memslots is 8 but only
> 5 memory slots are actually used by vhost-user, so hot-plugging new
> RAM fails because vhost_has_free_slot() returns false, even though
> it could in fact be hot-plugged safely.
>
> --
> ChangeList:
> v2:
> -eliminating useless used_memslots_exceeded variable and used_memslots_is_exceeded() API
>
> v1:
> -vhost-user: add separate memslot counter for vhost-user
>
> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
> ---
>  hw/virtio/vhost-backend.c         | 14 ++++++++++++
>  hw/virtio/vhost-user.c            | 28 +++++++++++++++++++++++
>  hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
>  include/hw/virtio/vhost-backend.h |  5 +++++
>  net/vhost-user.c                  |  7 ++++++
>  5 files changed, 82 insertions(+), 9 deletions(-)
>
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 31231218dc..ec46a745fa 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -232,6 +232,7 @@ static VhostUserMsg m __attribute__ ((unused));
>
>  /* The version of the protocol we support */
>  #define VHOST_USER_VERSION    (0x1)
> +static unsigned int vhost_user_used_memslots;

I worry that using a global static value may introduce a lot of edge
cases which may be difficult to account for. Can we store this state
per vhost_user dev, either in the VhostUserState or struct vhost_user?
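
Roughly like this, say (only a sketch -- it assumes dev->opaque points
at the struct vhost_user, which vhost_user_backend_init() sets up):

    struct vhost_user {
        struct vhost_dev *dev;
        unsigned int used_memslots;   /* hypothetical per-device counter */
        /* ... */
    };

    static unsigned int vhost_user_get_used_memslots(struct vhost_dev *dev)
    {
        struct vhost_user *u = dev->opaque;

        return u->used_memslots;
    }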

>
>  struct vhost_user {
>      struct vhost_dev *dev;
> @@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
>      user->chr = NULL;
>  }
>
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> +    unsigned int counter = 0;
> +    int i;
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +        int fd;
> +
> +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);

This assert is redundant - vhost_user_get_mr_data already checks this.

> +        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
> +        if (mr && fd > 0) {
> +            counter++;
> +        }
> +    }
> +    vhost_user_used_memslots = counter;
> +}
> +
> +static unsigned int vhost_user_get_used_memslots(void)
> +{
> +    return vhost_user_used_memslots;
> +}
> +
>  const VhostOps user_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_USER,
>          .vhost_backend_init = vhost_user_backend_init,
> @@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
>          .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
>          .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
>          .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
> +        .vhost_set_used_memslots = vhost_user_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_user_get_used_memslots,
>  };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 1a1384e7a6..b8d617d24c 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -45,20 +45,20 @@
>  static struct vhost_log *vhost_log;
>  static struct vhost_log *vhost_log_shm;
>
> -static unsigned int used_memslots;
>  static QLIST_HEAD(, vhost_dev) vhost_devices =
>      QLIST_HEAD_INITIALIZER(vhost_devices);
>
>  bool vhost_has_free_slot(void)
>  {
> -    unsigned int slots_limit = ~0U;
>      struct vhost_dev *hdev;
>
>      QLIST_FOREACH(hdev, &vhost_devices, entry) {
> -        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> -        slots_limit = MIN(slots_limit, r);
> +        if (hdev->vhost_ops->vhost_get_used_memslots() >=
> +            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +            return false;
> +        }
>      }
> -    return slots_limit > used_memslots;
> +    return true;
>  }
>
>  static void vhost_dev_sync_region(struct vhost_dev *dev,
> @@ -502,7 +502,6 @@ static void vhost_commit(MemoryListener *listener)
>                         dev->n_mem_sections * sizeof dev->mem->regions[0];
>      dev->mem = g_realloc(dev->mem, regions_size);
>      dev->mem->nregions = dev->n_mem_sections;
> -    used_memslots = dev->mem->nregions;
>      for (i = 0; i < dev->n_mem_sections; i++) {
>          struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
>          struct MemoryRegionSection *mrs = dev->mem_sections + i;
> @@ -678,6 +677,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
>          dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
>          memory_region_ref(section->mr);
>      }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
>  }
>
>  /* Used for both add and nop callbacks */
> @@ -693,6 +693,17 @@ static void vhost_region_addnop(MemoryListener *listener,
>      vhost_region_add_section(dev, section);
>  }
>
> +static void vhost_region_del(MemoryListener *listener,
> +                             MemoryRegionSection *section)
> +{
> +    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
> +                                         memory_listener);
> +    if (!vhost_section(dev, section)) {
> +        return;
> +    }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
> +}
> +
>  static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>  {
>      struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
> @@ -1300,6 +1311,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      hdev->memory_listener = (MemoryListener) {
>          .begin = vhost_begin,
>          .commit = vhost_commit,
> +        .region_del = vhost_region_del,
>          .region_add = vhost_region_addnop,
>          .region_nop = vhost_region_addnop,
>          .log_start = vhost_log_start,
> @@ -1346,9 +1358,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>
> -    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> -        error_report("vhost backend memory slots limit is less"
> -                " than current number of present memory slots");
> +    /*
> +     * If we started a VM without any vhost device,
> +     * for the first time vhost device hot-plug
> +     * (vhost_get_used_memslots is always 0),
> +     * so it needs to double check here

NIT - There should be a period at the end of the sentence in this comment.


> +     */
> +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +        error_report("vhost backend memory slots limit is less than"
> +                     " current number of present memory slots");
>          r = -1;
>          if (busyloop_timeout) {
>              goto fail_busyloop;


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-09-08  6:11 Jiajun Chen
@ 2020-09-08  8:37 ` Michael S. Tsirkin
  2020-09-15  1:27 ` Raphael Norwitz
  1 sibling, 0 replies; 23+ messages in thread
From: Michael S. Tsirkin @ 2020-09-08  8:37 UTC (permalink / raw)
  To: Jiajun Chen
  Cc: raphael.s.norwitz, zhang.zhanghailiang, jasowang, qemu-devel,
	xiexiangyou, Marc-André Lureau, Igor Mammedov

+	Cc: Igor Mammedov <imammedo@redhat.com>
+	Cc: Marc-André Lureau <marcandre.lureau@redhat.com>

who worked on this in the past.

On Tue, Sep 08, 2020 at 02:11:41PM +0800, Jiajun Chen wrote:
> Used_memslots is equal to dev->mem->nregions now; this is true for
> vhost-kernel, but not for vhost-user, which only uses the memory
> regions that have a file descriptor. In fact, not all of the memory
> regions have a file descriptor.
> This matters in some scenarios: e.g. used_memslots is 8 but only
> 5 memory slots are actually used by vhost-user, so hot-plugging new
> RAM fails because vhost_has_free_slot() returns false, even though
> it could in fact be hot-plugged safely.
> 
> --
> ChangeList:
> v2:
> -eliminating useless used_memslots_exceeded variable and used_memslots_is_exceeded() API
> 
> v1:
> -vhost-user: add separate memslot counter for vhost-user
> 
> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
> ---
>  hw/virtio/vhost-backend.c         | 14 ++++++++++++
>  hw/virtio/vhost-user.c            | 28 +++++++++++++++++++++++
>  hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
>  include/hw/virtio/vhost-backend.h |  5 +++++
>  net/vhost-user.c                  |  7 ++++++
>  5 files changed, 82 insertions(+), 9 deletions(-)
> 
> diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> index 782b1d67d9..35eec7e166 100644
> --- a/hw/virtio/vhost-backend.c
> +++ b/hw/virtio/vhost-backend.c
> @@ -20,6 +20,8 @@
>  #include <linux/vhost.h>
>  #include <sys/ioctl.h>
>  
> +static unsigned int vhost_kernel_used_memslots;
> +
>  static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
>                               void *arg)
>  {
> @@ -238,6 +240,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
>          qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
>  }
>  
> +static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
> +{
> +    vhost_kernel_used_memslots = dev->mem->nregions;
> +}
> +
> +static unsigned int vhost_kernel_get_used_memslots(void)
> +{
> +    return vhost_kernel_used_memslots;
> +}
> +
>  static const VhostOps kernel_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_KERNEL,
>          .vhost_backend_init = vhost_kernel_init,
> @@ -269,6 +281,8 @@ static const VhostOps kernel_ops = {
>  #endif /* CONFIG_VHOST_VSOCK */
>          .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
>          .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
> +        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
>  };
>  #endif
>  
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 31231218dc..ec46a745fa 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -232,6 +232,7 @@ static VhostUserMsg m __attribute__ ((unused));
>  
>  /* The version of the protocol we support */
>  #define VHOST_USER_VERSION    (0x1)
> +static unsigned int vhost_user_used_memslots;
>  
>  struct vhost_user {
>      struct vhost_dev *dev;
> @@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
>      user->chr = NULL;
>  }
>  
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> +    unsigned int counter = 0;
> +    int i;
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +        int fd;
> +
> +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> +        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
> +        if (mr && fd > 0) {
> +            counter++;
> +        }
> +    }
> +    vhost_user_used_memslots = counter;
> +}
> +
> +static unsigned int vhost_user_get_used_memslots(void)
> +{
> +    return vhost_user_used_memslots;
> +}
> +
>  const VhostOps user_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_USER,
>          .vhost_backend_init = vhost_user_backend_init,
> @@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
>          .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
>          .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
>          .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
> +        .vhost_set_used_memslots = vhost_user_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_user_get_used_memslots,
>  };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 1a1384e7a6..b8d617d24c 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -45,20 +45,20 @@
>  static struct vhost_log *vhost_log;
>  static struct vhost_log *vhost_log_shm;
>  
> -static unsigned int used_memslots;
>  static QLIST_HEAD(, vhost_dev) vhost_devices =
>      QLIST_HEAD_INITIALIZER(vhost_devices);
>  
>  bool vhost_has_free_slot(void)
>  {
> -    unsigned int slots_limit = ~0U;
>      struct vhost_dev *hdev;
>  
>      QLIST_FOREACH(hdev, &vhost_devices, entry) {
> -        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> -        slots_limit = MIN(slots_limit, r);
> +        if (hdev->vhost_ops->vhost_get_used_memslots() >=
> +            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +            return false;
> +        }
>      }
> -    return slots_limit > used_memslots;
> +    return true;
>  }
>  
>  static void vhost_dev_sync_region(struct vhost_dev *dev,
> @@ -502,7 +502,6 @@ static void vhost_commit(MemoryListener *listener)
>                         dev->n_mem_sections * sizeof dev->mem->regions[0];
>      dev->mem = g_realloc(dev->mem, regions_size);
>      dev->mem->nregions = dev->n_mem_sections;
> -    used_memslots = dev->mem->nregions;
>      for (i = 0; i < dev->n_mem_sections; i++) {
>          struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
>          struct MemoryRegionSection *mrs = dev->mem_sections + i;
> @@ -678,6 +677,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
>          dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
>          memory_region_ref(section->mr);
>      }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
>  }
>  
>  /* Used for both add and nop callbacks */
> @@ -693,6 +693,17 @@ static void vhost_region_addnop(MemoryListener *listener,
>      vhost_region_add_section(dev, section);
>  }
>  
> +static void vhost_region_del(MemoryListener *listener,
> +                             MemoryRegionSection *section)
> +{
> +    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
> +                                         memory_listener);
> +    if (!vhost_section(dev, section)) {
> +        return;
> +    }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
> +}
> +
>  static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>  {
>      struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
> @@ -1300,6 +1311,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      hdev->memory_listener = (MemoryListener) {
>          .begin = vhost_begin,
>          .commit = vhost_commit,
> +        .region_del = vhost_region_del,
>          .region_add = vhost_region_addnop,
>          .region_nop = vhost_region_addnop,
>          .log_start = vhost_log_start,
> @@ -1346,9 +1358,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>  
> -    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> -        error_report("vhost backend memory slots limit is less"
> -                " than current number of present memory slots");
> +    /*
> +     * If we started a VM without any vhost device,
> +     * for the first time vhost device hot-plug
> +     * (vhost_get_used_memslots is always 0),
> +     * so it needs to double check here
> +     */
> +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +        error_report("vhost backend memory slots limit is less than"
> +                     " current number of present memory slots");
>          r = -1;
>          if (busyloop_timeout) {
>              goto fail_busyloop;
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index 8825bd278f..ed43a93692 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
>  
>  typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
>  
> +typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
> +typedef unsigned int (*vhost_get_used_memslots_op)(void);
> +
>  typedef struct VhostOps {
>      VhostBackendType backend_type;
>      vhost_backend_init vhost_backend_init;
> @@ -168,6 +171,8 @@ typedef struct VhostOps {
>      vhost_vq_get_addr_op  vhost_vq_get_addr;
>      vhost_get_device_id_op vhost_get_device_id;
>      vhost_force_iommu_op vhost_force_iommu;
> +    vhost_set_used_memslots_op vhost_set_used_memslots;
> +    vhost_get_used_memslots_op vhost_get_used_memslots;
>  } VhostOps;
>  
>  extern const VhostOps user_ops;
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 17532daaf3..7e93955537 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -20,6 +20,7 @@
>  #include "qemu/error-report.h"
>  #include "qemu/option.h"
>  #include "trace.h"
> +#include "include/hw/virtio/vhost.h"
>  
>  typedef struct NetVhostUserState {
>      NetClientState nc;
> @@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>          qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
>                                   net_vhost_user_event, NULL, nc0->name, NULL,
>                                   true);
> +
> +        if (!vhost_has_free_slot()) {
> +            error_report("used memslots exceeded the backend limit, quit "
> +                         "loop");
> +            goto err;
> +        }
>      } while (!s->started);
>  
>      assert(s->vhost_net);
> -- 
> 2.19.1



^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH] vhost-user: add separate memslot counter for vhost-user
@ 2020-09-08  6:11 Jiajun Chen
  2020-09-08  8:37 ` Michael S. Tsirkin
  2020-09-15  1:27 ` Raphael Norwitz
  0 siblings, 2 replies; 23+ messages in thread
From: Jiajun Chen @ 2020-09-08  6:11 UTC (permalink / raw)
  To: raphael.s.norwitz
  Cc: zhang.zhanghailiang, jasowang, qemu-devel, xiexiangyou, mst

Used_memslots is equal to dev->mem->nregions now; this is true for
vhost-kernel, but not for vhost-user, which only uses the memory
regions that have a file descriptor. In fact, not all of the memory
regions have a file descriptor.
This matters in some scenarios: e.g. used_memslots is 8 but only
5 memory slots are actually used by vhost-user, so hot-plugging new
RAM fails because vhost_has_free_slot() returns false, even though
it could in fact be hot-plugged safely.

--
ChangeList:
v2:
-eliminating useless used_memslots_exceeded variable and used_memslots_is_exceeded() API

v1:
-vhost-user: add separate memslot counter for vhost-user

Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
---
 hw/virtio/vhost-backend.c         | 14 ++++++++++++
 hw/virtio/vhost-user.c            | 28 +++++++++++++++++++++++
 hw/virtio/vhost.c                 | 37 +++++++++++++++++++++++--------
 include/hw/virtio/vhost-backend.h |  5 +++++
 net/vhost-user.c                  |  7 ++++++
 5 files changed, 82 insertions(+), 9 deletions(-)

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 782b1d67d9..35eec7e166 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -20,6 +20,8 @@
 #include <linux/vhost.h>
 #include <sys/ioctl.h>
 
+static unsigned int vhost_kernel_used_memslots;
+
 static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                              void *arg)
 {
@@ -238,6 +240,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
         qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
 }
 
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{
+    vhost_kernel_used_memslots = dev->mem->nregions;
+}
+
+static unsigned int vhost_kernel_get_used_memslots(void)
+{
+    return vhost_kernel_used_memslots;
+}
+
 static const VhostOps kernel_ops = {
         .backend_type = VHOST_BACKEND_TYPE_KERNEL,
         .vhost_backend_init = vhost_kernel_init,
@@ -269,6 +281,8 @@ static const VhostOps kernel_ops = {
 #endif /* CONFIG_VHOST_VSOCK */
         .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
         .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
+        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
+        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
 };
 #endif
 
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 31231218dc..ec46a745fa 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -232,6 +232,7 @@ static VhostUserMsg m __attribute__ ((unused));
 
 /* The version of the protocol we support */
 #define VHOST_USER_VERSION    (0x1)
+static unsigned int vhost_user_used_memslots;
 
 struct vhost_user {
     struct vhost_dev *dev;
@@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
     user->chr = NULL;
 }
 
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{
+    unsigned int counter = 0;
+    int i;
+
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        ram_addr_t offset;
+        MemoryRegion *mr;
+        int fd;
+
+        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
+        if (mr && fd > 0) {
+            counter++;
+        }
+    }
+    vhost_user_used_memslots = counter;
+}
+
+static unsigned int vhost_user_get_used_memslots(void)
+{
+    return vhost_user_used_memslots;
+}
+
 const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_backend_init = vhost_user_backend_init,
@@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
         .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
         .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
         .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
+        .vhost_set_used_memslots = vhost_user_set_used_memslots,
+        .vhost_get_used_memslots = vhost_user_get_used_memslots,
 };
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 1a1384e7a6..b8d617d24c 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -45,20 +45,20 @@
 static struct vhost_log *vhost_log;
 static struct vhost_log *vhost_log_shm;
 
-static unsigned int used_memslots;
 static QLIST_HEAD(, vhost_dev) vhost_devices =
     QLIST_HEAD_INITIALIZER(vhost_devices);
 
 bool vhost_has_free_slot(void)
 {
-    unsigned int slots_limit = ~0U;
     struct vhost_dev *hdev;
 
     QLIST_FOREACH(hdev, &vhost_devices, entry) {
-        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
-        slots_limit = MIN(slots_limit, r);
+        if (hdev->vhost_ops->vhost_get_used_memslots() >=
+            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+            return false;
+        }
     }
-    return slots_limit > used_memslots;
+    return true;
 }
 
 static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -502,7 +502,6 @@ static void vhost_commit(MemoryListener *listener)
                        dev->n_mem_sections * sizeof dev->mem->regions[0];
     dev->mem = g_realloc(dev->mem, regions_size);
     dev->mem->nregions = dev->n_mem_sections;
-    used_memslots = dev->mem->nregions;
     for (i = 0; i < dev->n_mem_sections; i++) {
         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
         struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -678,6 +677,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
         memory_region_ref(section->mr);
     }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
 }
 
 /* Used for both add and nop callbacks */
@@ -693,6 +693,17 @@ static void vhost_region_addnop(MemoryListener *listener,
     vhost_region_add_section(dev, section);
 }
 
+static void vhost_region_del(MemoryListener *listener,
+                             MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    if (!vhost_section(dev, section)) {
+        return;
+    }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
+}
+
 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 {
     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
@@ -1300,6 +1311,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->memory_listener = (MemoryListener) {
         .begin = vhost_begin,
         .commit = vhost_commit,
+        .region_del = vhost_region_del,
         .region_add = vhost_region_addnop,
         .region_nop = vhost_region_addnop,
         .log_start = vhost_log_start,
@@ -1346,9 +1358,16 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
 
-    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
-        error_report("vhost backend memory slots limit is less"
-                " than current number of present memory slots");
+    /*
+     * If we started a VM without any vhost device,
+     * for the first time vhost device hot-plug
+     * (vhost_get_used_memslots is always 0),
+     * so it needs to double check here
+     */
+    if (hdev->vhost_ops->vhost_get_used_memslots() >
+        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+        error_report("vhost backend memory slots limit is less than"
+                     " current number of present memory slots");
         r = -1;
         if (busyloop_timeout) {
             goto fail_busyloop;
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 8825bd278f..ed43a93692 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
 
 typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
 
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
+typedef unsigned int (*vhost_get_used_memslots_op)(void);
+
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_backend_init vhost_backend_init;
@@ -168,6 +171,8 @@ typedef struct VhostOps {
     vhost_vq_get_addr_op  vhost_vq_get_addr;
     vhost_get_device_id_op vhost_get_device_id;
     vhost_force_iommu_op vhost_force_iommu;
+    vhost_set_used_memslots_op vhost_set_used_memslots;
+    vhost_get_used_memslots_op vhost_get_used_memslots;
 } VhostOps;
 
 extern const VhostOps user_ops;
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 17532daaf3..7e93955537 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -20,6 +20,7 @@
 #include "qemu/error-report.h"
 #include "qemu/option.h"
 #include "trace.h"
+#include "include/hw/virtio/vhost.h"
 
 typedef struct NetVhostUserState {
     NetClientState nc;
@@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
         qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                  net_vhost_user_event, NULL, nc0->name, NULL,
                                  true);
+
+        if (!vhost_has_free_slot()) {
+            error_report("used memslots exceeded the backend limit, quit "
+                         "loop");
+            goto err;
+        }
     } while (!s->started);
 
     assert(s->vhost_net);
-- 
2.19.1



^ permalink raw reply related	[flat|nested] 23+ messages in thread

* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-08-11  1:43 Jiajun Chen
  2020-08-27 12:11 ` Michael S. Tsirkin
@ 2020-09-02  5:02 ` Raphael Norwitz
  1 sibling, 0 replies; 23+ messages in thread
From: Raphael Norwitz @ 2020-09-02  5:02 UTC (permalink / raw)
  To: Jiajun Chen
  Cc: zhang.zhanghailiang, jasowang, QEMU, xiexiangyou, Michael S. Tsirkin

I see how this fixes vhost_has_free_slot() to correctly determine
whether or not regions can be added, but why are the
used_memslots_exceeded variable and the used_memslots_is_exceeded()
API needed?
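
Couldn't net_vhost_user_init() just reuse the existing helper instead?
Something like (a sketch; this is effectively what the later v2 of the
patch does):

    if (!vhost_has_free_slot()) {
        error_report("used memslots exceeded the backend limit");
        goto err;
    }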

On Mon, Aug 10, 2020 at 9:44 PM Jiajun Chen <chenjiajun8@huawei.com> wrote:
>
> Used_memslots is equal to dev->mem->nregions now; this is true for
> vhost-kernel, but not for vhost-user, which only uses the memory
> regions that have a file descriptor. In fact, not all of the memory
> regions have a file descriptor.
> This matters in some scenarios: e.g. used_memslots is 8 but only
> 5 memory slots are actually used by vhost-user, so hot-plugging new
> RAM fails because vhost_has_free_slot() returns false, even though
> it could in fact be hot-plugged safely.
>
> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
> ---
>  hw/virtio/vhost-backend.c         | 14 ++++++++
>  hw/virtio/vhost-user.c            | 28 ++++++++++++++++
>  hw/virtio/vhost.c                 | 54 +++++++++++++++++++++++++------
>  include/hw/virtio/vhost-backend.h |  5 +++
>  include/hw/virtio/vhost.h         |  1 +
>  net/vhost-user.c                  |  7 ++++
>  6 files changed, 100 insertions(+), 9 deletions(-)
>
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 31231218dc..04d20fc3ee 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
>      user->chr = NULL;
>  }
>
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> +    unsigned int counter = 0;
> +    int i;
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +
> +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> +        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> +                                    &offset);

Why not use the vhost_user_get_mr_data() helper? It would simplify the
code a bit.
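
That is, the loop body could presumably collapse to something like
(a sketch, using the helper's signature as seen elsewhere in this file):

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (mr && fd > 0) {
            counter++;
        }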

> +        if (mr && memory_region_get_fd(mr) > 0) {
> +            counter++;
> +        }
> +    }
> +    vhost_user_used_memslots = counter;
> +}
> +
> +static unsigned int vhost_user_get_used_memslots(void)
> +{
> +    return vhost_user_used_memslots;
> +}
> +
>  const VhostOps user_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_USER,
>          .vhost_backend_init = vhost_user_backend_init,
> @@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 1a1384e7a6..7f36d7af25 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -1346,9 +1373,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>
> -    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> -        error_report("vhost backend memory slots limit is less"
> -                " than current number of present memory slots");
> +    /*
> +     * If we started a VM without any vhost device,
> +     * vhost_dev_used_memslots_is_exceeded will always return false for the
> +     * first time vhost device hot-plug(vhost_get_used_memslots is always 0),
> +     * so it needs to double check here
> +     */
> +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {

Why can't we just check if hdev->vhost_ops->vhost_get_used_memslots()
> hdev->vhost_ops->vhost_backend_memslots_limit(hdev)?

>          r = -1;
>          if (busyloop_timeout) {
>              goto fail_busyloop;
> @@ -1773,3 +1804,8 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
>
>      return -1;
>  }
> +
> +bool used_memslots_is_exceeded(void)
> +{
> +    return used_memslots_exceeded;
> +}
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 17532daaf3..2f0216b518 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>          qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
>                                   net_vhost_user_event, NULL, nc0->name, NULL,
>                                   true);
> +
> +        if (used_memslots_is_exceeded()) {

Why can't you use vhost_has_free_slot() instead here?

> +            error_report("used memslots exceeded the backend limit, quit "
> +                            "loop");
> +            goto err;
> +        }
>      } while (!s->started);
>
>      assert(s->vhost_net);
> --
> 2.27.0.dirty
>
>


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-08-11  1:43 Jiajun Chen
@ 2020-08-27 12:11 ` Michael S. Tsirkin
  2020-09-02  5:02 ` Raphael Norwitz
  1 sibling, 0 replies; 23+ messages in thread
From: Michael S. Tsirkin @ 2020-08-27 12:11 UTC (permalink / raw)
  To: Jiajun Chen
  Cc: zhang.zhanghailiang, jasowang, qemu-devel, xiexiangyou,
	raphael.norwitz, kraxel, marcandre.lureau, imammedo

On Tue, Aug 11, 2020 at 09:43:43AM +0800, Jiajun Chen wrote:
> Used_memslots is equal to dev->mem->nregions now; this is true for
> vhost-kernel, but not for vhost-user, which only uses the memory
> regions that have a file descriptor. In fact, not all of the memory
> regions have a file descriptor.
> This matters in some scenarios: e.g. used_memslots is 8 but only
> 5 memory slots are actually used by vhost-user, so hot-plugging new
> RAM fails because vhost_has_free_slot() returns false, even though
> it could in fact be hot-plugged safely.
> 
> Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
> Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>

cc a bunch more people

> ---
>  hw/virtio/vhost-backend.c         | 14 ++++++++
>  hw/virtio/vhost-user.c            | 28 ++++++++++++++++
>  hw/virtio/vhost.c                 | 54 +++++++++++++++++++++++++------
>  include/hw/virtio/vhost-backend.h |  5 +++
>  include/hw/virtio/vhost.h         |  1 +
>  net/vhost-user.c                  |  7 ++++
>  6 files changed, 100 insertions(+), 9 deletions(-)
> 
> diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> index 782b1d67d9..35eec7e166 100644
> --- a/hw/virtio/vhost-backend.c
> +++ b/hw/virtio/vhost-backend.c
> @@ -20,6 +20,8 @@
>  #include <linux/vhost.h>
>  #include <sys/ioctl.h>
>  
> +static unsigned int vhost_kernel_used_memslots;
> +
>  static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
>                               void *arg)
>  {
> @@ -238,6 +240,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
>          qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
>  }
>  
> +static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
> +{
> +    vhost_kernel_used_memslots = dev->mem->nregions;
> +}
> +
> +static unsigned int vhost_kernel_get_used_memslots(void)
> +{
> +    return vhost_kernel_used_memslots;
> +}
> +
>  static const VhostOps kernel_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_KERNEL,
>          .vhost_backend_init = vhost_kernel_init,
> @@ -269,6 +281,8 @@ static const VhostOps kernel_ops = {
>  #endif /* CONFIG_VHOST_VSOCK */
>          .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
>          .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
> +        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
>  };
>  #endif
>  
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 31231218dc..04d20fc3ee 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -232,6 +232,7 @@ static VhostUserMsg m __attribute__ ((unused));
>  
>  /* The version of the protocol we support */
>  #define VHOST_USER_VERSION    (0x1)
> +static unsigned int vhost_user_used_memslots;
>  
>  struct vhost_user {
>      struct vhost_dev *dev;
> @@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
>      user->chr = NULL;
>  }
>  
> +static void vhost_user_set_used_memslots(struct vhost_dev *dev)
> +{
> +    unsigned int counter = 0;
> +    int i;
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +
> +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> +        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> +                                    &offset);
> +        if (mr && memory_region_get_fd(mr) > 0) {
> +            counter++;
> +        }
> +    }
> +    vhost_user_used_memslots = counter;
> +}
> +
> +static unsigned int vhost_user_get_used_memslots(void)
> +{
> +    return vhost_user_used_memslots;
> +}
> +
>  const VhostOps user_ops = {
>          .backend_type = VHOST_BACKEND_TYPE_USER,
>          .vhost_backend_init = vhost_user_backend_init,
> @@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
>          .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
>          .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
>          .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
> +        .vhost_set_used_memslots = vhost_user_set_used_memslots,
> +        .vhost_get_used_memslots = vhost_user_get_used_memslots,
>  };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 1a1384e7a6..7f36d7af25 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -45,20 +45,22 @@
>  static struct vhost_log *vhost_log;
>  static struct vhost_log *vhost_log_shm;
>  
> -static unsigned int used_memslots;
>  static QLIST_HEAD(, vhost_dev) vhost_devices =
>      QLIST_HEAD_INITIALIZER(vhost_devices);
>  
> +bool used_memslots_exceeded;
> +
>  bool vhost_has_free_slot(void)
>  {
> -    unsigned int slots_limit = ~0U;
>      struct vhost_dev *hdev;
>  
>      QLIST_FOREACH(hdev, &vhost_devices, entry) {
> -        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> -        slots_limit = MIN(slots_limit, r);
> +        if (hdev->vhost_ops->vhost_get_used_memslots() >=
> +            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +            return false;
> +        }
>      }
> -    return slots_limit > used_memslots;
> +    return true;
>  }
>  
>  static void vhost_dev_sync_region(struct vhost_dev *dev,
> @@ -502,7 +504,6 @@ static void vhost_commit(MemoryListener *listener)
>                         dev->n_mem_sections * sizeof dev->mem->regions[0];
>      dev->mem = g_realloc(dev->mem, regions_size);
>      dev->mem->nregions = dev->n_mem_sections;
> -    used_memslots = dev->mem->nregions;
>      for (i = 0; i < dev->n_mem_sections; i++) {
>          struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
>          struct MemoryRegionSection *mrs = dev->mem_sections + i;
> @@ -678,6 +679,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
>          dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
>          memory_region_ref(section->mr);
>      }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
>  }
>  
>  /* Used for both add and nop callbacks */
> @@ -693,6 +695,17 @@ static void vhost_region_addnop(MemoryListener *listener,
>      vhost_region_add_section(dev, section);
>  }
>  
> +static void vhost_region_del(MemoryListener *listener,
> +                             MemoryRegionSection *section)
> +{
> +    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
> +                                         memory_listener);
> +    if (!vhost_section(dev, section)) {
> +        return;
> +    }
> +    dev->vhost_ops->vhost_set_used_memslots(dev);
> +}
> +
>  static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>  {
>      struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
> @@ -1248,6 +1261,19 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
>      event_notifier_cleanup(&vq->masked_notifier);
>  }
>  
> +static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
> +{
> +    if (hdev->vhost_ops->vhost_get_used_memslots() >
> +        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> +        error_report("vhost backend memory slots limit is less"
> +                " than current number of present memory slots");
> +        used_memslots_exceeded = true;
> +        return true;
> +    }
> +    used_memslots_exceeded = false;
> +    return false;
> +}
> +
>  int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>                     VhostBackendType backend_type, uint32_t busyloop_timeout)
>  {
> @@ -1300,6 +1326,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      hdev->memory_listener = (MemoryListener) {
>          .begin = vhost_begin,
>          .commit = vhost_commit,
> +        .region_del = vhost_region_del,
>          .region_add = vhost_region_addnop,
>          .region_nop = vhost_region_addnop,
>          .log_start = vhost_log_start,
> @@ -1346,9 +1373,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>  
> -    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
> -        error_report("vhost backend memory slots limit is less"
> -                " than current number of present memory slots");
> +    /*
> +     * If the VM was started without any vhost device,
> +     * vhost_get_used_memslots() stays 0 until the memory listener above
> +     * has run, so a check at hot-plug time for the first vhost device
> +     * would trivially pass. Re-check the limit here.
> +     */
> +    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
>          r = -1;
>          if (busyloop_timeout) {
>              goto fail_busyloop;
> @@ -1773,3 +1804,8 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
>  
>      return -1;
>  }
> +
> +bool used_memslots_is_exceeded(void)
> +{
> +    return used_memslots_exceeded;
> +}
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index 8825bd278f..ed43a93692 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
>  
>  typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
>  
> +typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
> +typedef unsigned int (*vhost_get_used_memslots_op)(struct vhost_dev *dev);
> +
>  typedef struct VhostOps {
>      VhostBackendType backend_type;
>      vhost_backend_init vhost_backend_init;
> @@ -168,6 +171,8 @@ typedef struct VhostOps {
>      vhost_vq_get_addr_op  vhost_vq_get_addr;
>      vhost_get_device_id_op vhost_get_device_id;
>      vhost_force_iommu_op vhost_force_iommu;
> +    vhost_set_used_memslots_op vhost_set_used_memslots;
> +    vhost_get_used_memslots_op vhost_get_used_memslots;
>  } VhostOps;
>  
>  extern const VhostOps user_ops;
> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> index 767a95ec0b..bf7cec445f 100644
> --- a/include/hw/virtio/vhost.h
> +++ b/include/hw/virtio/vhost.h
> @@ -144,4 +144,5 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
>                             struct vhost_inflight *inflight);
>  int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
>                             struct vhost_inflight *inflight);
> +bool used_memslots_is_exceeded(void);
>  #endif
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 17532daaf3..2f0216b518 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -20,6 +20,7 @@
>  #include "qemu/error-report.h"
>  #include "qemu/option.h"
>  #include "trace.h"
> +#include "hw/virtio/vhost.h"
>  
>  typedef struct NetVhostUserState {
>      NetClientState nc;
> @@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
>          qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
>                                   net_vhost_user_event, NULL, nc0->name, NULL,
>                                   true);
> +
> +        if (used_memslots_is_exceeded()) {
> +            error_report("used memslots exceeded the backend limit, "
> +                         "quitting loop");
> +            goto err;
> +        }
>      } while (!s->started);
>  
>      assert(s->vhost_net);
> -- 
> 2.27.0.dirty



^ permalink raw reply	[flat|nested] 23+ messages in thread
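
The core rule of this series is that a vhost-user backend consumes a
memslot only for memory regions backed by a file descriptor, while
vhost-kernel counts every region. The following minimal sketch
illustrates that counting rule in isolation; toy_region,
count_used_memslots and the sample values are illustrative stand-ins,
not the real QEMU types:

/*
 * Minimal sketch of the counting rule: a vhost-user backend uses a
 * memslot only for regions backed by a file descriptor.  toy_region
 * and the sample values are stand-ins, not the real QEMU types.
 */
#include <stdio.h>

struct toy_region {
    unsigned long long guest_phys_addr;
    int fd;                         /* -1 means not fd-backed */
};

static unsigned int count_used_memslots(const struct toy_region *regs,
                                        unsigned int nregions)
{
    unsigned int used = 0;

    for (unsigned int i = 0; i < nregions; i++) {
        if (regs[i].fd >= 0) {      /* only fd-backed regions count */
            used++;
        }
    }
    return used;
}

int main(void)
{
    /* 8 regions are present, but only 5 of them are fd-backed */
    struct toy_region regs[8] = {
        { 0x00000000ULL, 10 }, { 0x10000000ULL, 11 },
        { 0x20000000ULL, -1 }, { 0x30000000ULL, 12 },
        { 0x40000000ULL, -1 }, { 0x50000000ULL, 13 },
        { 0x60000000ULL, -1 }, { 0x70000000ULL, 14 },
    };

    printf("nregions = 8, used memslots = %u\n",
           count_used_memslots(regs, 8));       /* prints 5 */
    return 0;
}

With 8 regions of which 5 are fd-backed, the sketch reports 5 used
memslots, matching the 8-vs-5 scenario from the commit message.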

* [PATCH] vhost-user: add separate memslot counter for vhost-user
@ 2020-08-11  1:43 Jiajun Chen
  2020-08-27 12:11 ` Michael S. Tsirkin
  2020-09-02  5:02 ` Raphael Norwitz
  0 siblings, 2 replies; 23+ messages in thread
From: Jiajun Chen @ 2020-08-11  1:43 UTC (permalink / raw)
  To: mst, jasowang; +Cc: qemu-devel, xiexiangyou, zhang.zhanghailiang

used_memslots is currently equal to dev->mem->nregions. That is
correct for vhost-kernel, but not for vhost-user, which can only use
memory regions backed by a file descriptor, and not every memory
region has one.
This matters in practice: if used_memslots is 8 but only 5 memory
slots are actually usable by vhost-user, hot-plugging new RAM fails
because vhost_has_free_slot() returns false, even though the hot plug
would in fact be safe.

Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
---
 hw/virtio/vhost-backend.c         | 14 ++++++++
 hw/virtio/vhost-user.c            | 28 ++++++++++++++++
 hw/virtio/vhost.c                 | 54 +++++++++++++++++++++++++------
 include/hw/virtio/vhost-backend.h |  5 +++
 include/hw/virtio/vhost.h         |  1 +
 net/vhost-user.c                  |  7 ++++
 6 files changed, 100 insertions(+), 9 deletions(-)

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 782b1d67d9..35eec7e166 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -20,6 +20,8 @@
 #include <linux/vhost.h>
 #include <sys/ioctl.h>
 
+static unsigned int vhost_kernel_used_memslots;
+
 static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                              void *arg)
 {
@@ -238,6 +240,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
         qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
 }
 
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{
+    vhost_kernel_used_memslots = dev->mem->nregions;
+}
+
+static unsigned int vhost_kernel_get_used_memslots(void)
+{
+    return vhost_kernel_used_memslots;
+}
+
 static const VhostOps kernel_ops = {
         .backend_type = VHOST_BACKEND_TYPE_KERNEL,
         .vhost_backend_init = vhost_kernel_init,
@@ -269,6 +281,8 @@ static const VhostOps kernel_ops = {
 #endif /* CONFIG_VHOST_VSOCK */
         .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
         .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
+        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
+        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
 };
 #endif
 
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 31231218dc..04d20fc3ee 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -232,6 +232,7 @@ static VhostUserMsg m __attribute__ ((unused));
 
 /* The version of the protocol we support */
 #define VHOST_USER_VERSION    (0x1)
+static unsigned int vhost_user_used_memslots;
 
 struct vhost_user {
     struct vhost_dev *dev;
@@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
     user->chr = NULL;
 }
 
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{
+    unsigned int counter = 0;
+    int i;
+
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        ram_addr_t offset;
+        MemoryRegion *mr;
+
+        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
+                                    &offset);
+        if (mr && memory_region_get_fd(mr) > 0) {
+            counter++;
+        }
+    }
+    vhost_user_used_memslots = counter;
+}
+
+static unsigned int vhost_user_get_used_memslots(void)
+{
+    return vhost_user_used_memslots;
+}
+
 const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_backend_init = vhost_user_backend_init,
@@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
         .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
         .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
         .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
+        .vhost_set_used_memslots = vhost_user_set_used_memslots,
+        .vhost_get_used_memslots = vhost_user_get_used_memslots,
 };
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 1a1384e7a6..7f36d7af25 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -45,20 +45,22 @@
 static struct vhost_log *vhost_log;
 static struct vhost_log *vhost_log_shm;
 
-static unsigned int used_memslots;
 static QLIST_HEAD(, vhost_dev) vhost_devices =
     QLIST_HEAD_INITIALIZER(vhost_devices);
 
+bool used_memslots_exceeded;
+
 bool vhost_has_free_slot(void)
 {
-    unsigned int slots_limit = ~0U;
     struct vhost_dev *hdev;
 
     QLIST_FOREACH(hdev, &vhost_devices, entry) {
-        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
-        slots_limit = MIN(slots_limit, r);
+        if (hdev->vhost_ops->vhost_get_used_memslots() >=
+            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+            return false;
+        }
     }
-    return slots_limit > used_memslots;
+    return true;
 }
 
 static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -502,7 +504,6 @@ static void vhost_commit(MemoryListener *listener)
                        dev->n_mem_sections * sizeof dev->mem->regions[0];
     dev->mem = g_realloc(dev->mem, regions_size);
     dev->mem->nregions = dev->n_mem_sections;
-    used_memslots = dev->mem->nregions;
     for (i = 0; i < dev->n_mem_sections; i++) {
         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
         struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -678,6 +679,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
         memory_region_ref(section->mr);
     }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
 }
 
 /* Used for both add and nop callbacks */
@@ -693,6 +695,17 @@ static void vhost_region_addnop(MemoryListener *listener,
     vhost_region_add_section(dev, section);
 }
 
+static void vhost_region_del(MemoryListener *listener,
+                             MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    if (!vhost_section(dev, section)) {
+        return;
+    }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
+}
+
 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 {
     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
@@ -1248,6 +1261,19 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
     event_notifier_cleanup(&vq->masked_notifier);
 }
 
+static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
+{
+    if (hdev->vhost_ops->vhost_get_used_memslots() >
+        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+        error_report("vhost backend memory slots limit is less"
+                " than current number of present memory slots");
+        used_memslots_exceeded = true;
+        return true;
+    }
+    used_memslots_exceeded = false;
+    return false;
+}
+
 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                    VhostBackendType backend_type, uint32_t busyloop_timeout)
 {
@@ -1300,6 +1326,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->memory_listener = (MemoryListener) {
         .begin = vhost_begin,
         .commit = vhost_commit,
+        .region_del = vhost_region_del,
         .region_add = vhost_region_addnop,
         .region_nop = vhost_region_addnop,
         .log_start = vhost_log_start,
@@ -1346,9 +1373,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
 
-    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
-        error_report("vhost backend memory slots limit is less"
-                " than current number of present memory slots");
+    /*
+     * If the VM was started without any vhost device,
+     * vhost_get_used_memslots() stays 0 until the memory listener above
+     * has run, so a check at hot-plug time for the first vhost device
+     * would trivially pass. Re-check the limit here.
+     */
+    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
         r = -1;
         if (busyloop_timeout) {
             goto fail_busyloop;
@@ -1773,3 +1804,8 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
 
     return -1;
 }
+
+bool used_memslots_is_exceeded(void)
+{
+    return used_memslots_exceeded;
+}
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 8825bd278f..ed43a93692 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
 
 typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
 
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
+typedef unsigned int (*vhost_get_used_memslots_op)(void);
+
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_backend_init vhost_backend_init;
@@ -168,6 +171,8 @@ typedef struct VhostOps {
     vhost_vq_get_addr_op  vhost_vq_get_addr;
     vhost_get_device_id_op vhost_get_device_id;
     vhost_force_iommu_op vhost_force_iommu;
+    vhost_set_used_memslots_op vhost_set_used_memslots;
+    vhost_get_used_memslots_op vhost_get_used_memslots;
 } VhostOps;
 
 extern const VhostOps user_ops;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 767a95ec0b..bf7cec445f 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -144,4 +144,5 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
                            struct vhost_inflight *inflight);
 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                            struct vhost_inflight *inflight);
+bool used_memslots_is_exceeded(void);
 #endif
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 17532daaf3..2f0216b518 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -20,6 +20,7 @@
 #include "qemu/error-report.h"
 #include "qemu/option.h"
 #include "trace.h"
+#include "hw/virtio/vhost.h"
 
 typedef struct NetVhostUserState {
     NetClientState nc;
@@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
         qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                  net_vhost_user_event, NULL, nc0->name, NULL,
                                  true);
+
+        if (used_memslots_is_exceeded()) {
+            error_report("used memslots exceeded the backend limit, "
+                         "quitting loop");
+            goto err;
+        }
     } while (!s->started);
 
     assert(s->vhost_net);
-- 
2.27.0.dirty



^ permalink raw reply related	[flat|nested] 23+ messages in thread
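
vhost_has_free_slot() in this series stops computing a single global
minimum limit and instead refuses hot plug as soon as any one device
has reached its own backend's limit. Here is a minimal sketch of that
per-device pattern; toy_dev and its function pointers are simplified
stand-ins for struct vhost_dev and the VhostOps callbacks:

/*
 * Sketch of the per-device check in vhost_has_free_slot(): hot plug
 * is refused once any one device reaches its own backend limit.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
    unsigned int (*get_used_memslots)(void);
    unsigned int (*memslots_limit)(void);
};

static bool has_free_slot(const struct toy_dev *devs, int ndevs)
{
    for (int i = 0; i < ndevs; i++) {
        if (devs[i].get_used_memslots() >= devs[i].memslots_limit()) {
            return false;           /* this backend is already full */
        }
    }
    return true;
}

/* vhost-user: only 5 of 8 regions are fd-backed, limit is 8 */
static unsigned int user_used(void)  { return 5; }
static unsigned int user_limit(void) { return 8; }
/* vhost-kernel: all 8 regions count, but its limit is far higher */
static unsigned int kern_used(void)  { return 8; }
static unsigned int kern_limit(void) { return 64; }

int main(void)
{
    struct toy_dev devs[] = {
        { user_used, user_limit },
        { kern_used, kern_limit },
    };

    printf("free slot: %s\n", has_free_slot(devs, 2) ? "yes" : "no");
    return 0;
}

In the sketch, neither backend has reached its own limit, so hot plug
is allowed even though the kernel backend already counts 8 regions.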

* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-08-07  9:47 Jiajun Chen
@ 2020-08-07  9:55 ` no-reply
  0 siblings, 0 replies; 23+ messages in thread
From: no-reply @ 2020-08-07  9:55 UTC (permalink / raw)
  To: chenjiajun8; +Cc: zhang.zhanghailiang, jasowang, qemu-devel, xiexiangyou, mst

Patchew URL: https://patchew.org/QEMU/20200807094750.13404-1-chenjiajun8@huawei.com/



Hi,

This series failed the docker-quick@centos7 build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.

=== TEST SCRIPT BEGIN ===
#!/bin/bash
make docker-image-centos7 V=1 NETWORK=1
time make docker-test-quick@centos7 SHOW_ENV=1 J=14 NETWORK=1
=== TEST SCRIPT END ===




The full log is available at
http://patchew.org/logs/20200807094750.13404-1-chenjiajun8@huawei.com/testing.docker-quick@centos7/?type=message.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com

^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH] vhost-user: add separate memslot counter for vhost-user
@ 2020-08-07  9:47 Jiajun Chen
  2020-08-07  9:55 ` no-reply
  0 siblings, 1 reply; 23+ messages in thread
From: Jiajun Chen @ 2020-08-07  9:47 UTC (permalink / raw)
  To: mst, jasowang; +Cc: qemu-devel, xiexiangyou, zhang.zhanghailiang

used_memslots is currently equal to dev->mem->nregions. That is
correct for vhost-kernel, but not for vhost-user, which can only use
memory regions backed by a file descriptor, and not every memory
region has one.
This matters in practice: if used_memslots is 8 but only 5 memory
slots are actually usable by vhost-user, hot-plugging new RAM fails
because vhost_has_free_slot() returns false, even though the hot plug
would in fact be safe.

Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
---
 hw/virtio/vhost-backend.c         | 14 ++++++++
 hw/virtio/vhost-user.c            | 28 ++++++++++++++++
 hw/virtio/vhost.c                 | 54 +++++++++++++++++++++++++------
 include/hw/virtio/vhost-backend.h |  5 +++
 include/hw/virtio/vhost.h         |  1 +
 net/vhost-user.c                  |  7 ++++
 6 files changed, 100 insertions(+), 9 deletions(-)

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 782b1d67d9..35eec7e166 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -20,6 +20,8 @@
 #include <linux/vhost.h>
 #include <sys/ioctl.h>
 
+static unsigned int vhost_kernel_used_memslots;
+
 static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                              void *arg)
 {
@@ -238,6 +240,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
         qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
 }
 
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{
+    vhost_kernel_used_memslots = dev->mem->nregions;
+}
+
+static unsigned int vhost_kernel_get_used_memslots(void)
+{
+    return vhost_kernel_used_memslots;
+}
+
 static const VhostOps kernel_ops = {
         .backend_type = VHOST_BACKEND_TYPE_KERNEL,
         .vhost_backend_init = vhost_kernel_init,
@@ -269,6 +281,8 @@ static const VhostOps kernel_ops = {
 #endif /* CONFIG_VHOST_VSOCK */
         .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
         .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
+        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
+        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
 };
 #endif
 
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 31231218dc..04d20fc3ee 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -232,6 +232,7 @@ static VhostUserMsg m __attribute__ ((unused));
 
 /* The version of the protocol we support */
 #define VHOST_USER_VERSION    (0x1)
+static unsigned int vhost_user_used_memslots;
 
 struct vhost_user {
     struct vhost_dev *dev;
@@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
     user->chr = NULL;
 }
 
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{
+    unsigned int counter = 0;
+    int i;
+
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        ram_addr_t offset;
+        MemoryRegion *mr;
+
+        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
+                                    &offset);
+        if (mr && memory_region_get_fd(mr) > 0) {
+            counter++;
+        }
+    }
+    vhost_user_used_memslots = counter;
+}
+
+static unsigned int vhost_user_get_used_memslots(void)
+{
+    return vhost_user_used_memslots;
+}
+
 const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_backend_init = vhost_user_backend_init,
@@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
         .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
         .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
         .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
+        .vhost_set_used_memslots = vhost_user_set_used_memslots,
+        .vhost_get_used_memslots = vhost_user_get_used_memslots,
 };
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 1a1384e7a6..7f36d7af25 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -45,20 +45,22 @@
 static struct vhost_log *vhost_log;
 static struct vhost_log *vhost_log_shm;
 
-static unsigned int used_memslots;
 static QLIST_HEAD(, vhost_dev) vhost_devices =
     QLIST_HEAD_INITIALIZER(vhost_devices);
 
+bool used_memslots_exceeded;
+
 bool vhost_has_free_slot(void)
 {
-    unsigned int slots_limit = ~0U;
     struct vhost_dev *hdev;
 
     QLIST_FOREACH(hdev, &vhost_devices, entry) {
-        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
-        slots_limit = MIN(slots_limit, r);
+        if (hdev->vhost_ops->vhost_get_used_memslots() >=
+            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+            return false;
+        }
     }
-    return slots_limit > used_memslots;
+    return true;
 }
 
 static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -502,7 +504,6 @@ static void vhost_commit(MemoryListener *listener)
                        dev->n_mem_sections * sizeof dev->mem->regions[0];
     dev->mem = g_realloc(dev->mem, regions_size);
     dev->mem->nregions = dev->n_mem_sections;
-    used_memslots = dev->mem->nregions;
     for (i = 0; i < dev->n_mem_sections; i++) {
         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
         struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -678,6 +679,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
         memory_region_ref(section->mr);
     }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
 }
 
 /* Used for both add and nop callbacks */
@@ -693,6 +695,17 @@ static void vhost_region_addnop(MemoryListener *listener,
     vhost_region_add_section(dev, section);
 }
 
+static void vhost_region_del(MemoryListener *listener,
+                             MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    if (!vhost_section(dev, section)) {
+        return;
+    }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
+}
+
 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 {
     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
@@ -1248,6 +1261,19 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
     event_notifier_cleanup(&vq->masked_notifier);
 }
 
+static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
+{
+    if (hdev->vhost_ops->vhost_get_used_memslots() >
+        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+        error_report("vhost backend memory slots limit is less"
+                " than current number of present memory slots");
+        used_memslots_exceeded = true;
+        return true;
+    }
+    used_memslots_exceeded = false;
+    return false;
+}
+
 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                    VhostBackendType backend_type, uint32_t busyloop_timeout)
 {
@@ -1300,6 +1326,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->memory_listener = (MemoryListener) {
         .begin = vhost_begin,
         .commit = vhost_commit,
+        .region_del = vhost_region_del,
         .region_add = vhost_region_addnop,
         .region_nop = vhost_region_addnop,
         .log_start = vhost_log_start,
@@ -1346,9 +1373,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
 
-    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
-        error_report("vhost backend memory slots limit is less"
-                " than current number of present memory slots");
+    /*
+     * If the VM was started without any vhost device,
+     * vhost_get_used_memslots() stays 0 until the memory listener above
+     * has run, so a check at hot-plug time for the first vhost device
+     * would trivially pass. Re-check the limit here.
+     */
+    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
         r = -1;
         if (busyloop_timeout) {
             goto fail_busyloop;
@@ -1773,3 +1804,8 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
 
     return -1;
 }
+
+bool used_memslots_is_exceeded(void)
+{
+    return used_memslots_exceeded;
+}
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 8825bd278f..ed43a93692 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
 
 typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
 
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
+typedef unsigned int (*vhost_get_used_memslots_op)(void);
+
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_backend_init vhost_backend_init;
@@ -168,6 +171,8 @@ typedef struct VhostOps {
     vhost_vq_get_addr_op  vhost_vq_get_addr;
     vhost_get_device_id_op vhost_get_device_id;
     vhost_force_iommu_op vhost_force_iommu;
+    vhost_set_used_memslots_op vhost_set_used_memslots;
+    vhost_get_used_memslots_op vhost_get_used_memslots;
 } VhostOps;
 
 extern const VhostOps user_ops;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 767a95ec0b..bf7cec445f 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -144,4 +144,5 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
                            struct vhost_inflight *inflight);
 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                            struct vhost_inflight *inflight);
+bool used_memslots_is_exceeded(void);
 #endif
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 17532daaf3..2f0216b518 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -20,6 +20,7 @@
 #include "qemu/error-report.h"
 #include "qemu/option.h"
 #include "trace.h"
+#include "hw/virtio/vhost.h"
 
 typedef struct NetVhostUserState {
     NetClientState nc;
@@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
         qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                  net_vhost_user_event, NULL, nc0->name, NULL,
                                  true);
+
+        if (used_memslots_is_exceeded()) {
+            error_report("used memslots exceeded the backend limit, "
+                         "quitting loop");
+            goto err;
+        }
     } while (!s->started);
 
     assert(s->vhost_net);
-- 
2.27.0.dirty



^ permalink raw reply related	[flat|nested] 23+ messages in thread
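
The patch keeps the used-memslot count fresh by recomputing it from
both the region_add and the new region_del listener callbacks, so a
reader such as vhost_has_free_slot() sees a cached, always-current
value. A simplified sketch of that caching pattern follows; the
listener struct and the DEV_OF shortcut are toy stand-ins, not the
QEMU MemoryListener API:

/*
 * Sketch of the listener pattern: the cached used-memslot count is
 * recomputed on every region add and delete, so readers never rescan
 * the region table.
 */
#include <stdio.h>

struct toy_listener {
    void (*region_add)(struct toy_listener *l);
    void (*region_del)(struct toy_listener *l);
};

struct toy_vhost_dev {
    struct toy_listener listener;   /* must stay the first member */
    unsigned int nregions;
    unsigned int used_memslots;     /* cached, kept fresh by the hooks */
};

/* container_of equivalent; valid because listener is the first member */
#define DEV_OF(l) ((struct toy_vhost_dev *)(l))

static void recount(struct toy_vhost_dev *dev)
{
    /* a real backend would walk the region table and filter by fd */
    dev->used_memslots = dev->nregions;
}

static void on_add(struct toy_listener *l)
{
    struct toy_vhost_dev *dev = DEV_OF(l);
    dev->nregions++;
    recount(dev);
}

static void on_del(struct toy_listener *l)
{
    struct toy_vhost_dev *dev = DEV_OF(l);
    dev->nregions--;
    recount(dev);
}

int main(void)
{
    struct toy_vhost_dev dev = { { on_add, on_del }, 0, 0 };

    dev.listener.region_add(&dev.listener);
    dev.listener.region_add(&dev.listener);
    dev.listener.region_del(&dev.listener);
    printf("used memslots = %u\n", dev.used_memslots);     /* prints 1 */
    return 0;
}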

* Re: [PATCH] vhost-user: add separate memslot counter for vhost-user
  2020-08-07  8:19 Jiajun Chen
@ 2020-08-07  8:25 ` no-reply
  0 siblings, 0 replies; 23+ messages in thread
From: no-reply @ 2020-08-07  8:25 UTC (permalink / raw)
  To: chenjiajun8; +Cc: zhang.zhanghailiang, jasowang, qemu-devel, xiexiangyou, mst

Patchew URL: https://patchew.org/QEMU/20200807081900.69156-1-chenjiajun8@huawei.com/



Hi,

This series failed the docker-quick@centos7 build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.

=== TEST SCRIPT BEGIN ===
#!/bin/bash
make docker-image-centos7 V=1 NETWORK=1
time make docker-test-quick@centos7 SHOW_ENV=1 J=14 NETWORK=1
=== TEST SCRIPT END ===




The full log is available at
http://patchew.org/logs/20200807081900.69156-1-chenjiajun8@huawei.com/testing.docker-quick@centos7/?type=message.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com

^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH] vhost-user: add separate memslot counter for vhost-user
@ 2020-08-07  8:19 Jiajun Chen
  2020-08-07  8:25 ` no-reply
  0 siblings, 1 reply; 23+ messages in thread
From: Jiajun Chen @ 2020-08-07  8:19 UTC (permalink / raw)
  To: mst, jasowang; +Cc: qemu-devel, xiexiangyou, zhang.zhanghailiang

used_memslots is currently equal to dev->mem->nregions. That is
correct for vhost-kernel, but not for vhost-user, which can only use
memory regions backed by a file descriptor, and not every memory
region has one.
This matters in practice: if used_memslots is 8 but only 5 memory
slots are actually usable by vhost-user, hot-plugging new RAM fails
because vhost_has_free_slot() returns false, even though the hot plug
would in fact be safe.

Signed-off-by: Jiajun Chen <chenjiajun8@huawei.com>
Signed-off-by: Jianjay Zhou <jianjay.zhou@huawei.com>
---
 hw/virtio/vhost-backend.c         | 14 ++++++++
 hw/virtio/vhost-user.c            | 28 ++++++++++++++++
 hw/virtio/vhost.c                 | 54 +++++++++++++++++++++++++------
 include/hw/virtio/vhost-backend.h |  5 +++
 include/hw/virtio/vhost.h         |  1 +
 net/vhost-user.c                  |  7 ++++
 6 files changed, 100 insertions(+), 9 deletions(-)

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 782b1d67d9..35eec7e166 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -20,6 +20,8 @@
 #include <linux/vhost.h>
 #include <sys/ioctl.h>
 
+static unsigned int vhost_kernel_used_memslots;
+
 static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                              void *arg)
 {
@@ -238,6 +240,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
         qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
 }
 
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{
+    vhost_kernel_used_memslots = dev->mem->nregions;
+}
+
+static unsigned int vhost_kernel_get_used_memslots(void)
+{
+    return vhost_kernel_used_memslots;
+}
+
 static const VhostOps kernel_ops = {
         .backend_type = VHOST_BACKEND_TYPE_KERNEL,
         .vhost_backend_init = vhost_kernel_init,
@@ -269,6 +281,8 @@ static const VhostOps kernel_ops = {
 #endif /* CONFIG_VHOST_VSOCK */
         .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
         .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
+        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
+        .vhost_get_used_memslots = vhost_kernel_get_used_memslots,
 };
 #endif
 
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 31231218dc..04d20fc3ee 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -232,6 +232,7 @@ static VhostUserMsg m __attribute__ ((unused));
 
 /* The version of the protocol we support */
 #define VHOST_USER_VERSION    (0x1)
+static unsigned int vhost_user_used_memslots;
 
 struct vhost_user {
     struct vhost_dev *dev;
@@ -2354,6 +2355,31 @@ void vhost_user_cleanup(VhostUserState *user)
     user->chr = NULL;
 }
 
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{
+    unsigned int counter = 0;
+    int i;
+
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        ram_addr_t offset;
+        MemoryRegion *mr;
+
+        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
+                                    &offset);
+        if (mr && memory_region_get_fd(mr) > 0) {
+            counter++;
+        }
+    }
+    vhost_user_used_memslots = counter;
+}
+
+static unsigned int vhost_user_get_used_memslots(void)
+{
+    return vhost_user_used_memslots;
+}
+
 const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_backend_init = vhost_user_backend_init,
@@ -2387,4 +2413,6 @@ const VhostOps user_ops = {
         .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
         .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
         .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
+        .vhost_set_used_memslots = vhost_user_set_used_memslots,
+        .vhost_get_used_memslots = vhost_user_get_used_memslots,
 };
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 1a1384e7a6..7f36d7af25 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -45,20 +45,22 @@
 static struct vhost_log *vhost_log;
 static struct vhost_log *vhost_log_shm;
 
-static unsigned int used_memslots;
 static QLIST_HEAD(, vhost_dev) vhost_devices =
     QLIST_HEAD_INITIALIZER(vhost_devices);
 
+bool used_memslots_exceeded;
+
 bool vhost_has_free_slot(void)
 {
-    unsigned int slots_limit = ~0U;
     struct vhost_dev *hdev;
 
     QLIST_FOREACH(hdev, &vhost_devices, entry) {
-        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
-        slots_limit = MIN(slots_limit, r);
+        if (hdev->vhost_ops->vhost_get_used_memslots() >=
+            hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+            return false;
+        }
     }
-    return slots_limit > used_memslots;
+    return true;
 }
 
 static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -502,7 +504,6 @@ static void vhost_commit(MemoryListener *listener)
                        dev->n_mem_sections * sizeof dev->mem->regions[0];
     dev->mem = g_realloc(dev->mem, regions_size);
     dev->mem->nregions = dev->n_mem_sections;
-    used_memslots = dev->mem->nregions;
     for (i = 0; i < dev->n_mem_sections; i++) {
         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
         struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -678,6 +679,7 @@ static void vhost_region_add_section(struct vhost_dev *dev,
         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
         memory_region_ref(section->mr);
     }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
 }
 
 /* Used for both add and nop callbacks */
@@ -693,6 +695,17 @@ static void vhost_region_addnop(MemoryListener *listener,
     vhost_region_add_section(dev, section);
 }
 
+static void vhost_region_del(MemoryListener *listener,
+                             MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    if (!vhost_section(dev, section)) {
+        return;
+    }
+    dev->vhost_ops->vhost_set_used_memslots(dev);
+}
+
 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 {
     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
@@ -1248,6 +1261,19 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
     event_notifier_cleanup(&vq->masked_notifier);
 }
 
+static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev)
+{
+    if (hdev->vhost_ops->vhost_get_used_memslots() >
+        hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+        error_report("vhost backend memory slots limit is less"
+                " than current number of present memory slots");
+        used_memslots_exceeded = true;
+        return true;
+    }
+    used_memslots_exceeded = false;
+    return false;
+}
+
 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                    VhostBackendType backend_type, uint32_t busyloop_timeout)
 {
@@ -1300,6 +1326,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->memory_listener = (MemoryListener) {
         .begin = vhost_begin,
         .commit = vhost_commit,
+        .region_del = vhost_region_del,
         .region_add = vhost_region_addnop,
         .region_nop = vhost_region_addnop,
         .log_start = vhost_log_start,
@@ -1346,9 +1373,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
 
-    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
-        error_report("vhost backend memory slots limit is less"
-                " than current number of present memory slots");
+    /*
+     * If the VM was started without any vhost device,
+     * vhost_get_used_memslots() stays 0 until the memory listener above
+     * has run, so a check at hot-plug time for the first vhost device
+     * would trivially pass. Re-check the limit here.
+     */
+    if (vhost_dev_used_memslots_is_exceeded(hdev)) {
         r = -1;
         if (busyloop_timeout) {
             goto fail_busyloop;
@@ -1773,3 +1804,8 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
 
     return -1;
 }
+
+bool used_memslots_is_exceeded(void)
+{
+    return used_memslots_exceeded;
+}
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 8825bd278f..ed43a93692 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -124,6 +124,9 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
 
 typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
 
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
+typedef unsigned int (*vhost_get_used_memslots_op)(void);
+
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_backend_init vhost_backend_init;
@@ -168,6 +171,8 @@ typedef struct VhostOps {
     vhost_vq_get_addr_op  vhost_vq_get_addr;
     vhost_get_device_id_op vhost_get_device_id;
     vhost_force_iommu_op vhost_force_iommu;
+    vhost_set_used_memslots_op vhost_set_used_memslots;
+    vhost_get_used_memslots_op vhost_get_used_memslots;
 } VhostOps;
 
 extern const VhostOps user_ops;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 767a95ec0b..bf7cec445f 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -144,4 +144,5 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
                            struct vhost_inflight *inflight);
 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                            struct vhost_inflight *inflight);
+bool used_memslots_is_exceeded(void);
 #endif
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 17532daaf3..2f0216b518 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -20,6 +20,7 @@
 #include "qemu/error-report.h"
 #include "qemu/option.h"
 #include "trace.h"
+#include "hw/virtio/vhost.h"
 
 typedef struct NetVhostUserState {
     NetClientState nc;
@@ -347,6 +348,12 @@ static int net_vhost_user_init(NetClientState *peer, const char *device,
         qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                  net_vhost_user_event, NULL, nc0->name, NULL,
                                  true);
+
+        if (used_memslots_is_exceeded()) {
+            error_report("used memslots exceeded the backend limit, "
+                         "quitting loop");
+            goto err;
+        }
     } while (!s->started);
 
     assert(s->vhost_net);
-- 
2.27.0.dirty



^ permalink raw reply related	[flat|nested] 23+ messages in thread
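
Finally, the net_vhost_user_init() hunk turns what could be an
endless retry loop into a fail-fast path: vhost_dev_init() records
whether the memslot limit was exceeded, and the init loop consults
that flag and bails out. A sketch of that control flow, with
illustrative stand-in names rather than the real QEMU functions:

/*
 * Sketch of the fail-fast path added to the init loop: device init
 * records whether the memslot limit was exceeded, and the caller
 * checks that flag instead of retrying forever.
 */
#include <stdbool.h>
#include <stdio.h>

static bool exceeded_flag;

static bool used_slots_exceeded(void)
{
    return exceeded_flag;
}

static int dev_init(unsigned int used, unsigned int limit)
{
    exceeded_flag = used > limit;
    return exceeded_flag ? -1 : 0;
}

static int net_init(unsigned int used, unsigned int limit)
{
    bool started = false;

    do {
        if (dev_init(used, limit) == 0) {
            started = true;
        }
        if (used_slots_exceeded()) {
            fprintf(stderr, "used memslots exceeded the backend limit\n");
            return -1;              /* quit the loop instead of spinning */
        }
    } while (!started);
    return 0;
}

int main(void)
{
    printf("within limit: %d\n", net_init(5, 8));   /* 0, succeeds */
    printf("over limit:   %d\n", net_init(9, 8));   /* -1, fails fast */
    return 0;
}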

end of thread, other threads:[~2020-10-30  8:40 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-09-28 13:17 [PATCH] vhost-user: add separate memslot counter for vhost-user Jiajun Chen
2020-10-02  2:05 ` Raphael Norwitz
2020-10-12 11:12   ` chenjiajun
2020-10-14  1:22     ` Raphael Norwitz
2020-10-06  9:48 ` Igor Mammedov
2020-10-14  0:58   ` Raphael Norwitz
2020-10-14  7:08     ` Michael S. Tsirkin
2020-10-14 16:11       ` Raphael Norwitz
2020-10-14 16:26         ` Michael S. Tsirkin
2020-10-14 17:21           ` Raphael Norwitz
2020-10-14 17:54             ` Michael S. Tsirkin
2020-10-21 14:34         ` Igor Mammedov
2020-10-30  8:39 ` Michael S. Tsirkin
  -- strict thread matches above, loose matches on Subject: below --
2020-09-08  6:11 Jiajun Chen
2020-09-08  8:37 ` Michael S. Tsirkin
2020-09-15  1:27 ` Raphael Norwitz
2020-08-11  1:43 Jiajun Chen
2020-08-27 12:11 ` Michael S. Tsirkin
2020-09-02  5:02 ` Raphael Norwitz
2020-08-07  9:47 Jiajun Chen
2020-08-07  9:55 ` no-reply
2020-08-07  8:19 Jiajun Chen
2020-08-07  8:25 ` no-reply
