* [Qemu-devel] [PATCH v8 0/2] vhost: used_memslots limit check fixes and refactoring
@ 2018-02-27  7:10 Jay Zhou
  2018-02-27  7:10 ` [Qemu-devel] [PATCH v8 1/2] vhost: fix memslot limit check Jay Zhou
  2018-02-27  7:10 ` [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring Jay Zhou
  0 siblings, 2 replies; 6+ messages in thread
From: Jay Zhou @ 2018-02-27  7:10 UTC
  To: qemu-devel
  Cc: mst, imammedo, weidong.huang, wangxinxin.wang, arei.gonglei,
	jianjay.zhou, liuzhe13

v8:
 - rebased on master
v7:
 - rebased on master
v2 ... v6:
 - deleted the "used_memslots" global variable and added separate
   counters for vhost-user and vhost-kernel
 - refined the functions and commit logs
 - used_memslots refactoring

Jay Zhou (2):
  vhost: fix memslot limit check
  vhost: used_memslots refactoring

 hw/virtio/vhost-backend.c         | 15 +++++++-
 hw/virtio/vhost-user.c            | 77 ++++++++++++++++++++++++++-------------
 hw/virtio/vhost.c                 | 30 ++++++++-------
 include/hw/virtio/vhost-backend.h |  6 ++-
 4 files changed, 86 insertions(+), 42 deletions(-)

--
1.8.3.1

* [Qemu-devel] [PATCH v8 1/2] vhost: fix memslot limit check
  2018-02-27  7:10 [Qemu-devel] [PATCH v8 0/2] vhost: used_memslots limit check fixes and refactoring Jay Zhou
@ 2018-02-27  7:10 ` Jay Zhou
  2018-02-27  7:10 ` [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring Jay Zhou
  1 sibling, 0 replies; 6+ messages in thread
From: Jay Zhou @ 2018-02-27  7:10 UTC
  To: qemu-devel
  Cc: mst, imammedo, weidong.huang, wangxinxin.wang, arei.gonglei,
	jianjay.zhou, liuzhe13

Since used_memslots is only updated to its actual value after the
memory listener has been registered for the first time, move the
memslots limit check to after that registration, where the value is
valid.
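
As a minimal standalone sketch of the problem (simplified names, not
the actual QEMU code), the check is only meaningful once the
listener's commit callback has run:

#include <stdio.h>

/*
 * used_memslots is refreshed by the memory listener's commit callback,
 * so a limit check done before the listener is registered compares
 * against a stale value (0 on first init) and can pass when it should
 * fail.
 */
static unsigned int used_memslots;

static void listener_commit(unsigned int present_slots)
{
    used_memslots = present_slots;      /* now reflects reality */
}

static int dev_init(unsigned int backend_limit, unsigned int present_slots)
{
    /* Checking used_memslots here would see 0 and always pass. */

    listener_commit(present_slots);     /* stands in for registration */

    /* After registration the value is valid, so check here instead. */
    if (used_memslots > backend_limit) {
        fprintf(stderr, "backend memslot limit exceeded\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    return dev_init(8, 16) == -1 ? 0 : 1;   /* 16 slots exceed limit 8 */
}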

Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
---
 hw/virtio/vhost.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 4a44e6e..4a583a3 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1106,13 +1106,6 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
         goto fail;
     }
 
-    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
-        error_report("vhost backend memory slots limit is less"
-                " than current number of present memory slots");
-        r = -1;
-        goto fail;
-    }
-
     r = hdev->vhost_ops->vhost_set_owner(hdev);
     if (r < 0) {
         VHOST_OPS_DEBUG("vhost_set_owner failed");
@@ -1192,6 +1185,18 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->started = false;
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
+
+    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+        error_report("vhost backend memory slots limit is less"
+                " than current number of present memory slots");
+        r = -1;
+        if (busyloop_timeout) {
+            goto fail_busyloop;
+        } else {
+            goto fail;
+        }
+    }
+
     return 0;
 
 fail_busyloop:
-- 
1.8.3.1

* [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring
  2018-02-27  7:10 [Qemu-devel] [PATCH v8 0/2] vhost: used_memslots limit check fixes and refactoring Jay Zhou
  2018-02-27  7:10 ` [Qemu-devel] [PATCH v8 1/2] vhost: fix memslot limit check Jay Zhou
@ 2018-02-27  7:10 ` Jay Zhou
  2018-03-01 16:16   ` Michael S. Tsirkin
  1 sibling, 1 reply; 6+ messages in thread
From: Jay Zhou @ 2018-02-27  7:10 UTC
  To: qemu-devel
  Cc: mst, imammedo, weidong.huang, wangxinxin.wang, arei.gonglei,
	jianjay.zhou, liuzhe13

Used_memslots is shared by vhost-kernel and vhost-user and is set to
dev->mem->nregions, which is correct for vhost-kernel but not for
vhost-user: the latter only uses memory regions that are backed by a
file descriptor. E.g. if a VM has a vhost-user NIC and 8 memory slots
(the vhost-user memslot upper limit), hotplugging a new DIMM device
fails because vhost_has_free_slot() finds no free slot left. It
should succeed when only some of the memory slots are backed by a
file descriptor, so track used memslots for vhost-user and
vhost-kernel separately.
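
As a standalone sketch of the accounting rule (simplified types, not
QEMU's API): vhost-kernel occupies one backend memslot per region,
while vhost-user only occupies a slot for fd-backed regions, so only
those should count against the vhost-user limit:

#include <stdbool.h>
#include <stddef.h>

struct region { int fd; };              /* fd <= 0: no file descriptor */

/* vhost-kernel: every region occupies a backend memslot. */
static size_t kernel_used_memslots(const struct region *r, size_t n)
{
    (void)r;
    return n;
}

/* vhost-user: only fd-backed regions are sent to the backend. */
static size_t user_used_memslots(const struct region *r, size_t n)
{
    size_t used = 0;
    for (size_t i = 0; i < n; i++) {
        if (r[i].fd > 0) {
            used++;
        }
    }
    return used;
}

static bool user_has_free_memslots(const struct region *r, size_t n,
                                   size_t limit)
{
    return user_used_memslots(r, n) < limit;
}

int main(void)
{
    /* 8 regions, but only 3 fd-backed: with a limit of 8 the vhost-user
     * backend still has free slots, so DIMM hotplug should succeed. */
    struct region regs[8] = {
        { 3 }, { 4 }, { 5 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }
    };
    return user_has_free_memslots(regs, 8, 8) ? 0 : 1;
}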

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
Signed-off-by: Liuzhe <liuzhe13@huawei.com>
---
 hw/virtio/vhost-backend.c         | 15 +++++++-
 hw/virtio/vhost-user.c            | 77 ++++++++++++++++++++++++++-------------
 hw/virtio/vhost.c                 | 13 +++----
 include/hw/virtio/vhost-backend.h |  6 ++-
 4 files changed, 75 insertions(+), 36 deletions(-)

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 7f09efa..59def69 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -15,6 +15,8 @@
 #include "hw/virtio/vhost-backend.h"
 #include "qemu/error-report.h"
 
+static unsigned int vhost_kernel_used_memslots;
+
 static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                              void *arg)
 {
@@ -62,6 +64,11 @@ static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
     return limit;
 }
 
+static bool vhost_kernel_has_free_memslots(struct vhost_dev *dev)
+{
+    return vhost_kernel_used_memslots < vhost_kernel_memslots_limit(dev);
+}
+
 static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
 {
@@ -233,11 +240,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
         qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
 }
 
+static void vhost_kernel_set_used_memslots(struct vhost_dev *dev)
+{
+    vhost_kernel_used_memslots = dev->mem->nregions;
+}
+
 static const VhostOps kernel_ops = {
         .backend_type = VHOST_BACKEND_TYPE_KERNEL,
         .vhost_backend_init = vhost_kernel_init,
         .vhost_backend_cleanup = vhost_kernel_cleanup,
-        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
+        .vhost_backend_has_free_memslots = vhost_kernel_has_free_memslots,
         .vhost_net_set_backend = vhost_kernel_net_set_backend,
         .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
         .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
@@ -264,6 +276,7 @@ static const VhostOps kernel_ops = {
 #endif /* CONFIG_VHOST_VSOCK */
         .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
         .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
+        .vhost_set_used_memslots = vhost_kernel_set_used_memslots,
 };
 
 int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 6eb9798..f732c80 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -147,6 +147,8 @@ static VhostUserMsg m __attribute__ ((unused));
 /* The version of the protocol we support */
 #define VHOST_USER_VERSION    (0x1)
 
+static bool vhost_user_free_memslots = true;
+
 struct vhost_user {
     CharBackend *chr;
     int slave_fd;
@@ -314,12 +316,43 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
     return 0;
 }
 
+static int vhost_user_prepare_msg(struct vhost_dev *dev, VhostUserMemory *mem,
+                                  int *fds)
+{
+    int i, fd;
+
+    vhost_user_free_memslots = true;
+    for (i = 0, mem->nregions = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        ram_addr_t offset;
+        MemoryRegion *mr;
+
+        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
+                                     &offset);
+        fd = memory_region_get_fd(mr);
+        if (fd > 0) {
+            if (mem->nregions == VHOST_MEMORY_MAX_NREGIONS) {
+                vhost_user_free_memslots = false;
+                return -1;
+            }
+
+            mem->regions[mem->nregions].userspace_addr = reg->userspace_addr;
+            mem->regions[mem->nregions].memory_size = reg->memory_size;
+            mem->regions[mem->nregions].guest_phys_addr = reg->guest_phys_addr;
+            mem->regions[mem->nregions].mmap_offset = offset;
+            fds[mem->nregions++] = fd;
+        }
+    }
+
+    return 0;
+}
+
 static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                     struct vhost_memory *mem)
 {
     int fds[VHOST_MEMORY_MAX_NREGIONS];
-    int i, fd;
-    size_t fd_num = 0;
+    size_t fd_num;
     bool reply_supported = virtio_has_feature(dev->protocol_features,
                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
 
@@ -332,29 +365,12 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
     }
 
-    for (i = 0; i < dev->mem->nregions; ++i) {
-        struct vhost_memory_region *reg = dev->mem->regions + i;
-        ram_addr_t offset;
-        MemoryRegion *mr;
-
-        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
-        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
-                                     &offset);
-        fd = memory_region_get_fd(mr);
-        if (fd > 0) {
-            if (fd_num == VHOST_MEMORY_MAX_NREGIONS) {
-                error_report("Failed preparing vhost-user memory table msg");
-                return -1;
-            }
-            msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
-            msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
-            msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
-            msg.payload.memory.regions[fd_num].mmap_offset = offset;
-            fds[fd_num++] = fd;
-        }
+    if (vhost_user_prepare_msg(dev, &msg.payload.memory, fds) < 0) {
+        error_report("Failed preparing vhost-user memory table msg");
+        return -1;
     }
 
-    msg.payload.memory.nregions = fd_num;
+    fd_num = msg.payload.memory.nregions;
 
     if (!fd_num) {
         error_report("Failed initializing vhost-user memory map, "
@@ -870,9 +886,9 @@ static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
     return idx;
 }
 
-static int vhost_user_memslots_limit(struct vhost_dev *dev)
+static bool vhost_user_has_free_memslots(struct vhost_dev *dev)
 {
-    return VHOST_MEMORY_MAX_NREGIONS;
+    return vhost_user_free_memslots;
 }
 
 static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
@@ -1054,11 +1070,19 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
     return 0;
 }
 
+static void vhost_user_set_used_memslots(struct vhost_dev *dev)
+{
+    int fds[VHOST_MEMORY_MAX_NREGIONS];
+    VhostUserMsg msg;
+
+    vhost_user_prepare_msg(dev, &msg.payload.memory, fds);
+}
+
 const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_backend_init = vhost_user_init,
         .vhost_backend_cleanup = vhost_user_cleanup,
-        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
+        .vhost_backend_has_free_memslots = vhost_user_has_free_memslots,
         .vhost_set_log_base = vhost_user_set_log_base,
         .vhost_set_mem_table = vhost_user_set_mem_table,
         .vhost_set_vring_addr = vhost_user_set_vring_addr,
@@ -1082,4 +1106,5 @@ const VhostOps user_ops = {
         .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
         .vhost_get_config = vhost_user_get_config,
         .vhost_set_config = vhost_user_set_config,
+        .vhost_set_used_memslots = vhost_user_set_used_memslots,
 };
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 4a583a3..7f17de9 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -44,20 +44,19 @@
 static struct vhost_log *vhost_log;
 static struct vhost_log *vhost_log_shm;
 
-static unsigned int used_memslots;
 static QLIST_HEAD(, vhost_dev) vhost_devices =
     QLIST_HEAD_INITIALIZER(vhost_devices);
 
 bool vhost_has_free_slot(void)
 {
-    unsigned int slots_limit = ~0U;
     struct vhost_dev *hdev;
 
     QLIST_FOREACH(hdev, &vhost_devices, entry) {
-        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
-        slots_limit = MIN(slots_limit, r);
+        if (!hdev->vhost_ops->vhost_backend_has_free_memslots(hdev)) {
+            return false;
+        }
     }
-    return slots_limit > used_memslots;
+    return true;
 }
 
 static void vhost_dev_sync_region(struct vhost_dev *dev,
@@ -442,7 +441,7 @@ static void vhost_commit(MemoryListener *listener)
                        dev->n_mem_sections * sizeof dev->mem->regions[0];
     dev->mem = g_realloc(dev->mem, regions_size);
     dev->mem->nregions = dev->n_mem_sections;
-    used_memslots = dev->mem->nregions;
+    dev->vhost_ops->vhost_set_used_memslots(dev);
     for (i = 0; i < dev->n_mem_sections; i++) {
         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
         struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -1186,7 +1185,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
 
-    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+    if (!hdev->vhost_ops->vhost_backend_has_free_memslots(hdev)) {
         error_report("vhost backend memory slots limit is less"
                 " than current number of present memory slots");
         r = -1;
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 592254f..2eac224 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -36,7 +36,7 @@ struct vhost_iotlb_msg;
 
 typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
 typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
-typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
+typedef bool (*vhost_backend_has_free_memslots)(struct vhost_dev *dev);
 
 typedef int (*vhost_net_set_backend_op)(struct vhost_dev *dev,
                                 struct vhost_vring_file *file);
@@ -94,12 +94,13 @@ typedef int (*vhost_set_config_op)(struct vhost_dev *dev, const uint8_t *data,
                                    uint32_t flags);
 typedef int (*vhost_get_config_op)(struct vhost_dev *dev, uint8_t *config,
                                    uint32_t config_len);
+typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev);
 
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_backend_init vhost_backend_init;
     vhost_backend_cleanup vhost_backend_cleanup;
-    vhost_backend_memslots_limit vhost_backend_memslots_limit;
+    vhost_backend_has_free_memslots vhost_backend_has_free_memslots;
     vhost_net_set_backend_op vhost_net_set_backend;
     vhost_net_set_mtu_op vhost_net_set_mtu;
     vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint;
@@ -130,6 +131,7 @@ typedef struct VhostOps {
     vhost_send_device_iotlb_msg_op vhost_send_device_iotlb_msg;
     vhost_get_config_op vhost_get_config;
     vhost_set_config_op vhost_set_config;
+    vhost_set_used_memslots_op vhost_set_used_memslots;
 } VhostOps;
 
 extern const VhostOps user_ops;
-- 
1.8.3.1

* Re: [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring
  2018-02-27  7:10 ` [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring Jay Zhou
@ 2018-03-01 16:16   ` Michael S. Tsirkin
  2018-03-02  1:55     ` Zhoujian (jay)
  2018-03-05  9:23     ` Zhoujian (jay)
  0 siblings, 2 replies; 6+ messages in thread
From: Michael S. Tsirkin @ 2018-03-01 16:16 UTC
  To: Jay Zhou
  Cc: qemu-devel, imammedo, weidong.huang, wangxinxin.wang,
	arei.gonglei, liuzhe13

On Tue, Feb 27, 2018 at 03:10:05PM +0800, Jay Zhou wrote:
> Used_memslots is shared by vhost-kernel and vhost-user and is set to
> dev->mem->nregions, which is correct for vhost-kernel but not for
> vhost-user: the latter only uses memory regions that are backed by a
> file descriptor. E.g. if a VM has a vhost-user NIC and 8 memory slots
> (the vhost-user memslot upper limit), hotplugging a new DIMM device
> fails because vhost_has_free_slot() finds no free slot left. It
> should succeed when only some of the memory slots are backed by a
> file descriptor, so track used memslots for vhost-user and
> vhost-kernel separately.
> 
> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
> Signed-off-by: Liuzhe <liuzhe13@huawei.com>

make check fails with this patch, I dropped it for now.

* Re: [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring
  2018-03-01 16:16   ` Michael S. Tsirkin
@ 2018-03-02  1:55     ` Zhoujian (jay)
  2018-03-05  9:23     ` Zhoujian (jay)
  1 sibling, 0 replies; 6+ messages in thread
From: Zhoujian (jay) @ 2018-03-02  1:55 UTC
  To: Michael S. Tsirkin
  Cc: qemu-devel, imammedo, Huangweidong (C), wangxin (U),
	Gonglei (Arei), Liuzhe (Ahriy, Euler)



> -----Original Message-----
> From: Michael S. Tsirkin [mailto:mst@redhat.com]
> Sent: Friday, March 02, 2018 12:17 AM
> To: Zhoujian (jay) <jianjay.zhou@huawei.com>
> Cc: qemu-devel@nongnu.org; imammedo@redhat.com; Huangweidong (C)
> <weidong.huang@huawei.com>; wangxin (U) <wangxinxin.wang@huawei.com>; Gonglei
> (Arei) <arei.gonglei@huawei.com>; Liuzhe (Ahriy, Euler) <liuzhe13@huawei.com>
> Subject: Re: [PATCH v8 2/2] vhost: used_memslots refactoring
> 
> On Tue, Feb 27, 2018 at 03:10:05PM +0800, Jay Zhou wrote:
> > [...]
> 
> make check fails with this patch, I dropped it for now.

Maybe something that was updated on the master tree affects this
patch; I will look into it and resolve it.

Regards,
Jay


* Re: [Qemu-devel] [PATCH v8 2/2] vhost: used_memslots refactoring
  2018-03-01 16:16   ` Michael S. Tsirkin
  2018-03-02  1:55     ` Zhoujian (jay)
@ 2018-03-05  9:23     ` Zhoujian (jay)
  1 sibling, 0 replies; 6+ messages in thread
From: Zhoujian (jay) @ 2018-03-05  9:23 UTC
  To: Michael S. Tsirkin
  Cc: qemu-devel, imammedo, Huangweidong (C), wangxin (U),
	Gonglei (Arei), Liuzhe (Ahriy, Euler)



> -----Original Message-----
> From: Michael S. Tsirkin [mailto:mst@redhat.com]
> Sent: Friday, March 02, 2018 12:17 AM
> To: Zhoujian (jay) <jianjay.zhou@huawei.com>
> Cc: qemu-devel@nongnu.org; imammedo@redhat.com; Huangweidong (C)
> <weidong.huang@huawei.com>; wangxin (U) <wangxinxin.wang@huawei.com>; Gonglei
> (Arei) <arei.gonglei@huawei.com>; Liuzhe (Ahriy, Euler) <liuzhe13@huawei.com>
> Subject: Re: [PATCH v8 2/2] vhost: used_memslots refactoring
> 
> On Tue, Feb 27, 2018 at 03:10:05PM +0800, Jay Zhou wrote:
> > [...]
> 
> make check fails with this patch, I dropped it for now.

Hi Michael, please see the reason inline.

> 
> > [...]
> > @@ -442,7 +441,7 @@ static void vhost_commit(MemoryListener *listener)
> >                         dev->n_mem_sections * sizeof dev->mem->regions[0];
> >      dev->mem = g_realloc(dev->mem, regions_size);
> >      dev->mem->nregions = dev->n_mem_sections;
> > -    used_memslots = dev->mem->nregions;
> > +    dev->vhost_ops->vhost_set_used_memslots(dev);

Since vhost_set_used_memslots() uses the information in
dev->mem->regions, it should be called after dev->mem->regions is
updated (by the for loop below). I have fixed this in v9.
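
As a minimal standalone sketch of why the ordering matters (simplified
types and names; the actual v9 change is not shown in this thread):

#include <stdlib.h>

struct region { int fd; };

struct dev {
    size_t         nregions;
    struct region *regions;
};

static size_t used_memslots;

/* For vhost-user this walks the regions and counts fd-backed ones,
 * so it must not run until the regions are populated. */
static void set_used_memslots(const struct dev *d)
{
    size_t used = 0;
    for (size_t i = 0; i < d->nregions; i++) {
        if (d->regions[i].fd > 0) {
            used++;
        }
    }
    used_memslots = used;
}

static void commit(struct dev *d, size_t n_sections, const int *fds)
{
    d->regions  = realloc(d->regions, n_sections * sizeof(*d->regions));
    d->nregions = n_sections;
    /* Calling set_used_memslots() here would read uninitialized fds. */
    for (size_t i = 0; i < n_sections; i++) {
        d->regions[i].fd = fds[i];      /* populate from memory sections */
    }
    set_used_memslots(d);               /* correct: after the loop */
}

int main(void)
{
    struct dev d = { 0, NULL };
    int fds[3] = { 5, 0, 7 };           /* two fd-backed regions */
    commit(&d, 3, fds);
    free(d.regions);
    return used_memslots == 2 ? 0 : 1;
}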

Regards,
Jay

> >      for (i = 0; i < dev->n_mem_sections; i++) {
> >          struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
> >          struct MemoryRegionSection *mrs = dev->mem_sections + i;
> > [...]