From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Peter Maydell" <peter.maydell@linaro.org>,
	"Johannes Berg" <johannes.berg@intel.com>,
	"Dr. David Alan Gilbert" <dgilbert@redhat.com>,
	"Raphael Norwitz" <raphael.norwitz@nutanix.com>,
	"Stefan Hajnoczi" <stefanha@redhat.com>,
	"Marc-André Lureau" <marcandre.lureau@redhat.com>
Subject: [PULL v2 45/58] Lift max ram slots limit in libvhost-user
Date: Fri, 12 Jun 2020 10:52:41 -0400
Message-ID: <20200612141917.9446-46-mst@redhat.com>
In-Reply-To: <20200612141917.9446-1-mst@redhat.com>

From: Raphael Norwitz <raphael.norwitz@nutanix.com>

Historically, VMs with vhost-user devices could hot-add memory a maximum
of 8 times. Now that the VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS
protocol feature has been added, VMs whose vhost-user backends support
this new feature can use a configurable number of RAM slots, up to the
maximum supported by the target platform.

This change adds VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS support to
backends built with libvhost-user, and increases the number of supported
RAM slots from 8 to 32.
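
As an illustration of the negotiation this enables, the sketch below
shows how a front-end might pick its slot limit. It is a minimal,
hypothetical example: only the protocol feature bit comes from this
patch, and the backend_* helpers stand in for the real
GET_PROTOCOL_FEATURES / GET_MAX_MEM_SLOTS message round-trips rather
than QEMU's actual vhost-user front-end code.

    /*
     * Minimal self-contained sketch -- not QEMU's hw/virtio/vhost-user.c.
     * The front-end only uses the larger per-backend limit when the
     * backend advertises the new protocol feature; otherwise it falls
     * back to the legacy 8-region SET_MEM_TABLE limit.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15
    #define LEGACY_MAX_MEM_SLOTS 8

    static uint64_t backend_get_protocol_features(void)
    {
        /* Stand-in for a GET_PROTOCOL_FEATURES round-trip. */
        return 1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;
    }

    static uint64_t backend_get_max_mem_slots(void)
    {
        /* What a libvhost-user backend reports after this patch. */
        return 32;
    }

    static uint64_t usable_mem_slots(void)
    {
        uint64_t feats = backend_get_protocol_features();

        if (feats & (1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
            /* Regions are then added/removed one at a time, up to this limit. */
            return backend_get_max_mem_slots();
        }
        return LEGACY_MAX_MEM_SLOTS;
    }

    int main(void)
    {
        printf("usable memory slots: %" PRIu64 "\n", usable_mem_slots());
        return 0;
    }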

Memory hot-add, hot-remove and postcopy migration were tested with
the vhost-user-bridge sample.
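
Backends built on libvhost-user advertise the new protocol feature
automatically. If a backend wants to know whether the front-end
actually negotiated it, a check along the following lines should work;
this is a sketch, assuming only the protocol_features field that VuDev
exposes in libvhost-user.h, and is not code from the patch. It is meant
to be dropped into backend code that already includes libvhost-user.h.

    /*
     * Sketch only: after feature negotiation, report whether the
     * front-end opted in to configurable memory slots.
     */
    static bool front_end_uses_mem_slots(const VuDev *dev)
    {
        return (dev->protocol_features &
                (1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) != 0;
    }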

Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
Message-Id: <1588533678-23450-11-git-send-email-raphael.norwitz@nutanix.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 contrib/libvhost-user/libvhost-user.h | 15 +++++++++++----
 contrib/libvhost-user/libvhost-user.c | 17 +++++++++--------
 2 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index f8439713a8..844c37c648 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -28,7 +28,13 @@
 
 #define VIRTQUEUE_MAX_SIZE 1024
 
-#define VHOST_MEMORY_MAX_NREGIONS 8
+#define VHOST_MEMORY_BASELINE_NREGIONS 8
+
+/*
+ * Set a reasonable maximum number of ram slots, which will be supported by
+ * any architecture.
+ */
+#define VHOST_USER_MAX_RAM_SLOTS 32
 
 typedef enum VhostSetConfigType {
     VHOST_SET_CONFIG_TYPE_MASTER = 0,
@@ -55,6 +61,7 @@ enum VhostUserProtocolFeature {
     VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
     VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
     VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
+    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
 
     VHOST_USER_PROTOCOL_F_MAX
 };
@@ -123,7 +130,7 @@ typedef struct VhostUserMemoryRegion {
 typedef struct VhostUserMemory {
     uint32_t nregions;
     uint32_t padding;
-    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
+    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
 } VhostUserMemory;
 
 typedef struct VhostUserMemRegMsg {
@@ -190,7 +197,7 @@ typedef struct VhostUserMsg {
         VhostUserInflight inflight;
     } payload;
 
-    int fds[VHOST_MEMORY_MAX_NREGIONS];
+    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
     int fd_num;
     uint8_t *data;
 } VU_PACKED VhostUserMsg;
@@ -368,7 +375,7 @@ typedef struct VuDevInflightInfo {
 struct VuDev {
     int sock;
     uint32_t nregions;
-    VuDevRegion regions[VHOST_MEMORY_MAX_NREGIONS];
+    VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS];
     VuVirtq *vq;
     VuDevInflightInfo inflight_info;
     int log_call_fd;
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 386449b697..b1e607298c 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -269,7 +269,7 @@ have_userfault(void)
 static bool
 vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 {
-    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
+    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
     struct iovec iov = {
         .iov_base = (char *)vmsg,
         .iov_len = VHOST_USER_HDR_SIZE,
@@ -340,7 +340,7 @@ vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 {
     int rc;
     uint8_t *p = (uint8_t *)vmsg;
-    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
+    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
     struct iovec iov = {
         .iov_base = (char *)vmsg,
         .iov_len = VHOST_USER_HDR_SIZE,
@@ -353,7 +353,7 @@ vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
     struct cmsghdr *cmsg;
 
     memset(control, 0, sizeof(control));
-    assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
+    assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
     if (vmsg->fd_num > 0) {
         size_t fdsize = vmsg->fd_num * sizeof(int);
         msg.msg_controllen = CMSG_SPACE(fdsize);
@@ -780,7 +780,7 @@ static bool
 vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
     int i, j;
     bool found = false;
-    VuDevRegion shadow_regions[VHOST_MEMORY_MAX_NREGIONS] = {};
+    VuDevRegion shadow_regions[VHOST_USER_MAX_RAM_SLOTS] = {};
     VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
 
     DPRINT("Removing region:\n");
@@ -813,7 +813,7 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
 
     if (found) {
         memcpy(dev->regions, shadow_regions,
-               sizeof(VuDevRegion) * VHOST_MEMORY_MAX_NREGIONS);
+               sizeof(VuDevRegion) * VHOST_USER_MAX_RAM_SLOTS);
         DPRINT("Successfully removed a region\n");
         dev->nregions--;
         vmsg_set_reply_u64(vmsg, 0);
@@ -1394,7 +1394,8 @@ vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
                         1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
                         1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                         1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD |
-                        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK;
+                        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
+                        1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;
 
     if (have_userfault()) {
         features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
@@ -1732,14 +1733,14 @@ static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
 {
     vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
     vmsg->size  = sizeof(vmsg->payload.u64);
-    vmsg->payload.u64 = VHOST_MEMORY_MAX_NREGIONS;
+    vmsg->payload.u64 = VHOST_USER_MAX_RAM_SLOTS;
     vmsg->fd_num = 0;
 
     if (!vu_message_write(dev, dev->sock, vmsg)) {
         vu_panic(dev, "Failed to send max ram slots: %s\n", strerror(errno));
     }
 
-    DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_MEMORY_MAX_NREGIONS);
+    DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS);
 
     return false;
 }
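
A note on the split between the two constants: the fds[] array and the
cmsg buffers above stay at VHOST_MEMORY_BASELINE_NREGIONS because, once
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS is negotiated, regions are
added and removed one message (and one file descriptor) at a time, so
no single message ever carries more than the baseline number of fds;
only the per-device region table in VuDev grows to
VHOST_USER_MAX_RAM_SLOTS. Stated as a compile-time check (illustrative
only, not part of the patch):

    #define VHOST_MEMORY_BASELINE_NREGIONS 8
    #define VHOST_USER_MAX_RAM_SLOTS       32

    /* No single message carries more fds than the baseline region count,
     * even though the device may now track up to 32 regions in total. */
    _Static_assert(VHOST_MEMORY_BASELINE_NREGIONS <= VHOST_USER_MAX_RAM_SLOTS,
                   "per-message fd limit must not exceed the device region limit");
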
-- 
MST


