From: Marc-André Lureau <marcandre.lureau@gmail.com>
Date: Fri, 26 Jun 2015 16:49:17 +0200
Message-Id: <1435330185-23248-12-git-send-email-marcandre.lureau@gmail.com>
In-Reply-To: <1435330185-23248-1-git-send-email-marcandre.lureau@gmail.com>
References: <1435330185-23248-1-git-send-email-marcandre.lureau@gmail.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Subject: [Qemu-devel] [PATCH 11/39] ivshmem: increase_dynamic_storage() improvements
To: qemu-devel@nongnu.org
Cc: cam@cs.ualberta.ca, Marc-André Lureau, stefanha@redhat.com

- rename the function to resize_peers() to better reflect what it does
- limit the maximum number of peers to IVSHMEM_MAX_PEERS, defined as
  G_MAXUINT16, which is more realistic and matches the 16-bit limit
  imposed by the doorbell register
- change the argument to be the requested size, not the highest
  0-based index
- allocate the eventfds of new peers immediately instead of lazily

Signed-off-by: Marc-André Lureau
---
 hw/misc/ivshmem.c | 43 ++++++++++++++++---------------------------
 1 file changed, 16 insertions(+), 27 deletions(-)

diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 7ba93c0..66f9c0b 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -34,6 +34,7 @@
 
 #define PCI_VENDOR_ID_IVSHMEM PCI_VENDOR_ID_REDHAT_QUMRANET
 #define PCI_DEVICE_ID_IVSHMEM 0x1110
+#define IVSHMEM_MAX_PEERS G_MAXUINT16
 
 #define IVSHMEM_IOEVENTFD 0
 #define IVSHMEM_MSI 1
@@ -416,31 +417,28 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
 
 /* this function increase the dynamic storage need to store data about other
  * guests */
-static int increase_dynamic_storage(IVShmemState *s, int new_min_size)
+static int resize_peers(IVShmemState *s, int new_min_size)
 {
-    int j, old_nb_alloc;
+    int j, old_size;
 
-    /* check for integer overflow */
-    if (new_min_size >= INT_MAX / sizeof(Peer) - 1 || new_min_size <= 0) {
+    /* limit number of max peers */
+    if (new_min_size <= 0 || new_min_size > IVSHMEM_MAX_PEERS) {
         return -1;
     }
-
-    old_nb_alloc = s->nb_peers;
-
-    if (new_min_size >= s->nb_peers) {
-        /* +1 because #new_min_size is used as last array index */
-        s->nb_peers = new_min_size + 1;
-    } else {
+    if (new_min_size <= s->nb_peers) {
         return 0;
     }
 
+    old_size = s->nb_peers;
+    s->nb_peers = new_min_size;
+
     IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);
+
     s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));
 
-    /* zero out new pointers */
-    for (j = old_nb_alloc; j < s->nb_peers; j++) {
-        s->peers[j].eventfds = NULL;
+    for (j = old_size; j < s->nb_peers; j++) {
+        s->peers[j].eventfds = g_new(EventNotifier, s->vectors);
         s->peers[j].nb_eventfds = 0;
     }
 
@@ -507,8 +505,8 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
 
     /* make sure we have enough space for this guest */
     if (incoming_posn >= s->nb_peers) {
-        if (increase_dynamic_storage(s, incoming_posn) < 0) {
-            error_report("increase_dynamic_storage() failed");
+        if (resize_peers(s, incoming_posn + 1) < 0) {
+            error_report("failed to resize peers array");
             if (incoming_fd != -1) {
                 close(incoming_fd);
             }
@@ -518,8 +516,7 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
     if (incoming_fd == -1) {
         /* if posn is positive and unseen before then this is our posn*/
-        if ((incoming_posn >= 0) &&
-            (s->peers[incoming_posn].eventfds == NULL)) {
+        if (incoming_posn >= 0 && s->vm_id == -1) {
             /* receive our posn */
             s->vm_id = incoming_posn;
             return;
         }
@@ -570,11 +567,6 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
      * guests for each VM */
     guest_max_eventfd = s->peers[incoming_posn].nb_eventfds;
 
-    if (guest_max_eventfd == 0) {
-        /* one eventfd per MSI vector */
-        s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
-    }
-
     /* this is an eventfd for a particular guest VM */
     IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
                     guest_max_eventfd, incoming_fd);
@@ -811,12 +803,9 @@ static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
     }
 
     /* we allocate enough space for 16 guests and grow as needed */
-    s->nb_peers = 16;
+    resize_peers(s, 16);
     s->vm_id = -1;
 
-    /* allocate/initialize space for interrupt handling */
-    s->peers = g_malloc0(s->nb_peers * sizeof(Peer));
-
    pci_register_bar(dev, 2, attr, &s->bar);
 
    s->eventfd_chr = g_malloc0(s->vectors * sizeof(CharDriverState *));
-- 
2.4.3
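
For readers who want to experiment with the grow-only pattern this patch
introduces, below is a minimal standalone C sketch of the post-patch
resize_peers() semantics. Peer, State, MAX_PEERS and the int-based
eventfds array are simplified stand-ins for the QEMU types (Peer,
IVShmemState, IVSHMEM_MAX_PEERS, EventNotifier), and it uses plain
realloc/calloc instead of glib so it compiles on its own; it is
illustrative, not the actual device code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PEERS UINT16_MAX   /* stand-in for IVSHMEM_MAX_PEERS (G_MAXUINT16) */

typedef struct Peer {
    int nb_eventfds;
    int *eventfds;             /* stand-in for the EventNotifier array */
} Peer;

typedef struct State {
    int vectors;               /* eventfds allocated per peer */
    int nb_peers;
    Peer *peers;
} State;

/* Grow the peers array to at least new_size entries; never shrinks.
 * Mirrors the post-patch semantics: the argument is a size, not a
 * 0-based index, and per-peer eventfd storage is allocated up front
 * rather than lazily on first use. Returns 0 on success, -1 on a
 * size that is non-positive or above the doorbell limit. */
static int resize_peers(State *s, int new_size)
{
    int j, old_size;

    if (new_size <= 0 || new_size > MAX_PEERS) {
        return -1;
    }
    if (new_size <= s->nb_peers) {
        return 0;               /* already big enough: no-op */
    }

    old_size = s->nb_peers;
    s->nb_peers = new_size;
    s->peers = realloc(s->peers, s->nb_peers * sizeof(Peer));
    if (!s->peers) {
        abort();                /* g_realloc() also aborts on OOM */
    }

    for (j = old_size; j < s->nb_peers; j++) {
        s->peers[j].eventfds = calloc(s->vectors, sizeof(int));
        s->peers[j].nb_eventfds = 0;
    }
    return 0;
}

int main(void)
{
    State s = { .vectors = 4, .nb_peers = 0, .peers = NULL };

    resize_peers(&s, 16);       /* initial allocation, as in realize() */
    resize_peers(&s, 8);        /* smaller request: stays at 16 */
    resize_peers(&s, 20 + 1);   /* peer id 20 seen: request id + 1 */
    printf("nb_peers = %d\n", s.nb_peers);   /* prints 21 */
    return 0;
}

The grow-only behavior matters for the caller in ivshmem_read(): a late
or out-of-order small posn can never shrink the array and discard
eventfds already allocated for a higher-numbered peer.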