From: marcandre.lureau@redhat.com
To: peter.maydell@linaro.org
Cc: Marc-André Lureau <marcandre.lureau@redhat.com>, qemu-devel@nongnu.org
Subject: [Qemu-devel] [PULL 28/48] ivshmem: replace 'guest' for 'peer' appropriately
Date: Tue, 6 Oct 2015 21:19:24 +0200
Message-Id: <1444159184-18153-29-git-send-email-marcandre.lureau@redhat.com>
In-Reply-To: <1444159184-18153-1-git-send-email-marcandre.lureau@redhat.com>
References: <1444159184-18153-1-git-send-email-marcandre.lureau@redhat.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Marc-André Lureau <marcandre.lureau@redhat.com>

The terms 'guest' and 'peer' are sometimes used interchangeably, which may be
confusing. Instead, use 'peer' for the remote instances of ivshmem clients,
and 'guest' for the local VM.

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Claudio Fontana
---
 hw/misc/ivshmem.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index d1b5d35..0e31d1d 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -89,7 +89,7 @@ typedef struct IVShmemState {
     int shm_fd; /* shared memory file descriptor */
 
     Peer *peers;
-    int nb_peers; /* how many guests we have space for */
+    int nb_peers; /* how many peers we have space for */
 
     int vm_id;
     uint32_t vectors;
@@ -387,9 +387,9 @@ static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
                               &s->peers[posn].eventfds[i]);
 }
 
-static void close_guest_eventfds(IVShmemState *s, int posn)
+static void close_peer_eventfds(IVShmemState *s, int posn)
 {
-    int i, guest_curr_max;
+    int i, n;
 
     if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
         return;
@@ -399,14 +399,14 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
         return;
     }
 
-    guest_curr_max = s->peers[posn].nb_eventfds;
+    n = s->peers[posn].nb_eventfds;
 
     memory_region_transaction_begin();
-    for (i = 0; i < guest_curr_max; i++) {
+    for (i = 0; i < n; i++) {
         ivshmem_del_eventfd(s, posn, i);
     }
     memory_region_transaction_commit();
-    for (i = 0; i < guest_curr_max; i++) {
+    for (i = 0; i < n; i++) {
         event_notifier_cleanup(&s->peers[posn].eventfds[i]);
     }
 
@@ -415,7 +415,7 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
 }
 
 /* this function increase the dynamic storage need to store data about other
- * guests */
+ * peers */
 static int resize_peers(IVShmemState *s, int new_min_size)
 {
 
@@ -432,7 +432,7 @@ static int resize_peers(IVShmemState *s, int new_min_size)
     old_size = s->nb_peers;
     s->nb_peers = new_min_size;
 
-    IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);
+    IVSHMEM_DPRINTF("bumping storage to %d peers\n", s->nb_peers);
 
     s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));
 
@@ -503,7 +503,7 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
     incoming_fd = qemu_chr_fe_get_msgfd(s->server_chr);
     IVSHMEM_DPRINTF("posn is %ld, fd is %d\n", incoming_posn, incoming_fd);
 
-    /* make sure we have enough space for this guest */
+    /* make sure we have enough space for this peer */
     if (incoming_posn >= s->nb_peers) {
         if (resize_peers(s, incoming_posn + 1) < 0) {
             error_report("failed to resize peers array");
@@ -522,9 +522,9 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
             /* receive our posn */
             s->vm_id = incoming_posn;
         } else {
-            /* otherwise an fd == -1 means an existing guest has gone away */
+            /* otherwise an fd == -1 means an existing peer has gone away */
             IVSHMEM_DPRINTF("posn %ld has gone away\n", incoming_posn);
-            close_guest_eventfds(s, incoming_posn);
+            close_peer_eventfds(s, incoming_posn);
         }
         return;
     }
@@ -573,7 +573,7 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
     /* get a new eventfd: */
     new_eventfd = peer->nb_eventfds++;
 
-    /* this is an eventfd for a particular guest VM */
+    /* this is an eventfd for a particular peer VM */
     IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
                     new_eventfd, incoming_fd);
     event_notifier_init_fd(&peer->eventfds[new_eventfd], incoming_fd);
@@ -753,7 +753,7 @@ static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
             return;
         }
 
-        /* we allocate enough space for 16 guests and grow as needed */
+        /* we allocate enough space for 16 peers and grow as needed */
         resize_peers(s, 16);
 
         s->vm_id = -1;
@@ -831,7 +831,7 @@ static void pci_ivshmem_exit(PCIDevice *dev)
 
     if (s->peers) {
         for (i = 0; i < s->nb_peers; i++) {
-            close_guest_eventfds(s, i);
+            close_peer_eventfds(s, i);
         }
         g_free(s->peers);
     }
-- 
2.4.3
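
For readers who want the peer/guest distinction above in isolation, here is a
minimal, self-contained C sketch of the bookkeeping the patch renames: each
remote peer owns a set of per-vector notifiers, when a peer disconnects only
its own notifiers are torn down, and vm_id identifies the local guest. This is
an illustration only, not QEMU code; the names Peer, peers, nb_peers,
nb_eventfds, vm_id, resize_peers and close_peer_eventfds mirror the patch, but
the plain file descriptors, MAX_VECTORS and the demo main() are hypothetical
stand-ins for QEMU's EventNotifier and memory-region ioeventfd wiring.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MAX_VECTORS 4                /* hypothetical per-peer vector limit for the demo */

/* One entry per remote peer: its interrupt notifiers (plain fds here). */
typedef struct Peer {
    int nb_eventfds;
    int eventfds[MAX_VECTORS];
} Peer;

typedef struct State {
    int nb_peers;                    /* how many peers we have space for */
    Peer *peers;
    int vm_id;                       /* our own ID, i.e. the local guest's position */
} State;

/* Grow the peers array when a higher peer ID shows up (cf. resize_peers). */
static int resize_peers(State *s, int new_min_size)
{
    if (new_min_size <= s->nb_peers) {
        return 0;
    }
    Peer *p = realloc(s->peers, new_min_size * sizeof(Peer));
    if (p == NULL) {
        return -1;                   /* keep the old array on allocation failure */
    }
    for (int i = s->nb_peers; i < new_min_size; i++) {
        p[i].nb_eventfds = 0;
    }
    s->peers = p;
    s->nb_peers = new_min_size;
    return 0;
}

/* A peer went away: close only that peer's notifiers (cf. close_peer_eventfds). */
static void close_peer_eventfds(State *s, int posn)
{
    if (posn < 0 || posn >= s->nb_peers) {
        return;
    }
    int n = s->peers[posn].nb_eventfds;
    for (int i = 0; i < n; i++) {
        close(s->peers[posn].eventfds[i]);
    }
    s->peers[posn].nb_eventfds = 0;
}

int main(void)
{
    State s = { .nb_peers = 0, .peers = NULL, .vm_id = 0 };

    if (resize_peers(&s, 2) != 0) {  /* make room for peer 0 and peer 1 */
        return 1;
    }
    s.peers[1].eventfds[0] = dup(0); /* stand-in for an fd received from the server */
    s.peers[1].nb_eventfds = 1;

    close_peer_eventfds(&s, 1);      /* peer 1 disconnected; local guest state untouched */
    printf("peer 1 now has %d eventfds\n", s.peers[1].nb_eventfds);
    free(s.peers);
    return 0;
}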