Date: Fri, 28 Oct 2016 12:44:24 +0200
From: Niels de Vos
To: Prasanna Kumar Kalever
Cc: qemu-devel@nongnu.org, kwolf@redhat.com, eblake@redhat.com,
    pkrempa@redhat.com, jcody@redhat.com, bharata@linux.vnet.ibm.com,
    berrange@redhat.com, armbru@redhat.com, vbellur@redhat.com,
    rtalur@redhat.com
Message-ID: <20161028104424.GA1283@chromebook.lan.nixpanic.net>
References: <1477581890-4811-1-git-send-email-prasanna.kalever@redhat.com>
In-Reply-To: <1477581890-4811-1-git-send-email-prasanna.kalever@redhat.com>
Subject: Re: [Qemu-devel] [PATCH v2 1/1] block/gluster: memory usage: use one glfs instance per volume

On Thu, Oct 27, 2016 at 08:54:50PM +0530, Prasanna Kumar Kalever wrote:
> Currently, for every drive accessed via gfapi we create a new glfs
> instance (glfs_new() followed by glfs_init()), which can consume a few
> hundred MB of memory; from the table below it looks like each instance
> adds roughly 300 MB of VSZ.
> 
> Before:
> -------
> Disks   VSZ       RSS
> 1       1098728   187756
> 2       1430808   198656
> 3       1764932   199704
> 4       2084728   202684
> 
> This patch maintains a list of pre-opened glfs objects. On adding a
> new drive belonging to the same gluster volume, we just reuse the
> existing glfs object by updating its refcount.
> 
> With this approach we avoid the unwanted memory consumption and the
> glfs_new()/glfs_init() calls when a disk (file) belongs to an
> already-opened volume.
> 
> From the table below, notice that the additional memory usage after
> adding a disk (which now reuses the existing glfs object) is
> negligible compared to before.
> 
> After:
> ------
> Disks   VSZ       RSS
> 1       1101964   185768
> 2       1109604   194920
> 3       1114012   196036
> 4       1114496   199868
> 
> Disks: number of -drive
> VSZ: virtual memory size of the process, in KiB
> RSS: resident set size, the non-swapped physical memory, in KiB
> 
> VSZ and RSS were measured with the 'ps aux' utility.
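Just to check that I understand the intended behaviour (hypothetical
command line, image names made up):

  qemu-system-x86_64 \
      -drive file=gluster://server1/testvol/a.qcow2 \
      -drive file=gluster://server1/testvol/b.qcow2

Both drives belong to volume 'testvol', so the second -drive should now
reuse the glfs object that was created for the first one, instead of
going through glfs_new()/glfs_init() again.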
> 
> Signed-off-by: Prasanna Kumar Kalever
> ---
> v2: Address comments from Jeff Cody on v1
> v1: Initial patch
> ---
>  block/gluster.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++---------
>  1 file changed, 80 insertions(+), 14 deletions(-)
> 
> diff --git a/block/gluster.c b/block/gluster.c
> index 01b479f..7e39201 100644
> --- a/block/gluster.c
> +++ b/block/gluster.c
> @@ -54,6 +54,19 @@ typedef struct BDRVGlusterReopenState {
>  } BDRVGlusterReopenState;
>  
>  
> +typedef struct GlfsPreopened {
> +    char *volume;
> +    glfs_t *fs;
> +    int ref;
> +} GlfsPreopened;
> +
> +typedef struct ListElement {
> +    QLIST_ENTRY(ListElement) list;
> +    GlfsPreopened saved;
> +} ListElement;
> +
> +static QLIST_HEAD(glfs_list, ListElement) glfs_list;
> +
>  static QemuOptsList qemu_gluster_create_opts = {
>      .name = "qemu-gluster-create-opts",
>      .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
> @@ -182,6 +195,57 @@ static QemuOptsList runtime_tcp_opts = {
>      },
>  };
>  
> +static void glfs_set_preopened(const char *volume, glfs_t *fs)
> +{
> +    ListElement *entry = NULL;
> +
> +    entry = g_new(ListElement, 1);
> +
> +    entry->saved.volume = g_strdup(volume);
> +
> +    entry->saved.fs = fs;
> +    entry->saved.ref = 1;
> +
> +    QLIST_INSERT_HEAD(&glfs_list, entry, list);
> +}
> +
> +static glfs_t *glfs_find_preopened(const char *volume)
> +{
> +    ListElement *entry = NULL;
> +
> +    QLIST_FOREACH(entry, &glfs_list, list) {

Indentation seems off by a space.

Does QLIST_FOREACH do locking? If not, it looks possible that
glfs_find_preopened() and glfs_clear_preopened() race for accessing
'entry', and it may get g_free()'d before this dereference. Maybe the
block layer prevents this from happening?

If others confirm that there is no explicit locking needed here, you
can add my Reviewed-by line.
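Purely as a sketch of what I have in mind (not a requirement, and only
needed if this list really can be reached outside the big QEMU lock);
"glfs_list_lock" is a made-up name and would need a qemu_mutex_init()
call during driver setup:

/* sketch only: serialize access to glfs_list with a QemuMutex
 * (include "qemu/thread.h"); glfs_set_preopened() and
 * glfs_clear_preopened() would need the same locking */
static QemuMutex glfs_list_lock;

static glfs_t *glfs_find_preopened(const char *volume)
{
    ListElement *entry;
    glfs_t *fs = NULL;

    qemu_mutex_lock(&glfs_list_lock);
    QLIST_FOREACH(entry, &glfs_list, list) {
        if (strcmp(entry->saved.volume, volume) == 0) {
            /* take the reference while still holding the lock */
            entry->saved.ref++;
            fs = entry->saved.fs;
            break;
        }
    }
    qemu_mutex_unlock(&glfs_list_lock);

    return fs;
}

If everything goes through the BQL anyway, none of this is needed.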
Thanks,
Niels

> +        if (strcmp(entry->saved.volume, volume) == 0) {
> +            entry->saved.ref++;
> +            return entry->saved.fs;
> +        }
> +    }
> +
> +    return NULL;
> +}
> +
> +static void glfs_clear_preopened(glfs_t *fs)
> +{
> +    ListElement *entry = NULL;
> +
> +    if (fs == NULL) {
> +        return;
> +    }
> +
> +    QLIST_FOREACH(entry, &glfs_list, list) {
> +        if (entry->saved.fs == fs) {
> +            if (--entry->saved.ref) {
> +                return;
> +            }
> +
> +            QLIST_REMOVE(entry, list);
> +
> +            glfs_fini(entry->saved.fs);
> +            g_free(entry->saved.volume);
> +            g_free(entry);
> +        }
> +    }
> +}
> +
>  static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
>  {
>      char *p, *q;
> @@ -319,11 +383,18 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
>      int old_errno;
>      GlusterServerList *server;
>  
> +    glfs = glfs_find_preopened(gconf->volume);
> +    if (glfs) {
> +        return glfs;
> +    }
> +
>      glfs = glfs_new(gconf->volume);
>      if (!glfs) {
>          goto out;
>      }
>  
> +    glfs_set_preopened(gconf->volume, glfs);
> +
>      for (server = gconf->server; server; server = server->next) {
>          if (server->value->type == GLUSTER_TRANSPORT_UNIX) {
>              ret = glfs_set_volfile_server(glfs,
> @@ -375,7 +446,7 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
>  out:
>      if (glfs) {
>          old_errno = errno;
> -        glfs_fini(glfs);
> +        glfs_clear_preopened(glfs);
>          errno = old_errno;
>      }
>      return NULL;
> @@ -741,9 +812,9 @@ out:
>      if (s->fd) {
>          glfs_close(s->fd);
>      }
> -    if (s->glfs) {
> -        glfs_fini(s->glfs);
> -    }
> +
> +    glfs_clear_preopened(s->glfs);
> +
>      return ret;
>  }
>  
> @@ -808,9 +879,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState *state)
>      if (s->fd) {
>          glfs_close(s->fd);
>      }
> -    if (s->glfs) {
> -        glfs_fini(s->glfs);
> -    }
> +
> +    glfs_clear_preopened(s->glfs);
>  
>      /* use the newly opened image / connection */
>      s->fd = reop_s->fd;
> @@ -835,9 +905,7 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state)
>          glfs_close(reop_s->fd);
>      }
>  
> -    if (reop_s->glfs) {
> -        glfs_fini(reop_s->glfs);
> -    }
> +    glfs_clear_preopened(reop_s->glfs);
>  
>      g_free(state->opaque);
>      state->opaque = NULL;
> @@ -955,9 +1023,7 @@ static int qemu_gluster_create(const char *filename,
>  out:
>      g_free(tmp);
>      qapi_free_BlockdevOptionsGluster(gconf);
> -    if (glfs) {
> -        glfs_fini(glfs);
> -    }
> +    glfs_clear_preopened(glfs);
>      return ret;
>  }
>  
> @@ -1029,7 +1095,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
>          glfs_close(s->fd);
>          s->fd = NULL;
>      }
> -    glfs_fini(s->glfs);
> +    glfs_clear_preopened(s->glfs);
>  }
>  
>  static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
> -- 
> 2.7.4
> 