* [Qemu-devel] [PATCH v2 1/1] block/gluster: memory usage: use one glfs instance per volume
From: Prasanna Kumar Kalever @ 2016-10-27 15:24 UTC (permalink / raw)
  To: qemu-devel
  Cc: kwolf, eblake, pkrempa, jcody, bharata, berrange, armbru, ndevos,
	vbellur, rtalur, Prasanna Kumar Kalever

Currently, for every drive accessed via gfapi we create a new glfs
instance (a glfs_new() call followed by glfs_init()), which can consume
a few hundred MB of memory; from the table below, each instance
consumed ~300 MB of VSZ.

Before:
-------
Disks   VSZ     RSS
1       1098728 187756
2       1430808 198656
3       1764932 199704
4       2084728 202684

This patch maintains a list of pre-opened glfs objects. On adding
a new drive belonging to the same gluster volume, we just reuse the
existing glfs object by updating its refcount.

With this approach we avoid the unwanted memory consumption, and the
extra glfs_new/glfs_init calls, when a disk (file) being accessed
belongs to a volume that is already open.
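
In outline, the lifecycle added by this patch is as follows (a sketch
only; the diff below is authoritative):

    glfs = glfs_find_preopened(volume);    /* hit: ref++ and reuse    */
    if (!glfs) {
        glfs = glfs_new(volume);
        glfs_set_preopened(volume, glfs);  /* new entry with ref = 1  */
        /* ... glfs_set_volfile_server(), glfs_init() ... */
    }
    ...
    glfs_clear_preopened(glfs);  /* ref--; glfs_fini() when it hits 0 */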

From the table below, notice that the memory usage after adding a disk
(which reuses an existing glfs object) is negligible compared to
before.

After:
------
Disks   VSZ     RSS
1       1101964 185768
2       1109604 194920
3       1114012 196036
4       1114496 199868

Disks: number of -drive options
VSZ: virtual memory size of the process, in KiB
RSS: resident set size, the non-swapped physical memory, in KiB

VSZ and RSS were measured with the 'ps aux' utility.
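
For example (illustrative only; the host, volume and image names below
are made up), start a guest with several -drive options on one volume
and sample the process:

    qemu-system-x86_64 ... \
        -drive file=gluster://host/testvol/disk1.img,format=raw \
        -drive file=gluster://host/testvol/disk2.img,format=raw &
    ps -o vsz,rss -p $!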

Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
---
v2: Address comments from Jeff Cody on v1
v1: Initial patch
---
 block/gluster.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 80 insertions(+), 14 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 01b479f..7e39201 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -54,6 +54,19 @@ typedef struct BDRVGlusterReopenState {
 } BDRVGlusterReopenState;
 
 
+typedef struct GlfsPreopened {
+    char *volume;
+    glfs_t *fs;
+    int ref;
+} GlfsPreopened;
+
+typedef struct ListElement {
+    QLIST_ENTRY(ListElement) list;
+    GlfsPreopened saved;
+} ListElement;
+
+static QLIST_HEAD(glfs_list, ListElement) glfs_list;
+
 static QemuOptsList qemu_gluster_create_opts = {
     .name = "qemu-gluster-create-opts",
     .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -182,6 +195,57 @@ static QemuOptsList runtime_tcp_opts = {
     },
 };
 
+static void glfs_set_preopened(const char *volume, glfs_t *fs)
+{
+    ListElement *entry = NULL;
+
+    entry = g_new(ListElement, 1);
+
+    entry->saved.volume = g_strdup(volume);
+
+    entry->saved.fs = fs;
+    entry->saved.ref = 1;
+
+    QLIST_INSERT_HEAD(&glfs_list, entry, list);
+}
+
+static glfs_t *glfs_find_preopened(const char *volume)
+{
+    ListElement *entry = NULL;
+
+     QLIST_FOREACH(entry, &glfs_list, list) {
+        if (strcmp(entry->saved.volume, volume) == 0) {
+            entry->saved.ref++;
+            return entry->saved.fs;
+        }
+     }
+
+    return NULL;
+}
+
+static void glfs_clear_preopened(glfs_t *fs)
+{
+    ListElement *entry = NULL;
+
+    if (fs == NULL) {
+        return;
+    }
+
+    QLIST_FOREACH(entry, &glfs_list, list) {
+        if (entry->saved.fs == fs) {
+            if (--entry->saved.ref) {
+                return;
+            }
+
+            QLIST_REMOVE(entry, list);
+
+            glfs_fini(entry->saved.fs);
+            g_free(entry->saved.volume);
+            g_free(entry);
+        }
+    }
+}
+
 static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
     char *p, *q;
@@ -319,11 +383,18 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
     int old_errno;
     GlusterServerList *server;
 
+    glfs = glfs_find_preopened(gconf->volume);
+    if (glfs) {
+        return glfs;
+    }
+
     glfs = glfs_new(gconf->volume);
     if (!glfs) {
         goto out;
     }
 
+    glfs_set_preopened(gconf->volume, glfs);
+
     for (server = gconf->server; server; server = server->next) {
         if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
             ret = glfs_set_volfile_server(glfs,
@@ -375,7 +446,7 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 out:
     if (glfs) {
         old_errno = errno;
-        glfs_fini(glfs);
+        glfs_clear_preopened(glfs);
         errno = old_errno;
     }
     return NULL;
@@ -741,9 +812,9 @@ out:
     if (s->fd) {
         glfs_close(s->fd);
     }
-    if (s->glfs) {
-        glfs_fini(s->glfs);
-    }
+
+    glfs_clear_preopened(s->glfs);
+
     return ret;
 }
 
@@ -808,9 +879,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState *state)
     if (s->fd) {
         glfs_close(s->fd);
     }
-    if (s->glfs) {
-        glfs_fini(s->glfs);
-    }
+
+    glfs_clear_preopened(s->glfs);
 
     /* use the newly opened image / connection */
     s->fd         = reop_s->fd;
@@ -835,9 +905,7 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state)
         glfs_close(reop_s->fd);
     }
 
-    if (reop_s->glfs) {
-        glfs_fini(reop_s->glfs);
-    }
+    glfs_clear_preopened(reop_s->glfs);
 
     g_free(state->opaque);
     state->opaque = NULL;
@@ -955,9 +1023,7 @@ static int qemu_gluster_create(const char *filename,
 out:
     g_free(tmp);
     qapi_free_BlockdevOptionsGluster(gconf);
-    if (glfs) {
-        glfs_fini(glfs);
-    }
+    glfs_clear_preopened(glfs);
     return ret;
 }
 
@@ -1029,7 +1095,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
         glfs_close(s->fd);
         s->fd = NULL;
     }
-    glfs_fini(s->glfs);
+    glfs_clear_preopened(s->glfs);
 }
 
 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
-- 
2.7.4


* Re: [Qemu-devel] [PATCH v2 1/1] block/gluster: memory usage: use one glfs instance per volume
From: Raghavendra Talur @ 2016-10-27 16:16 UTC (permalink / raw)
  To: Prasanna Kumar Kalever
  Cc: qemu-devel, kwolf, eblake, pkrempa, jcody, bharata, berrange,
	armbru, ndevos, vbellur

On Thu, Oct 27, 2016 at 8:54 PM, Prasanna Kumar Kalever <
prasanna.kalever@redhat.com> wrote:

> [...]

+1 from Gluster perspective.


* Re: [Qemu-devel] [PATCH v2 1/1] block/gluster: memory usage: use one glfs instance per volume
From: Niels de Vos @ 2016-10-28 10:44 UTC (permalink / raw)
  To: Prasanna Kumar Kalever
  Cc: qemu-devel, kwolf, eblake, pkrempa, jcody, bharata, berrange,
	armbru, vbellur, rtalur

On Thu, Oct 27, 2016 at 08:54:50PM +0530, Prasanna Kumar Kalever wrote:
> [...]
> +static glfs_t *glfs_find_preopened(const char *volume)
> +{
> +    ListElement *entry = NULL;
> +
> +     QLIST_FOREACH(entry, &glfs_list, list) {

Indentation seems off by a space.

Does QLIST_FOREACH do locking? If not, it looks possible that
glfs_find_preopened() and glfs_clear_preopened() race for accessing
'entry' and it may get g_free()'d before this dereference. Maybe the
block layer prevents this from happening?
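
Concretely, the interleaving in question would be something like the
following (hypothetical; it can only arise if the list were reached
from more than one thread):

    thread A: glfs_find_preopened() is inside QLIST_FOREACH, holding 'entry'
    thread B: glfs_clear_preopened() drops the last ref on that entry:
              QLIST_REMOVE() followed by g_free(entry)
    thread A: strcmp(entry->saved.volume, volume)   <- use-after-free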

If others confirm that there is no explicit locking needed here, you can
add my Reviewed-by line.

Thanks,
Niels


> +        if (strcmp(entry->saved.volume, volume) == 0) {
> +            entry->saved.ref++;
> +            return entry->saved.fs;
> +        }
> +     }
> +
> +    return NULL;
> +}
> +
> +static void glfs_clear_preopened(glfs_t *fs)
> +{
> +    ListElement *entry = NULL;
> +
> +    if (fs == NULL) {
> +        return;
> +    }
> +
> +    QLIST_FOREACH(entry, &glfs_list, list) {
> +        if (entry->saved.fs == fs) {
> +            if (--entry->saved.ref) {
> +                return;
> +            }
> +
> +            QLIST_REMOVE(entry, list);
> +
> +            glfs_fini(entry->saved.fs);
> +            g_free(entry->saved.volume);
> +            g_free(entry);
> +        }
> +    }
> +}
> [...]


* Re: [Qemu-devel] [PATCH v2 1/1] block/gluster: memory usage: use one glfs instance per volume
From: Jeff Cody @ 2016-10-28 17:04 UTC (permalink / raw)
  To: Niels de Vos
  Cc: Prasanna Kumar Kalever, qemu-devel, kwolf, eblake, pkrempa,
	bharata, berrange, armbru, vbellur, rtalur

On Fri, Oct 28, 2016 at 12:44:24PM +0200, Niels de Vos wrote:
> On Thu, Oct 27, 2016 at 08:54:50PM +0530, Prasanna Kumar Kalever wrote:
> > [...]
> > +static glfs_t *glfs_find_preopened(const char *volume)
> > +{
> > +    ListElement *entry = NULL;
> > +
> > +     QLIST_FOREACH(entry, &glfs_list, list) {
> 
> Indentation seems off by a space.
> 
> Does QLIST_FOREACH do locking? If not, it looks possible that
> glfs_find_preopened() and glfs_clear_preopened() race for accessing
> 'entry' and it may get g_free()'d before this dereference. Maybe the
> block layer prevents this from happening?
> 
> If others confirm that there is no explicit locking needed here, you can
> add my Reviewed-by line.
>

There should be no issue with locking - it will all be running in the same
thread.

I'll add your R-b when I pull it in, and adding my own:

Reviewed-by: Jeff Cody <jcody@redhat.com>
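
For reference, if the preopened list ever did have to be shared across
threads, the usual QEMU pattern would be to guard the list operations
with a QemuMutex (from "qemu/thread.h"). A minimal sketch, not part of
this patch, with the lock and its initialization assumed:

    static QemuMutex glfs_list_lock;  /* qemu_mutex_init() at startup */

    static glfs_t *glfs_find_preopened(const char *volume)
    {
        ListElement *entry;
        glfs_t *fs = NULL;

        qemu_mutex_lock(&glfs_list_lock);
        QLIST_FOREACH(entry, &glfs_list, list) {
            if (strcmp(entry->saved.volume, volume) == 0) {
                entry->saved.ref++;  /* take the ref under the lock */
                fs = entry->saved.fs;
                break;
            }
        }
        qemu_mutex_unlock(&glfs_list_lock);
        return fs;
    }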


* Re: [Qemu-devel] [PATCH v2 1/1] block/gluster: memory usage: use one glfs instance per volume
From: Jeff Cody @ 2016-10-28 17:40 UTC (permalink / raw)
  To: Prasanna Kumar Kalever
  Cc: qemu-devel, kwolf, eblake, pkrempa, bharata, berrange, armbru,
	ndevos, vbellur, rtalur

On Thu, Oct 27, 2016 at 08:54:50PM +0530, Prasanna Kumar Kalever wrote:
> [...]

Thanks,

Applied to my block branch:

git://github.com/codyprime/qemu-kvm-jtc.git block

-Jeff

