From: Wei Liu <wei.liu2@citrix.com>
To: xen-devel@lists.xen.org
Cc: Wei Liu <wei.liu2@citrix.com>,
	ian.campbell@citrix.com, andrew.cooper3@citrix.com,
	dario.faggioli@citrix.com, ian.jackson@eu.citrix.com,
	JBeulich@suse.com, ufimtseva@gmail.com
Subject: [PATCH v5 10/24] libxl: functions to build vmemranges for PV guest
Date: Thu, 12 Feb 2015 19:44:40 +0000
Message-ID: <1423770294-9779-11-git-send-email-wei.liu2@citrix.com>
In-Reply-To: <1423770294-9779-1-git-send-email-wei.liu2@citrix.com>

Introduce an arch-independent routine to generate one vmemrange per
vnode. Also introduce arch-dependent routines for different
architectures, because part of the process is arch-specific -- ARM does
not yet have NUMA support, and E820 is x86 only.

For x86 guests that care about the machine E820 map (i.e. with
e820_host=1), each vnode is further split into several vmemranges to
accommodate memory holes. A few stubs for libxl_arm.c are created.
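
As an illustration of the e820_host=1 splitting (a minimal standalone
sketch, not part of the patch: the E820 layout, the hole position and
the 2 GiB node size below are made-up numbers), the loop added to
libxl__arch_vnuma_build_vmemrange behaves like this:

    /*
     * Hypothetical example only: one 2 GiB vnode laid over a host
     * E820 map that has a 1 GiB hole in the middle of RAM.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        struct { uint64_t addr, size; int ram; } e820[] = {
            { 0x00000000ULL, 0x60000000ULL, 1 }, /* 1.5 GiB RAM */
            { 0x60000000ULL, 0x40000000ULL, 0 }, /* 1 GiB hole  */
            { 0xa0000000ULL, 0x60000000ULL, 1 }, /* 1.5 GiB RAM */
        };
        struct { uint64_t start, end; unsigned nid; } out[8];
        uint64_t remaining = 2ULL << 30;         /* vnode 0 is 2 GiB */
        unsigned i = 0, nr = 0;

        while (remaining > 0 && i < 3) {
            if (!e820[i].ram) {                  /* skip non-RAM regions */
                i++;
                continue;
            }

            uint64_t bytes = e820[i].size >= remaining ?
                remaining : e820[i].size;

            out[nr].start = e820[i].addr;
            out[nr].end   = e820[i].addr + bytes;
            out[nr].nid   = 0;
            nr++;

            if (e820[i].size >= remaining) {     /* node ends in this entry */
                e820[i].addr += bytes;
                e820[i].size -= bytes;
            } else {                             /* entry exhausted */
                i++;
            }
            remaining -= bytes;
        }

        for (unsigned j = 0; j < nr; j++)
            printf("vmemrange %u: [0x%" PRIx64 ", 0x%" PRIx64 ") nid %u\n",
                   j, out[j].start, out[j].end, out[j].nid);
        return 0;
    }

With these made-up numbers the node is recorded as two vmemranges,
[0, 1.5G) and [2.5G, 3G), both with nid 0, which is the shape the x86
routine below stores in state->vmemranges.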

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Dario Faggioli <dario.faggioli@citrix.com>
Cc: Elena Ufimtseva <ufimtseva@gmail.com>
---
Changes in v5:
1. Allocate array all in one go.
2. Reverse the logic of vmemranges generation.

Changes in v4:
1. Adapt to new interface.
2. Address Ian Jackson's comments.

Changes in v3:
1. Rewrite commit log.
---
 tools/libxl/libxl_arch.h     |  6 ++++
 tools/libxl/libxl_arm.c      |  8 +++++
 tools/libxl/libxl_internal.h |  8 +++++
 tools/libxl/libxl_vnuma.c    | 41 +++++++++++++++++++++++++
 tools/libxl/libxl_x86.c      | 73 ++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 136 insertions(+)

diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
index d3bc136..e249048 100644
--- a/tools/libxl/libxl_arch.h
+++ b/tools/libxl/libxl_arch.h
@@ -27,4 +27,10 @@ int libxl__arch_domain_init_hw_description(libxl__gc *gc,
 int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
                                       libxl_domain_build_info *info,
                                       struct xc_dom_image *dom);
+
+/* build vNUMA vmemrange with arch specific information */
+int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
+                                      uint32_t domid,
+                                      libxl_domain_build_info *b_info,
+                                      libxl__domain_build_state *state);
 #endif
diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
index 65a762b..7da254f 100644
--- a/tools/libxl/libxl_arm.c
+++ b/tools/libxl/libxl_arm.c
@@ -707,6 +707,14 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
     return 0;
 }
 
+int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
+                                      uint32_t domid,
+                                      libxl_domain_build_info *info,
+                                      libxl__domain_build_state *state)
+{
+    return libxl__vnuma_build_vmemrange_pv_generic(gc, domid, info, state);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 258be0d..7d1e1cf 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -3400,6 +3400,14 @@ void libxl__numa_candidate_put_nodemap(libxl__gc *gc,
 int libxl__vnuma_config_check(libxl__gc *gc,
                               const libxl_domain_build_info *b_info,
                               const libxl__domain_build_state *state);
+int libxl__vnuma_build_vmemrange_pv_generic(libxl__gc *gc,
+                                            uint32_t domid,
+                                            libxl_domain_build_info *b_info,
+                                            libxl__domain_build_state *state);
+int libxl__vnuma_build_vmemrange_pv(libxl__gc *gc,
+                                    uint32_t domid,
+                                    libxl_domain_build_info *b_info,
+                                    libxl__domain_build_state *state);
 
 _hidden int libxl__ms_vm_genid_set(libxl__gc *gc, uint32_t domid,
                                    const libxl_ms_vm_genid *id);
diff --git a/tools/libxl/libxl_vnuma.c b/tools/libxl/libxl_vnuma.c
index fa5aa8d..3d46239 100644
--- a/tools/libxl/libxl_vnuma.c
+++ b/tools/libxl/libxl_vnuma.c
@@ -14,6 +14,7 @@
  */
 #include "libxl_osdeps.h" /* must come before any other headers */
 #include "libxl_internal.h"
+#include "libxl_arch.h"
 #include <stdlib.h>
 
 /* Sort vmemranges in ascending order with "start" */
@@ -122,6 +123,46 @@ out:
     return rc;
 }
 
+
+int libxl__vnuma_build_vmemrange_pv_generic(libxl__gc *gc,
+                                            uint32_t domid,
+                                            libxl_domain_build_info *b_info,
+                                            libxl__domain_build_state *state)
+{
+    int i;
+    uint64_t next;
+    xen_vmemrange_t *v = NULL;
+
+    /* Generate one vmemrange for each virtual node. */
+    GCREALLOC_ARRAY(v, b_info->num_vnuma_nodes);
+    next = 0;
+    for (i = 0; i < b_info->num_vnuma_nodes; i++) {
+        libxl_vnode_info *p = &b_info->vnuma_nodes[i];
+
+        v[i].start = next;
+        v[i].end = next + (p->memkb << 10);
+        v[i].flags = 0;
+        v[i].nid = i;
+
+        next = v[i].end;
+    }
+
+    state->vmemranges = v;
+    state->num_vmemranges = i;
+
+    return 0;
+}
+
+/* Build vmemranges for PV guest */
+int libxl__vnuma_build_vmemrange_pv(libxl__gc *gc,
+                                    uint32_t domid,
+                                    libxl_domain_build_info *b_info,
+                                    libxl__domain_build_state *state)
+{
+    assert(state->vmemranges == NULL);
+    return libxl__arch_vnuma_build_vmemrange(gc, domid, b_info, state);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
index d012b4d..d37cca1 100644
--- a/tools/libxl/libxl_x86.c
+++ b/tools/libxl/libxl_x86.c
@@ -339,6 +339,79 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
     return 0;
 }
 
+/* Return 0 on success, ERROR_* on failure. */
+int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
+                                      uint32_t domid,
+                                      libxl_domain_build_info *b_info,
+                                      libxl__domain_build_state *state)
+{
+    int nid, nr_vmemrange, rc;
+    uint32_t nr_e820, e820_count;
+    struct e820entry map[E820MAX];
+    xen_vmemrange_t *vmemranges;
+
+    /* If e820_host is not set, call the generic function */
+    if (!(b_info->type == LIBXL_DOMAIN_TYPE_PV &&
+          libxl_defbool_val(b_info->u.pv.e820_host)))
+        return libxl__vnuma_build_vmemrange_pv_generic(gc, domid, b_info,
+                                                       state);
+
+    assert(state->vmemranges == NULL);
+
+    nr_e820 = E820MAX;
+    rc = e820_host_sanitize(gc, b_info, map, &nr_e820);
+    if (rc) goto out;
+
+    e820_count = 0;
+    nr_vmemrange = 0;
+    vmemranges = NULL;
+    for (nid = 0; nid < b_info->num_vnuma_nodes; nid++) {
+        libxl_vnode_info *p = &b_info->vnuma_nodes[nid];
+        uint64_t remaining_bytes = (p->memkb << 10), bytes;
+
+        while (remaining_bytes > 0) {
+            if (e820_count >= nr_e820) {
+                rc = ERROR_NOMEM;
+                goto out;
+            }
+
+            /* Skip non RAM region */
+            if (map[e820_count].type != E820_RAM) {
+                e820_count++;
+                continue;
+            }
+
+            GCREALLOC_ARRAY(vmemranges, nr_vmemrange+1);
+
+            bytes = map[e820_count].size >= remaining_bytes ?
+                remaining_bytes : map[e820_count].size;
+
+            vmemranges[nr_vmemrange].start = map[e820_count].addr;
+            vmemranges[nr_vmemrange].end = map[e820_count].addr + bytes;
+
+            if (map[e820_count].size >= remaining_bytes) {
+                map[e820_count].addr += bytes;
+                map[e820_count].size -= bytes;
+            } else {
+                e820_count++;
+            }
+
+            remaining_bytes -= bytes;
+
+            vmemranges[nr_vmemrange].flags = 0;
+            vmemranges[nr_vmemrange].nid = nid;
+            nr_vmemrange++;
+        }
+    }
+
+    state->vmemranges = vmemranges;
+    state->num_vmemranges = nr_vmemrange;
+
+    rc = 0;
+out:
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
-- 
1.9.1

Thread overview: 94+ messages
2015-02-12 19:44 [PATCH v5 00/24] Virtual NUMA for PV and HVM Wei Liu
2015-02-12 19:44 ` [PATCH v5 01/24] xen: dump vNUMA information with debug key "u" Wei Liu
2015-02-13 11:50   ` Andrew Cooper
2015-02-16 14:35     ` Dario Faggioli
2015-02-12 19:44 ` [PATCH v5 02/24] xen: make two memory hypercalls vNUMA-aware Wei Liu
2015-02-13 12:00   ` Andrew Cooper
2015-02-13 13:24     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 03/24] libxc: duplicate snippet to allocate p2m_host array Wei Liu
2015-02-12 19:44 ` [PATCH v5 04/24] libxc: add p2m_size to xc_dom_image Wei Liu
2015-02-16 14:46   ` Dario Faggioli
2015-02-16 14:49     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 05/24] libxc: allocate memory with vNUMA information for PV guest Wei Liu
2015-02-13 14:30   ` Andrew Cooper
2015-02-13 15:05     ` Wei Liu
2015-02-13 15:17       ` Andrew Cooper
2015-02-16 16:58   ` Dario Faggioli
2015-02-16 17:44     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 06/24] libxl: introduce vNUMA types Wei Liu
2015-02-16 14:58   ` Dario Faggioli
2015-02-16 15:17     ` Wei Liu
2015-02-16 15:56       ` Dario Faggioli
2015-02-16 16:11         ` Wei Liu
2015-02-16 16:51           ` Dario Faggioli
2015-02-16 17:38             ` Wei Liu
2015-02-17 10:42               ` Dario Faggioli
2015-02-12 19:44 ` [PATCH v5 07/24] libxl: add vmemrange to libxl__domain_build_state Wei Liu
2015-02-16 16:00   ` Dario Faggioli
2015-02-16 16:15     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 08/24] libxl: introduce libxl__vnuma_config_check Wei Liu
2015-02-13 14:15   ` Ian Jackson
2015-02-13 15:12     ` Wei Liu
2015-02-13 15:39       ` Elena Ufimtseva
2015-02-13 16:06         ` Wei Liu
2015-02-13 16:11           ` Elena Ufimtseva
2015-02-17 16:51             ` Dario Faggioli
2015-02-22 15:50               ` Wei Liu
2015-02-17 16:44       ` Dario Faggioli
2015-02-13 15:40   ` Andrew Cooper
2015-02-17 12:56     ` Wei Liu
2015-03-02 15:13       ` Ian Campbell
2015-03-02 15:25         ` Andrew Cooper
2015-03-02 16:05           ` Ian Campbell
2015-02-17 16:38   ` Dario Faggioli
2015-02-22 15:47     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 09/24] libxl: x86: factor out e820_host_sanitize Wei Liu
2015-02-13 15:42   ` Andrew Cooper
2015-02-16 17:00     ` Dario Faggioli
2015-02-12 19:44 ` Wei Liu [this message]
2015-02-13 15:49   ` [PATCH v5 10/24] libxl: functions to build vmemranges for PV guest Andrew Cooper
2015-02-17 14:08     ` Wei Liu
2015-02-17 15:28   ` Dario Faggioli
2015-02-17 15:32     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 11/24] libxl: build, check and pass vNUMA info to Xen " Wei Liu
2015-02-13 15:54   ` Andrew Cooper
2015-02-17 14:49   ` Dario Faggioli
2015-02-12 19:44 ` [PATCH v5 12/24] hvmloader: retrieve vNUMA information from hypervisor Wei Liu
2015-02-13 15:58   ` Andrew Cooper
2015-02-17 11:36   ` Jan Beulich
2015-02-17 11:42     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 13/24] hvmloader: construct SRAT Wei Liu
2015-02-13 16:07   ` Andrew Cooper
2015-02-12 19:44 ` [PATCH v5 14/24] hvmloader: construct SLIT Wei Liu
2015-02-13 16:10   ` Andrew Cooper
2015-02-12 19:44 ` [PATCH v5 15/24] libxc: indentation change to xc_hvm_build_x86.c Wei Liu
2015-02-12 19:44 ` [PATCH v5 16/24] libxc: allocate memory with vNUMA information for HVM guest Wei Liu
2015-02-13 16:22   ` Andrew Cooper
2015-02-12 19:44 ` [PATCH v5 17/24] libxl: build, check and pass vNUMA info to Xen " Wei Liu
2015-02-13 14:21   ` Ian Jackson
2015-02-13 15:18     ` Wei Liu
2015-02-17 14:26   ` Dario Faggioli
2015-02-17 14:41     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 18/24] libxl: disallow memory relocation when vNUMA is enabled Wei Liu
2015-02-13 14:17   ` Ian Jackson
2015-02-13 15:18     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 19/24] libxl: define LIBXL_HAVE_VNUMA Wei Liu
2015-02-13 14:12   ` Ian Jackson
2015-02-13 15:21     ` Wei Liu
2015-02-13 15:26       ` Ian Jackson
2015-02-13 15:27         ` Ian Jackson
2015-02-13 15:28         ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 20/24] libxlu: rework internal representation of setting Wei Liu
2015-02-13 14:24   ` Ian Jackson
2015-02-12 19:44 ` [PATCH v5 21/24] libxlu: nested list support Wei Liu
2015-02-12 19:44 ` [PATCH v5 22/24] libxlu: introduce new APIs Wei Liu
2015-02-13 14:12   ` Ian Jackson
2015-02-16 19:10     ` Wei Liu
2015-02-16 19:47       ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 23/24] xl: introduce xcalloc Wei Liu
2015-02-12 20:17   ` Andrew Cooper
2015-02-13 10:25     ` Wei Liu
2015-02-12 19:44 ` [PATCH v5 24/24] xl: vNUMA support Wei Liu
2015-02-24 16:19   ` Dario Faggioli
2015-02-24 16:31     ` Wei Liu
2015-02-24 16:44       ` Dario Faggioli
