From: Elena Ufimtseva
Subject: [PATCH v10 7/9] libxc: allocate domain memory for vnuma enabled
Date: Wed, 3 Sep 2014 00:24:16 -0400
Message-ID: <1409718258-3276-5-git-send-email-ufimtseva@gmail.com>
References: <1409718258-3276-1-git-send-email-ufimtseva@gmail.com>
In-Reply-To: <1409718258-3276-1-git-send-email-ufimtseva@gmail.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: xen-devel@lists.xen.org
Cc: keir@xen.org, Ian.Campbell@citrix.com, stefano.stabellini@eu.citrix.com,
    george.dunlap@eu.citrix.com, msw@linux.com, dario.faggioli@citrix.com,
    lccycc123@gmail.com, ian.jackson@eu.citrix.com, JBeulich@suse.com,
    Elena Ufimtseva
List-Id: xen-devel@lists.xenproject.org

vNUMA-aware domain memory allocation based on the provided vnode-to-pnode
map. If this map is not defined, default allocation is used; default
allocation does not specify any physical node when allocating memory.
Domain creation will fail if no vNUMA nodes were defined.

Signed-off-by: Elena Ufimtseva
---
 tools/libxc/xc_dom.h     |   13 ++++++++
 tools/libxc/xc_dom_x86.c |   76 ++++++++++++++++++++++++++++++++++++++--------
 2 files changed, 77 insertions(+), 12 deletions(-)

diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 6ae6a9f..61c2a06 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -164,6 +164,16 @@ struct xc_dom_image {
 
     /* kernel loader */
     struct xc_dom_arch *arch_hooks;
+
+    /*
+     * vNUMA topology and memory allocation structure.
+     * Defines how memory is allocated per physical
+     * NUMA node, as given by vnode_to_pnode.
+     */
+    uint32_t vnodes;
+    uint64_t *numa_memszs;
+    unsigned int *vnode_to_pnode;
+
     /* allocate up to virt_alloc_end */
     int (*allocate) (struct xc_dom_image * dom, xen_vaddr_t up_to);
 };
@@ -385,6 +395,9 @@ static inline xen_pfn_t xc_dom_p2m_guest(struct xc_dom_image *dom,
 int arch_setup_meminit(struct xc_dom_image *dom);
 int arch_setup_bootearly(struct xc_dom_image *dom);
 int arch_setup_bootlate(struct xc_dom_image *dom);
+int arch_boot_alloc(struct xc_dom_image *dom);
+
+#define LIBXC_VNUMA_NO_NODE ~((unsigned int)0)
 
 /*
  * Local variables:
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index bf06fe4..f2b4c98 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -759,7 +759,7 @@ static int x86_shadow(xc_interface *xch, domid_t domid)
 int arch_setup_meminit(struct xc_dom_image *dom)
 {
     int rc;
-    xen_pfn_t pfn, allocsz, i, j, mfn;
+    xen_pfn_t pfn, i, j, mfn;
 
     rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
     if ( rc )
@@ -811,25 +811,77 @@ int arch_setup_meminit(struct xc_dom_image *dom)
         /* setup initial p2m */
         for ( pfn = 0; pfn < dom->total_pages; pfn++ )
             dom->p2m_host[pfn] = pfn;
+
+        /*
+         * Any PV domain should have at least one vNUMA node.
+         * If no config was defined, one default vNUMA node
+         * will be set.
+         */
+        if ( dom->vnodes == 0 ) {
+            xc_dom_printf(dom->xch,
+                          "%s: Cannot construct vNUMA topology with 0 vnodes\n",
+                          __FUNCTION__);
+            return -EINVAL;
+        }
 
         /* allocate guest memory */
-        for ( i = rc = allocsz = 0;
-              (i < dom->total_pages) && !rc;
-              i += allocsz )
-        {
-            allocsz = dom->total_pages - i;
-            if ( allocsz > 1024*1024 )
-                allocsz = 1024*1024;
-            rc = xc_domain_populate_physmap_exact(
-                dom->xch, dom->guest_domid, allocsz,
-                0, 0, &dom->p2m_host[i]);
-        }
+        rc = arch_boot_alloc(dom);
+        if ( rc )
+            return rc;
 
         /* Ensure no unclaimed pages are left unused.
          * OK to call if hadn't done the earlier claim call. */
         (void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
                                     0 /* cancels the claim */);
     }
 
     return rc;
 }
+
+/*
+ * Allocates domain memory taking into account
+ * the defined vNUMA topology and vnode_to_pnode map.
+ * Any PV guest will have at least one vNUMA node with
+ * numa_memszs[0] equal to the domain memory and the
+ * rest of the topology initialized with defaults.
+ */
+int arch_boot_alloc(struct xc_dom_image *dom)
+{
+    int rc;
+    unsigned int n, memflags;
+    unsigned long long vnode_pages;
+    unsigned long long allocsz = 0, node_pfn_base, i;
+
+    rc = allocsz = node_pfn_base = n = 0;
+
+    for ( n = 0; n < dom->vnodes; n++ )
+    {
+        memflags = 0;
+        if ( dom->vnode_to_pnode[n] != LIBXC_VNUMA_NO_NODE )
+        {
+            memflags |= XENMEMF_exact_node(dom->vnode_to_pnode[n]);
+            memflags |= XENMEMF_exact_node_request;
+        }
+        /* numa_memszs are in megabytes; calculate pages for this node. */
+        vnode_pages = (dom->numa_memszs[n] << 20) >> PAGE_SHIFT_X86;
+        for ( i = 0; i < vnode_pages; i += allocsz )
+        {
+            allocsz = vnode_pages - i;
+            if ( allocsz > 1024*1024 )
+                allocsz = 1024*1024;
+
+            rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
+                                                  allocsz, 0, memflags,
+                                                  &dom->p2m_host[node_pfn_base + i]);
+            if ( rc )
+            {
+                xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                    "%s: Failed allocation of %Lu pages for vnode %d on pnode %d out of %lu\n",
+                    __FUNCTION__, vnode_pages, n, dom->vnode_to_pnode[n], dom->total_pages);
+                return rc;
+            }
+        }
+        node_pfn_base += i;
+    }
+    return rc;
+}
-- 
1.7.10.4
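
P.S. Not part of the patch above: a minimal sketch of how a caller could
fill the new xc_dom_image fields (vnodes, numa_memszs, vnode_to_pnode)
before the domain memory is populated and arch_boot_alloc() runs. The
helper name, the fixed two-vnode split, and the include lines are
assumptions made only for this example; the real wiring lives in the
libxl patches elsewhere in this series.

    /*
     * Illustrative sketch only: populate the vNUMA fields added to
     * struct xc_dom_image by this patch.  example_set_vnuma() and the
     * even two-node split are hypothetical.
     */
    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include "xc_dom.h"

    static int example_set_vnuma(struct xc_dom_image *dom, uint64_t total_mb,
                                 unsigned int pnode0, unsigned int pnode1)
    {
        dom->vnodes = 2;
        dom->numa_memszs = calloc(dom->vnodes, sizeof(*dom->numa_memszs));
        dom->vnode_to_pnode = calloc(dom->vnodes, sizeof(*dom->vnode_to_pnode));
        if ( dom->numa_memszs == NULL || dom->vnode_to_pnode == NULL )
            return -ENOMEM;

        /* Sizes are in megabytes, matching what arch_boot_alloc() expects. */
        dom->numa_memszs[0] = total_mb / 2;
        dom->numa_memszs[1] = total_mb - dom->numa_memszs[0];

        /*
         * Pin each vnode to a physical node.  Using LIBXC_VNUMA_NO_NODE
         * instead would fall back to the default (non-exact) allocation.
         */
        dom->vnode_to_pnode[0] = pnode0;
        dom->vnode_to_pnode[1] = pnode1;

        return 0;
    }

With the fields set this way, the existing arch_setup_meminit() path
calls arch_boot_alloc(), which requests exact-node placement for each
vnode via XENMEMF_exact_node/XENMEMF_exact_node_request.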