From: Andrew Cooper
Subject: Re: [PATCH v5 01/24] xen: dump vNUMA information with debug key "u"
Date: Fri, 13 Feb 2015 11:50:09 +0000
Message-ID: <54DDE4F1.2020506@citrix.com>
In-Reply-To: <1423770294-9779-2-git-send-email-wei.liu2@citrix.com>
References: <1423770294-9779-1-git-send-email-wei.liu2@citrix.com>
 <1423770294-9779-2-git-send-email-wei.liu2@citrix.com>
To: Wei Liu, xen-devel@lists.xen.org
Cc: dario.faggioli@citrix.com, JBeulich@suse.com, ian.jackson@eu.citrix.com,
 ian.campbell@citrix.com, ufimtseva@gmail.com
List-Id: xen-devel@lists.xenproject.org

On 12/02/15 19:44, Wei Liu wrote:
> Signed-off-by: Elena Ufimtseva
> Signed-off-by: Wei Liu
> Cc: Jan Beulich

Reviewed-by: Andrew Cooper

> ---
> Changes in v5:
> 1. Use read_trylock.
> 2. Use correct array size for strlcpy.
> 3. Coding style fix.
>
> Changes in v4:
> 1. Acquire rwlock before accessing vnuma struct.
> 2. Improve output.
>
> Changes in v3:
> 1. Constify struct vnuma_info.
> 2. Don't print amount of ram of a vmemrange.
> 3. Process softirqs when dumping information.
> 4. Fix format string.
>
> Changes in v2:
> 1. Use unsigned int for loop vars.
> 2. Use strlcpy.
> 3. Properly align output.
> ---
>  xen/arch/x86/numa.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 70 insertions(+), 1 deletion(-)
>
> diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
> index 628a40a..e500f33 100644
> --- a/xen/arch/x86/numa.c
> +++ b/xen/arch/x86/numa.c
> @@ -16,6 +16,7 @@
>  #include <xen/pfn.h>
>  #include <asm/acpi.h>
>  #include <xen/sched.h>
> +#include <xen/softirq.h>
>
>  static int numa_setup(char *s);
>  custom_param("numa", numa_setup);
> @@ -363,10 +364,12 @@ EXPORT_SYMBOL(node_data);
>  static void dump_numa(unsigned char key)
>  {
>      s_time_t now = NOW();
> -    int i;
> +    unsigned int i, j;
> +    int err;
>      struct domain *d;
>      struct page_info *page;
>      unsigned int page_num_node[MAX_NUMNODES];
> +    const struct vnuma_info *vnuma;
>
>      printk("'%c' pressed -> dumping numa info (now-0x%X:%08X)\n", key,
>             (u32)(now>>32), (u32)now);
> @@ -393,6 +396,8 @@ static void dump_numa(unsigned char key)
>      printk("Memory location of each domain:\n");
>      for_each_domain ( d )
>      {
> +        process_pending_softirqs();
> +
>          printk("Domain %u (total: %u):\n", d->domain_id, d->tot_pages);
>
>          for_each_online_node ( i )
> @@ -408,6 +413,70 @@ static void dump_numa(unsigned char key)
>
>          for_each_online_node ( i )
>              printk("    Node %u: %u\n", i, page_num_node[i]);
> +
> +        if ( !read_trylock(&d->vnuma_rwlock) )
> +            continue;
> +
> +        if ( !d->vnuma )
> +        {
> +            read_unlock(&d->vnuma_rwlock);
> +            continue;
> +        }
> +
> +        vnuma = d->vnuma;
> +        printk("     %u vnodes, %u vcpus, guest physical layout:\n",
> +               vnuma->nr_vnodes, d->max_vcpus);
> +        for ( i = 0; i < vnuma->nr_vnodes; i++ )
> +        {
> +            unsigned int start_cpu = ~0U;
> +
> +            err = snprintf(keyhandler_scratch, 12, "%3u",
> +                           vnuma->vnode_to_pnode[i]);
> +            if ( err < 0 || vnuma->vnode_to_pnode[i] == NUMA_NO_NODE )
> +                strlcpy(keyhandler_scratch, "???", sizeof(keyhandler_scratch));
> +
> +            printk("       %3u: pnode %s,", i, keyhandler_scratch);
> +
> +            printk(" vcpus ");
> +
> +            for ( j = 0; j < d->max_vcpus; j++ )
> +            {
> +                if ( !(j & 0x3f) )
> +                    process_pending_softirqs();
> +
> +                if ( vnuma->vcpu_to_vnode[j] == i )
> +                {
> +                    if ( start_cpu == ~0U )
> +                    {
printk("%d", j); > + start_cpu = j; > + } > + } > + else if ( start_cpu != ~0U ) > + { > + if ( j - 1 != start_cpu ) > + printk("-%d ", j - 1); > + else > + printk(" "); > + start_cpu = ~0U; > + } > + } > + > + if ( start_cpu != ~0U && start_cpu != j - 1 ) > + printk("-%d", j - 1); > + > + printk("\n"); > + > + for ( j = 0; j < vnuma->nr_vmemranges; j++ ) > + { > + if ( vnuma->vmemrange[j].nid == i ) > + printk(" %016"PRIx64" - %016"PRIx64"\n", > + vnuma->vmemrange[j].start, > + vnuma->vmemrange[j].end); > + } > + } > + > + read_unlock(&d->vnuma_rwlock); > } > > rcu_read_unlock(&domlist_read_lock);