All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] numa.c: convert to xen coding style
@ 2014-09-04  4:04 Elena Ufimtseva
  2014-09-04  4:23 ` Elena Ufimtseva
  2014-09-04 10:43 ` Dario Faggioli
  0 siblings, 2 replies; 7+ messages in thread
From: Elena Ufimtseva @ 2014-09-04  4:04 UTC (permalink / raw)
  To: xen-devel
  Cc: keir, Ian.Campbell, george.dunlap, dario.faggioli, ian.jackson,
	JBeulich, Elena Ufimtseva

Convert numa.c from its current mixed coding style to the Xen coding style.

Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
---
 xen/arch/x86/numa.c |  500 ++++++++++++++++++++++++++-------------------------
 1 file changed, 257 insertions(+), 243 deletions(-)

diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index b141877..3e5445b 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -36,13 +36,13 @@ unsigned long memnodemapsize;
 u8 *memnodemap;
 
 unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
-	[0 ... NR_CPUS-1] = NUMA_NO_NODE
+    [0 ... NR_CPUS-1] = NUMA_NO_NODE
 };
 /*
  * Keep BIOS's CPU2node information, should not be used for memory allocaion
  */
 unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
- 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+    [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
 
@@ -54,7 +54,7 @@ int acpi_numa __devinitdata;
 
 int srat_disabled(void)
 {
-	return numa_off || acpi_numa < 0;
+    return numa_off || acpi_numa < 0;
 }
 
 /*
@@ -67,53 +67,55 @@ int srat_disabled(void)
 static int __init populate_memnodemap(const struct node *nodes,
                                       int numnodes, int shift, int *nodeids)
 {
-	unsigned long spdx, epdx;
-	int i, res = -1;
-
-	memset(memnodemap, NUMA_NO_NODE, memnodemapsize * sizeof(*memnodemap));
-	for (i = 0; i < numnodes; i++) {
-		spdx = paddr_to_pdx(nodes[i].start);
-		epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
-		if (spdx >= epdx)
-			continue;
-		if ((epdx >> shift) >= memnodemapsize)
-			return 0;
-		do {
-			if (memnodemap[spdx >> shift] != NUMA_NO_NODE)
-				return -1;
-
-			if (!nodeids)
-				memnodemap[spdx >> shift] = i;
-			else
-				memnodemap[spdx >> shift] = nodeids[i];
-
-			spdx += (1UL << shift);
-		} while (spdx < epdx);
-		res = 1;
-	}
-	return res;
+    unsigned long spdx, epdx;
+    int i, res = -1;
+
+    memset(memnodemap, NUMA_NO_NODE, memnodemapsize * sizeof(*memnodemap));
+    for ( i = 0; i < numnodes; i++ )
+    {
+        spdx = paddr_to_pdx(nodes[i].start);
+        epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
+        if ( spdx >= epdx )
+            continue;
+        if ( (epdx >> shift) >= memnodemapsize )
+            return 0;
+        do {
+            if ( memnodemap[spdx >> shift] != NUMA_NO_NODE )
+                return -1;
+
+            if ( !nodeids )
+                memnodemap[spdx >> shift] = i;
+            else
+                memnodemap[spdx >> shift] = nodeids[i];
+
+            spdx += (1UL << shift);
+        } while ( spdx < epdx );
+        res = 1;
+    }
+    return res;
 }
 
 static int __init allocate_cachealigned_memnodemap(void)
 {
-	unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
-	unsigned long mfn = alloc_boot_pages(size, 1);
-
-	if (!mfn) {
-		printk(KERN_ERR
-		       "NUMA: Unable to allocate Memory to Node hash map\n");
-		memnodemapsize = 0;
-		return -1;
-	}
-
-	memnodemap = mfn_to_virt(mfn);
-	mfn <<= PAGE_SHIFT;
-	size <<= PAGE_SHIFT;
-	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
-	       mfn, mfn + size);
-	memnodemapsize = size / sizeof(*memnodemap);
-
-	return 0;
+    unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
+    unsigned long mfn = alloc_boot_pages(size, 1);
+
+    if ( !mfn )
+    {
+        printk(KERN_ERR
+               "NUMA: Unable to allocate Memory to Node hash map\n");
+        memnodemapsize = 0;
+        return -1;
+    }
+
+    memnodemap = mfn_to_virt(mfn);
+    mfn <<= PAGE_SHIFT;
+    size <<= PAGE_SHIFT;
+    printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
+           mfn, mfn + size);
+    memnodemapsize = size / sizeof(*memnodemap);
+
+    return 0;
 }
 
 /*
@@ -121,84 +123,85 @@ static int __init allocate_cachealigned_memnodemap(void)
  * maximum possible shift.
  */
 static int __init extract_lsb_from_nodes(const struct node *nodes,
-					 int numnodes)
+                                         int numnodes)
 {
-	int i, nodes_used = 0;
-	unsigned long spdx, epdx;
-	unsigned long bitfield = 0, memtop = 0;
-
-	for (i = 0; i < numnodes; i++) {
-		spdx = paddr_to_pdx(nodes[i].start);
-		epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
-		if (spdx >= epdx)
-			continue;
-		bitfield |= spdx;
-		nodes_used++;
-		if (epdx > memtop)
-			memtop = epdx;
-	}
-	if (nodes_used <= 1)
-		i = BITS_PER_LONG - 1;
-	else
-		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
-	memnodemapsize = (memtop >> i) + 1;
-	return i;
+    int i, nodes_used = 0;
+    unsigned long spdx, epdx;
+    unsigned long bitfield = 0, memtop = 0;
+
+    for ( i = 0; i < numnodes; i++ )
+    {
+        spdx = paddr_to_pdx(nodes[i].start);
+        epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
+        if ( spdx >= epdx )
+            continue;
+        bitfield |= spdx;
+        nodes_used++;
+        if ( epdx > memtop )
+            memtop = epdx;
+    }
+    if ( nodes_used <= 1 )
+        i = BITS_PER_LONG - 1;
+    else
+        i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
+    memnodemapsize = (memtop >> i) + 1;
+    return i;
 }
 
 int __init compute_hash_shift(struct node *nodes, int numnodes,
-			      int *nodeids)
+                              int *nodeids)
 {
-	int shift;
-
-	shift = extract_lsb_from_nodes(nodes, numnodes);
-	if (memnodemapsize <= ARRAY_SIZE(_memnodemap))
-		memnodemap = _memnodemap;
-	else if (allocate_cachealigned_memnodemap())
-		return -1;
-	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
-		shift);
-
-	if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
-		printk(KERN_INFO "Your memory is not aligned you need to "
-		       "rebuild your kernel with a bigger NODEMAPSIZE "
-		       "shift=%d\n", shift);
-		return -1;
-	}
-	return shift;
+    int shift;
+
+    shift = extract_lsb_from_nodes(nodes, numnodes);
+    if ( memnodemapsize <= ARRAY_SIZE(_memnodemap) )
+        memnodemap = _memnodemap;
+    else if ( allocate_cachealigned_memnodemap() )
+        return -1;
+    printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);
+
+    if ( populate_memnodemap(nodes, numnodes, shift, nodeids) != 1 )
+    {
+        printk(KERN_INFO "Your memory is not aligned you need to "
+               "rebuild your kernel with a bigger NODEMAPSIZE "
+               "shift=%d\n", shift);
+        return -1;
+    }
+    return shift;
 }
 /* initialize NODE_DATA given nodeid and start/end */
 void __init setup_node_bootmem(int nodeid, u64 start, u64 end)
 { 
-	unsigned long start_pfn, end_pfn;
+    unsigned long start_pfn, end_pfn;
 
-	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = end >> PAGE_SHIFT;
+    start_pfn = start >> PAGE_SHIFT;
+    end_pfn = end >> PAGE_SHIFT;
 
-	NODE_DATA(nodeid)->node_id = nodeid;
-	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
-	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
+    NODE_DATA(nodeid)->node_id = nodeid;
+    NODE_DATA(nodeid)->node_start_pfn = start_pfn;
+    NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
 
-	node_set_online(nodeid);
+    node_set_online(nodeid);
 } 
 
 void __init numa_init_array(void)
 {
-	int rr, i;
-	/* There are unfortunately some poorly designed mainboards around
-	   that only connect memory to a single CPU. This breaks the 1:1 cpu->node
-	   mapping. To avoid this fill in the mapping for all possible
-	   CPUs, as the number of CPUs is not known yet. 
-	   We round robin the existing nodes. */
-	rr = first_node(node_online_map);
-	for (i = 0; i < nr_cpu_ids; i++) {
-		if (cpu_to_node[i] != NUMA_NO_NODE)
-			continue;
- 		numa_set_node(i, rr);
-		rr = next_node(rr, node_online_map);
-		if (rr == MAX_NUMNODES)
-			rr = first_node(node_online_map);
-	}
-
+    int rr, i;
+    /* There are unfortunately some poorly designed mainboards around
+       that only connect memory to a single CPU. This breaks the 1:1 cpu->node
+       mapping. To avoid this fill in the mapping for all possible
+       CPUs, as the number of CPUs is not known yet.
+       We round robin the existing nodes. */
+    rr = first_node(node_online_map);
+    for ( i = 0; i < nr_cpu_ids; i++ )
+    {
+        if ( cpu_to_node[i] != NUMA_NO_NODE )
+            continue;
+        numa_set_node(i, rr);
+        rr = next_node(rr, node_online_map);
+        if ( rr == MAX_NUMNODES )
+            rr = first_node(node_online_map);
+    }
 }
 
 #ifdef CONFIG_NUMA_EMU
@@ -207,109 +210,115 @@ static int numa_fake __initdata = 0;
 /* Numa emulation */
 static int __init numa_emulation(u64 start_pfn, u64 end_pfn)
 {
- 	int i;
- 	struct node nodes[MAX_NUMNODES];
- 	u64 sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;
-
- 	/* Kludge needed for the hash function */
- 	if (hweight64(sz) > 1) {
- 		u64 x = 1;
- 		while ((x << 1) < sz)
- 			x <<= 1;
- 		if (x < sz/2)
- 			printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
- 		sz = x;
- 	}
-
- 	memset(&nodes,0,sizeof(nodes));
- 	for (i = 0; i < numa_fake; i++) {
- 		nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
- 		if (i == numa_fake-1)
- 			sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
- 		nodes[i].end = nodes[i].start + sz;
- 		printk(KERN_INFO "Faking node %d at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n",
-		       i,
-		       nodes[i].start, nodes[i].end,
-		       (nodes[i].end - nodes[i].start) >> 20);
-		node_set_online(i);
- 	}
- 	memnode_shift = compute_hash_shift(nodes, numa_fake, NULL);
- 	if (memnode_shift < 0) {
- 		memnode_shift = 0;
- 		printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
- 		return -1;
- 	}
- 	for_each_online_node(i)
- 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
- 	numa_init_array();
- 	return 0;
+    int i;
+    struct node nodes[MAX_NUMNODES];
+    u64 sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;
+
+    /* Kludge needed for the hash function */
+    if ( hweight64(sz) > 1 )
+    {
+        u64 x = 1;
+        while ( (x << 1) < sz )
+            x <<= 1;
+        if ( x < sz/2 )
+            printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
+        sz = x;
+    }
+
+    memset(&nodes,0,sizeof(nodes));
+    for ( i = 0; i < numa_fake; i++ )
+    {
+        nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
+        if ( i == numa_fake - 1 )
+            sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
+        nodes[i].end = nodes[i].start + sz;
+        printk(KERN_INFO "Faking node %d at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n",
+               i,
+               nodes[i].start, nodes[i].end,
+               (nodes[i].end - nodes[i].start) >> 20);
+        node_set_online(i);
+    }
+    memnode_shift = compute_hash_shift(nodes, numa_fake, NULL);
+    if ( memnode_shift < 0 )
+    {
+        memnode_shift = 0;
+        printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
+        return -1;
+    }
+    for_each_online_node ( i )
+        setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+    numa_init_array();
+    return 0;
 }
 #endif
 
 void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 { 
-	int i;
+    int i;
 
 #ifdef CONFIG_NUMA_EMU
-	if (numa_fake && !numa_emulation(start_pfn, end_pfn))
-		return;
+    if ( numa_fake && !numa_emulation(start_pfn, end_pfn) )
+        return;
 #endif
 
 #ifdef CONFIG_ACPI_NUMA
-	if (!numa_off && !acpi_scan_nodes((u64)start_pfn << PAGE_SHIFT,
-					  (u64)end_pfn << PAGE_SHIFT))
-		return;
+    if ( !numa_off && !acpi_scan_nodes((u64)start_pfn << PAGE_SHIFT,
+         (u64)end_pfn << PAGE_SHIFT) )
+        return;
 #endif
 
-	printk(KERN_INFO "%s\n",
-	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
-
-	printk(KERN_INFO "Faking a node at %016"PRIx64"-%016"PRIx64"\n",
-	       (u64)start_pfn << PAGE_SHIFT,
-	       (u64)end_pfn << PAGE_SHIFT);
-	/* setup dummy node covering all memory */ 
-	memnode_shift = BITS_PER_LONG - 1;
-	memnodemap = _memnodemap;
-	nodes_clear(node_online_map);
-	node_set_online(0);
-	for (i = 0; i < nr_cpu_ids; i++)
-		numa_set_node(i, 0);
-	cpumask_copy(&node_to_cpumask[0], cpumask_of(0));
-	setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT, (u64)end_pfn << PAGE_SHIFT);
+    printk(KERN_INFO "%s\n",
+           numa_off ? "NUMA turned off" : "No NUMA configuration found");
+
+    printk(KERN_INFO "Faking a node at %016"PRIx64"-%016"PRIx64"\n",
+           (u64)start_pfn << PAGE_SHIFT,
+           (u64)end_pfn << PAGE_SHIFT);
+    /* setup dummy node covering all memory */
+    memnode_shift = BITS_PER_LONG - 1;
+    memnodemap = _memnodemap;
+    nodes_clear(node_online_map);
+    node_set_online(0);
+    for ( i = 0; i < nr_cpu_ids; i++ )
+        numa_set_node(i, 0);
+    cpumask_copy(&node_to_cpumask[0], cpumask_of(0));
+    setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT,
+                    (u64)end_pfn << PAGE_SHIFT);
 }
 
 __cpuinit void numa_add_cpu(int cpu)
 {
-	cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
+    cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
 } 
 
 void __cpuinit numa_set_node(int cpu, int node)
 {
-	cpu_to_node[cpu] = node;
+    cpu_to_node[cpu] = node;
 }
 
 /* [numa=off] */
 static __init int numa_setup(char *opt) 
 { 
-	if (!strncmp(opt,"off",3))
-		numa_off = 1;
-	if (!strncmp(opt,"on",2))
-		numa_off = 0;
+    if ( !strncmp(opt,"off",3) )
+        numa_off = 1;
+    if ( !strncmp(opt,"on",2) )
+        numa_off = 0;
 #ifdef CONFIG_NUMA_EMU
-	if(!strncmp(opt, "fake=", 5)) {
-		numa_off = 0;
-		numa_fake = simple_strtoul(opt+5,NULL,0); ;
-		if (numa_fake >= MAX_NUMNODES)
-			numa_fake = MAX_NUMNODES;
-	}
+    if ( !strncmp(opt, "fake=", 5) )
+    {
+        numa_off = 0;
+        numa_fake = simple_strtoul(opt+5,NULL,0);
+        if ( numa_fake >= MAX_NUMNODES )
+            numa_fake = MAX_NUMNODES;
+    }
 #endif
 #ifdef CONFIG_ACPI_NUMA
-	if (!strncmp(opt,"noacpi",6)) {
-		numa_off = 0;
-		acpi_numa = -1;
-	}
+    if ( !strncmp(opt,"noacpi",6) )
+    {
+        numa_off = 0;
+        acpi_numa = -1;
+    }
 #endif
-	return 1;
+    return 1;
 } 
 
 /*
@@ -326,16 +335,17 @@ static __init int numa_setup(char *opt)
  */
 void __init init_cpu_to_node(void)
 {
-	int i, node;
- 	for (i = 0; i < nr_cpu_ids; i++) {
-		u32 apicid = x86_cpu_to_apicid[i];
-		if (apicid == BAD_APICID)
-			continue;
-		node = apicid_to_node[apicid];
-		if ( node == NUMA_NO_NODE || !node_online(node) )
-			node = 0;
-		numa_set_node(i, node);
-	}
+    int i, node;
+    for ( i = 0; i < nr_cpu_ids; i++ )
+    {
+        u32 apicid = x86_cpu_to_apicid[i];
+        if ( apicid == BAD_APICID )
+            continue;
+        node = apicid_to_node[apicid];
+        if ( node == NUMA_NO_NODE || !node_online(node) )
+            node = 0;
+        numa_set_node(i, node);
+    }
 }
 
 EXPORT_SYMBOL(cpu_to_node);
@@ -346,64 +356,68 @@ EXPORT_SYMBOL(node_data);
 
 static void dump_numa(unsigned char key)
 {
-	s_time_t now = NOW();
-	int i;
-	struct domain *d;
-	struct page_info *page;
-	unsigned int page_num_node[MAX_NUMNODES];
-
-	printk("'%c' pressed -> dumping numa info (now-0x%X:%08X)\n", key,
-		  (u32)(now>>32), (u32)now);
-
-	for_each_online_node(i) {
-		paddr_t pa = (paddr_t)(NODE_DATA(i)->node_start_pfn + 1)<< PAGE_SHIFT;
-		printk("idx%d -> NODE%d start->%lu size->%lu free->%lu\n",
-			  i, NODE_DATA(i)->node_id,
-			  NODE_DATA(i)->node_start_pfn,
-			  NODE_DATA(i)->node_spanned_pages,
-			  avail_node_heap_pages(i));
-		/* sanity check phys_to_nid() */
-		printk("phys_to_nid(%"PRIpaddr") -> %d should be %d\n", pa, phys_to_nid(pa),
-			  NODE_DATA(i)->node_id);
-	}
-	for_each_online_cpu(i)
-		printk("CPU%d -> NODE%d\n", i, cpu_to_node[i]);
-
-	rcu_read_lock(&domlist_read_lock);
-
-	printk("Memory location of each domain:\n");
-	for_each_domain(d)
-	{
-		printk("Domain %u (total: %u):\n", d->domain_id, d->tot_pages);
-
-		for_each_online_node(i)
-			page_num_node[i] = 0;
-
-		spin_lock(&d->page_alloc_lock);
-		page_list_for_each(page, &d->page_list)
-		{
-			i = phys_to_nid((paddr_t)page_to_mfn(page) << PAGE_SHIFT);
-			page_num_node[i]++;
-		}
-		spin_unlock(&d->page_alloc_lock);
-
-		for_each_online_node(i)
-			printk("    Node %u: %u\n", i, page_num_node[i]);
-	}
-
-	rcu_read_unlock(&domlist_read_lock);
+    s_time_t now = NOW();
+    int i;
+    struct domain *d;
+    struct page_info *page;
+    unsigned int page_num_node[MAX_NUMNODES];
+    uint64_t mem;
+
+    printk("'%c' pressed -> dumping numa info (now-0x%X:%08X)\n", key,
+           (u32)(now>>32), (u32)now);
+
+    for_each_online_node ( i )
+    {
+        paddr_t pa = (paddr_t)(NODE_DATA(i)->node_start_pfn + 1)<< PAGE_SHIFT;
+        printk("idx%d -> NODE%d start->%lu size->%lu free->%lu\n",
+               i, NODE_DATA(i)->node_id,
+               NODE_DATA(i)->node_start_pfn,
+               NODE_DATA(i)->node_spanned_pages,
+               avail_node_heap_pages(i));
+        /* sanity check phys_to_nid() */
+        printk("phys_to_nid(%"PRIpaddr") -> %d should be %d\n", pa,
+               phys_to_nid(pa),
+               NODE_DATA(i)->node_id);
+    }
+
+    for_each_online_cpu ( i )
+        printk("CPU%d -> NODE%d\n", i, cpu_to_node[i]);
+
+    rcu_read_lock(&domlist_read_lock);
+
+    printk("Memory location of each domain:\n");
+    for_each_domain ( d )
+    {
+        printk("Domain %u (total: %u):\n", d->domain_id, d->tot_pages);
+
+        for_each_online_node ( i )
+            page_num_node[i] = 0;
+
+        spin_lock(&d->page_alloc_lock);
+        page_list_for_each(page, &d->page_list)
+        {
+            i = phys_to_nid((paddr_t)page_to_mfn(page) << PAGE_SHIFT);
+            page_num_node[i]++;
+        }
+        spin_unlock(&d->page_alloc_lock);
+
+        for_each_online_node ( i )
+            printk("    Node %u: %u\n", i, page_num_node[i]);
+    }
+
+    rcu_read_unlock(&domlist_read_lock);
 }
 
 static struct keyhandler dump_numa_keyhandler = {
-	.diagnostic = 1,
-	.u.fn = dump_numa,
-	.desc = "dump numa info"
+    .diagnostic = 1,
+    .u.fn = dump_numa,
+    .desc = "dump numa info"
 };
 
 static __init int register_numa_trigger(void)
 {
-	register_keyhandler('u', &dump_numa_keyhandler);
-	return 0;
+    register_keyhandler('u', &dump_numa_keyhandler);
+    return 0;
 }
 __initcall(register_numa_trigger);
 
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH] numa.c: convert to xen coding style
  2014-09-04  4:04 [PATCH] numa.c: convert to xen coding style Elena Ufimtseva
@ 2014-09-04  4:23 ` Elena Ufimtseva
  2014-09-04 10:43 ` Dario Faggioli
  1 sibling, 0 replies; 7+ messages in thread
From: Elena Ufimtseva @ 2014-09-04  4:23 UTC (permalink / raw)
  To: xen-devel

On Thu, Sep 4, 2014 at 12:04 AM, Elena Ufimtseva <ufimtseva@gmail.com> wrote:
> Convert to Xen coding style from mixed one.
>
> Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
> ---
>  xen/arch/x86/numa.c |  500 ++++++++++++++++++++++++++-------------------------
>  1 file changed, 257 insertions(+), 243 deletions(-)
>
> diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
> index b141877..3e5445b 100644
> --- a/xen/arch/x86/numa.c
> +++ b/xen/arch/x86/numa.c
> @@ -36,13 +36,13 @@ unsigned long memnodemapsize;
>  u8 *memnodemap;
>
>  unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
> -       [0 ... NR_CPUS-1] = NUMA_NO_NODE
> +    [0 ... NR_CPUS-1] = NUMA_NO_NODE
>  };
>  /*
>   * Keep BIOS's CPU2node information, should not be used for memory allocaion
>   */
>  unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
> -       [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
> +    [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
>  };
>  cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
>
> @@ -54,7 +54,7 @@ int acpi_numa __devinitdata;
>
>  int srat_disabled(void)
>  {
> -       return numa_off || acpi_numa < 0;
> +    return numa_off || acpi_numa < 0;
>  }
>
>  /*
> @@ -67,53 +67,55 @@ int srat_disabled(void)
>  static int __init populate_memnodemap(const struct node *nodes,
>                                        int numnodes, int shift, int *nodeids)
>  {
> -       unsigned long spdx, epdx;
> -       int i, res = -1;
> -
> -       memset(memnodemap, NUMA_NO_NODE, memnodemapsize * sizeof(*memnodemap));
> -       for (i = 0; i < numnodes; i++) {
> -               spdx = paddr_to_pdx(nodes[i].start);
> -               epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
> -               if (spdx >= epdx)
> -                       continue;
> -               if ((epdx >> shift) >= memnodemapsize)
> -                       return 0;
> -               do {
> -                       if (memnodemap[spdx >> shift] != NUMA_NO_NODE)
> -                               return -1;
> -
> -                       if (!nodeids)
> -                               memnodemap[spdx >> shift] = i;
> -                       else
> -                               memnodemap[spdx >> shift] = nodeids[i];
> -
> -                       spdx += (1UL << shift);
> -               } while (spdx < epdx);
> -               res = 1;
> -       }
> -       return res;
> +    unsigned long spdx, epdx;
> +    int i, res = -1;
> +
> +    memset(memnodemap, NUMA_NO_NODE, memnodemapsize * sizeof(*memnodemap));
> +    for ( i = 0; i < numnodes; i++ )
> +    {
> +        spdx = paddr_to_pdx(nodes[i].start);
> +        epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
> +        if ( spdx >= epdx )
> +            continue;
> +        if ( (epdx >> shift) >= memnodemapsize )
> +            return 0;
> +        do {
> +            if ( memnodemap[spdx >> shift] != NUMA_NO_NODE )
> +                return -1;
> +
> +            if ( !nodeids )
> +                memnodemap[spdx >> shift] = i;
> +            else
> +                memnodemap[spdx >> shift] = nodeids[i];
> +
> +            spdx += (1UL << shift);
> +        } while ( spdx < epdx );
> +        res = 1;
> +    }
> +    return res;
>  }
>
>  static int __init allocate_cachealigned_memnodemap(void)
>  {
> -       unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
> -       unsigned long mfn = alloc_boot_pages(size, 1);
> -
> -       if (!mfn) {
> -               printk(KERN_ERR
> -                      "NUMA: Unable to allocate Memory to Node hash map\n");
> -               memnodemapsize = 0;
> -               return -1;
> -       }
> -
> -       memnodemap = mfn_to_virt(mfn);
> -       mfn <<= PAGE_SHIFT;
> -       size <<= PAGE_SHIFT;
> -       printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
> -              mfn, mfn + size);
> -       memnodemapsize = size / sizeof(*memnodemap);
> -
> -       return 0;
> +    unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
> +    unsigned long mfn = alloc_boot_pages(size, 1);
> +
> +    if ( !mfn )
> +    {
> +        printk(KERN_ERR
> +               "NUMA: Unable to allocate Memory to Node hash map\n");
> +        memnodemapsize = 0;
> +        return -1;
> +    }
> +
> +    memnodemap = mfn_to_virt(mfn);
> +    mfn <<= PAGE_SHIFT;
> +    size <<= PAGE_SHIFT;
> +    printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
> +           mfn, mfn + size);
> +    memnodemapsize = size / sizeof(*memnodemap);
> +
> +    return 0;
>  }
>
>  /*
> @@ -121,84 +123,85 @@ static int __init allocate_cachealigned_memnodemap(void)
>   * maximum possible shift.
>   */
>  static int __init extract_lsb_from_nodes(const struct node *nodes,
> -                                        int numnodes)
> +                                         int numnodes)
>  {
> -       int i, nodes_used = 0;
> -       unsigned long spdx, epdx;
> -       unsigned long bitfield = 0, memtop = 0;
> -
> -       for (i = 0; i < numnodes; i++) {
> -               spdx = paddr_to_pdx(nodes[i].start);
> -               epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
> -               if (spdx >= epdx)
> -                       continue;
> -               bitfield |= spdx;
> -               nodes_used++;
> -               if (epdx > memtop)
> -                       memtop = epdx;
> -       }
> -       if (nodes_used <= 1)
> -               i = BITS_PER_LONG - 1;
> -       else
> -               i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
> -       memnodemapsize = (memtop >> i) + 1;
> -       return i;
> +    int i, nodes_used = 0;
> +    unsigned long spdx, epdx;
> +    unsigned long bitfield = 0, memtop = 0;
> +
> +    for ( i = 0; i < numnodes; i++ )
> +    {
> +        spdx = paddr_to_pdx(nodes[i].start);
> +        epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
> +        if ( spdx >= epdx )
> +            continue;
> +        bitfield |= spdx;
> +        nodes_used++;
> +        if ( epdx > memtop )
> +            memtop = epdx;
> +    }
> +    if ( nodes_used <= 1 )
> +        i = BITS_PER_LONG - 1;
> +    else
> +        i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
> +    memnodemapsize = (memtop >> i) + 1;
> +    return i;
>  }
>
>  int __init compute_hash_shift(struct node *nodes, int numnodes,
> -                             int *nodeids)
> +                              int *nodeids)
>  {
> -       int shift;
> -
> -       shift = extract_lsb_from_nodes(nodes, numnodes);
> -       if (memnodemapsize <= ARRAY_SIZE(_memnodemap))
> -               memnodemap = _memnodemap;
> -       else if (allocate_cachealigned_memnodemap())
> -               return -1;
> -       printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
> -               shift);
> -
> -       if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
> -               printk(KERN_INFO "Your memory is not aligned you need to "
> -                      "rebuild your kernel with a bigger NODEMAPSIZE "
> -                      "shift=%d\n", shift);
> -               return -1;
> -       }
> -       return shift;
> +    int shift;
> +
> +    shift = extract_lsb_from_nodes(nodes, numnodes);
> +    if ( memnodemapsize <= ARRAY_SIZE(_memnodemap) )
> +        memnodemap = _memnodemap;
> +    else if ( allocate_cachealigned_memnodemap() )
> +        return -1;
> +    printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);
> +
> +    if ( populate_memnodemap(nodes, numnodes, shift, nodeids) != 1 )
> +    {
> +        printk(KERN_INFO "Your memory is not aligned you need to "
> +               "rebuild your kernel with a bigger NODEMAPSIZE "
> +               "shift=%d\n", shift);
> +        return -1;
> +    }
> +    return shift;
>  }
>  /* initialize NODE_DATA given nodeid and start/end */
>  void __init setup_node_bootmem(int nodeid, u64 start, u64 end)
>  {
> -       unsigned long start_pfn, end_pfn;
> +    unsigned long start_pfn, end_pfn;
>
> -       start_pfn = start >> PAGE_SHIFT;
> -       end_pfn = end >> PAGE_SHIFT;
> +    start_pfn = start >> PAGE_SHIFT;
> +    end_pfn = end >> PAGE_SHIFT;
>
> -       NODE_DATA(nodeid)->node_id = nodeid;
> -       NODE_DATA(nodeid)->node_start_pfn = start_pfn;
> -       NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
> +    NODE_DATA(nodeid)->node_id = nodeid;
> +    NODE_DATA(nodeid)->node_start_pfn = start_pfn;
> +    NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
>
> -       node_set_online(nodeid);
> +    node_set_online(nodeid);
>  }
>
>  void __init numa_init_array(void)
>  {
> -       int rr, i;
> -       /* There are unfortunately some poorly designed mainboards around
> -          that only connect memory to a single CPU. This breaks the 1:1 cpu->node
> -          mapping. To avoid this fill in the mapping for all possible
> -          CPUs, as the number of CPUs is not known yet.
> -          We round robin the existing nodes. */
> -       rr = first_node(node_online_map);
> -       for (i = 0; i < nr_cpu_ids; i++) {
> -               if (cpu_to_node[i] != NUMA_NO_NODE)
> -                       continue;
> -               numa_set_node(i, rr);
> -               rr = next_node(rr, node_online_map);
> -               if (rr == MAX_NUMNODES)
> -                       rr = first_node(node_online_map);
> -       }
> -
> +    int rr, i;
> +    /* There are unfortunately some poorly designed mainboards around
> +       that only connect memory to a single CPU. This breaks the 1:1 cpu->node
> +       mapping. To avoid this fill in the mapping for all possible
> +       CPUs, as the number of CPUs is not known yet.
> +       We round robin the existing nodes. */
> +    rr = first_node(node_online_map);
> +    for ( i = 0; i < nr_cpu_ids; i++ )
> +    {
> +        if ( cpu_to_node[i] != NUMA_NO_NODE )
> +            continue;
> +        numa_set_node(i, rr);
> +        rr = next_node(rr, node_online_map);
> +        if ( rr == MAX_NUMNODES )
> +            rr = first_node(node_online_map);
> +    }
>  }
>
>  #ifdef CONFIG_NUMA_EMU
> @@ -207,109 +210,115 @@ static int numa_fake __initdata = 0;
>  /* Numa emulation */
>  static int __init numa_emulation(u64 start_pfn, u64 end_pfn)
>  {
> -       int i;
> -       struct node nodes[MAX_NUMNODES];
> -       u64 sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;
> -
> -       /* Kludge needed for the hash function */
> -       if (hweight64(sz) > 1) {
> -               u64 x = 1;
> -               while ((x << 1) < sz)
> -                       x <<= 1;
> -               if (x < sz/2)
> -                       printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
> -               sz = x;
> -       }
> -
> -       memset(&nodes,0,sizeof(nodes));
> -       for (i = 0; i < numa_fake; i++) {
> -               nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
> -               if (i == numa_fake-1)
> -                       sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
> -               nodes[i].end = nodes[i].start + sz;
> -               printk(KERN_INFO "Faking node %d at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n",
> -                      i,
> -                      nodes[i].start, nodes[i].end,
> -                      (nodes[i].end - nodes[i].start) >> 20);
> -               node_set_online(i);
> -       }
> -       memnode_shift = compute_hash_shift(nodes, numa_fake, NULL);
> -       if (memnode_shift < 0) {
> -               memnode_shift = 0;
> -               printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
> -               return -1;
> -       }
> -       for_each_online_node(i)
> -               setup_node_bootmem(i, nodes[i].start, nodes[i].end);
> -       numa_init_array();
> -       return 0;
> +    int i;
> +    struct node nodes[MAX_NUMNODES];
> +    u64 sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;
> +
> +    /* Kludge needed for the hash function */
> +    if ( hweight64(sz) > 1 )
> +    {
> +        u64 x = 1;
> +        while ( (x << 1) < sz )
> +            x <<= 1;
> +        if ( x < sz/2 )
> +            printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
> +        sz = x;
> +    }
> +
> +    memset(&nodes,0,sizeof(nodes));
> +    for ( i = 0; i < numa_fake; i++ )
> +    {
> +        nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
> +        if ( i == numa_fake - 1 )
> +            sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
> +        nodes[i].end = nodes[i].start + sz;
> +        printk(KERN_INFO "Faking node %d at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n",
> +               i,
> +               nodes[i].start, nodes[i].end,
> +               (nodes[i].end - nodes[i].start) >> 20);
> +        node_set_online(i);
> +    }
> +    memnode_shift = compute_hash_shift(nodes, numa_fake, NULL);
> +    if ( memnode_shift < 0 )
> +    {
> +        memnode_shift = 0;
> +        printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
> +        return -1;
> +    }
> +    for_each_online_node ( i )
> +        setup_node_bootmem(i, nodes[i].start, nodes[i].end);
> +    numa_init_array();
> +    return 0;
>  }
>  #endif
>
>  void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
>  {
> -       int i;
> +    int i;
>
>  #ifdef CONFIG_NUMA_EMU
> -       if (numa_fake && !numa_emulation(start_pfn, end_pfn))
> -               return;
> +    if ( numa_fake && !numa_emulation(start_pfn, end_pfn) )
> +        return;
>  #endif
>
>  #ifdef CONFIG_ACPI_NUMA
> -       if (!numa_off && !acpi_scan_nodes((u64)start_pfn << PAGE_SHIFT,
> -                                         (u64)end_pfn << PAGE_SHIFT))
> -               return;
> +    if ( !numa_off && !acpi_scan_nodes((u64)start_pfn << PAGE_SHIFT,
> +         (u64)end_pfn << PAGE_SHIFT) )
> +        return;
>  #endif
>
> -       printk(KERN_INFO "%s\n",
> -              numa_off ? "NUMA turned off" : "No NUMA configuration found");
> -
> -       printk(KERN_INFO "Faking a node at %016"PRIx64"-%016"PRIx64"\n",
> -              (u64)start_pfn << PAGE_SHIFT,
> -              (u64)end_pfn << PAGE_SHIFT);
> -       /* setup dummy node covering all memory */
> -       memnode_shift = BITS_PER_LONG - 1;
> -       memnodemap = _memnodemap;
> -       nodes_clear(node_online_map);
> -       node_set_online(0);
> -       for (i = 0; i < nr_cpu_ids; i++)
> -               numa_set_node(i, 0);
> -       cpumask_copy(&node_to_cpumask[0], cpumask_of(0));
> -       setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT, (u64)end_pfn << PAGE_SHIFT);
> +    printk(KERN_INFO "%s\n",
> +           numa_off ? "NUMA turned off" : "No NUMA configuration found");
> +
> +    printk(KERN_INFO "Faking a node at %016"PRIx64"-%016"PRIx64"\n",
> +           (u64)start_pfn << PAGE_SHIFT,
> +           (u64)end_pfn << PAGE_SHIFT);
> +    /* setup dummy node covering all memory */
> +    memnode_shift = BITS_PER_LONG - 1;
> +    memnodemap = _memnodemap;
> +    nodes_clear(node_online_map);
> +    node_set_online(0);
> +    for ( i = 0; i < nr_cpu_ids; i++ )
> +        numa_set_node(i, 0);
> +    cpumask_copy(&node_to_cpumask[0], cpumask_of(0));
> +    setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT,
> +                    (u64)end_pfn << PAGE_SHIFT);
>  }
>
>  __cpuinit void numa_add_cpu(int cpu)
>  {
> -       cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
> +    cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
>  }
>
>  void __cpuinit numa_set_node(int cpu, int node)
>  {
> -       cpu_to_node[cpu] = node;
> +    cpu_to_node[cpu] = node;
>  }
>
>  /* [numa=off] */
>  static __init int numa_setup(char *opt)
>  {
> -       if (!strncmp(opt,"off",3))
> -               numa_off = 1;
> -       if (!strncmp(opt,"on",2))
> -               numa_off = 0;
> +    if ( !strncmp(opt,"off",3) )
> +        numa_off = 1;
> +    if ( !strncmp(opt,"on",2) )
> +        numa_off = 0;
>  #ifdef CONFIG_NUMA_EMU
> -       if(!strncmp(opt, "fake=", 5)) {
> -               numa_off = 0;
> -               numa_fake = simple_strtoul(opt+5,NULL,0); ;
> -               if (numa_fake >= MAX_NUMNODES)
> -                       numa_fake = MAX_NUMNODES;
> -       }
> +    if ( !strncmp(opt, "fake=", 5) )
> +    {
> +        numa_off = 0;
> +        numa_fake = simple_strtoul(opt+5,NULL,0);
> +        if ( numa_fake >= MAX_NUMNODES )
> +            numa_fake = MAX_NUMNODES;
> +    }
>  #endif
>  #ifdef CONFIG_ACPI_NUMA
> -       if (!strncmp(opt,"noacpi",6)) {
> -               numa_off = 0;
> -               acpi_numa = -1;
> -       }
> +    if ( !strncmp(opt,"noacpi",6) )
> +    {
> +        numa_off = 0;
> +        acpi_numa = -1;
> +    }
>  #endif
> -       return 1;
> +    return 1;
>  }
>
>  /*
> @@ -326,16 +335,17 @@ static __init int numa_setup(char *opt)
>   */
>  void __init init_cpu_to_node(void)
>  {
> -       int i, node;
> -       for (i = 0; i < nr_cpu_ids; i++) {
> -               u32 apicid = x86_cpu_to_apicid[i];
> -               if (apicid == BAD_APICID)
> -                       continue;
> -               node = apicid_to_node[apicid];
> -               if ( node == NUMA_NO_NODE || !node_online(node) )
> -                       node = 0;
> -               numa_set_node(i, node);
> -       }
> +    int i, node;
> +    for ( i = 0; i < nr_cpu_ids; i++ )
> +    {
> +        u32 apicid = x86_cpu_to_apicid[i];
> +        if ( apicid == BAD_APICID )
> +            continue;
> +        node = apicid_to_node[apicid];
> +        if ( node == NUMA_NO_NODE || !node_online(node) )
> +            node = 0;
> +        numa_set_node(i, node);
> +    }
>  }
>
>  EXPORT_SYMBOL(cpu_to_node);
> @@ -346,64 +356,68 @@ EXPORT_SYMBOL(node_data);
>
>  static void dump_numa(unsigned char key)
>  {
> -       s_time_t now = NOW();
> -       int i;
> -       struct domain *d;
> -       struct page_info *page;
> -       unsigned int page_num_node[MAX_NUMNODES];
> -
> -       printk("'%c' pressed -> dumping numa info (now-0x%X:%08X)\n", key,
> -                 (u32)(now>>32), (u32)now);
> -
> -       for_each_online_node(i) {
> -               paddr_t pa = (paddr_t)(NODE_DATA(i)->node_start_pfn + 1)<< PAGE_SHIFT;
> -               printk("idx%d -> NODE%d start->%lu size->%lu free->%lu\n",
> -                         i, NODE_DATA(i)->node_id,
> -                         NODE_DATA(i)->node_start_pfn,
> -                         NODE_DATA(i)->node_spanned_pages,
> -                         avail_node_heap_pages(i));
> -               /* sanity check phys_to_nid() */
> -               printk("phys_to_nid(%"PRIpaddr") -> %d should be %d\n", pa, phys_to_nid(pa),
> -                         NODE_DATA(i)->node_id);
> -       }
> -       for_each_online_cpu(i)
> -               printk("CPU%d -> NODE%d\n", i, cpu_to_node[i]);
> -
> -       rcu_read_lock(&domlist_read_lock);
> -
> -       printk("Memory location of each domain:\n");
> -       for_each_domain(d)
> -       {
> -               printk("Domain %u (total: %u):\n", d->domain_id, d->tot_pages);
> -
> -               for_each_online_node(i)
> -                       page_num_node[i] = 0;
> -
> -               spin_lock(&d->page_alloc_lock);
> -               page_list_for_each(page, &d->page_list)
> -               {
> -                       i = phys_to_nid((paddr_t)page_to_mfn(page) << PAGE_SHIFT);
> -                       page_num_node[i]++;
> -               }
> -               spin_unlock(&d->page_alloc_lock);
> -
> -               for_each_online_node(i)
> -                       printk("    Node %u: %u\n", i, page_num_node[i]);
> -       }
> -
> -       rcu_read_unlock(&domlist_read_lock);
> +    s_time_t now = NOW();
> +    int i;
> +    struct domain *d;
> +    struct page_info *page;
> +    unsigned int page_num_node[MAX_NUMNODES];
> +    uint64_t mem;
> +
> +    printk("'%c' pressed -> dumping numa info (now-0x%X:%08X)\n", key,
> +           (u32)(now>>32), (u32)now);
> +
> +    for_each_online_node ( i )
> +    {
> +        paddr_t pa = (paddr_t)(NODE_DATA(i)->node_start_pfn + 1)<< PAGE_SHIFT;
> +        printk("idx%d -> NODE%d start->%lu size->%lu free->%lu\n",
> +               i, NODE_DATA(i)->node_id,
> +               NODE_DATA(i)->node_start_pfn,
> +               NODE_DATA(i)->node_spanned_pages,
> +               avail_node_heap_pages(i));
> +        /* sanity check phys_to_nid() */
> +        printk("phys_to_nid(%"PRIpaddr") -> %d should be %d\n", pa,
> +               phys_to_nid(pa),
> +               NODE_DATA(i)->node_id);
> +    }
> +
> +    for_each_online_cpu ( i )
> +        printk("CPU%d -> NODE%d\n", i, cpu_to_node[i]);
> +
> +    rcu_read_lock(&domlist_read_lock);
> +
> +    printk("Memory location of each domain:\n");
> +    for_each_domain ( d )
> +    {
> +        printk("Domain %u (total: %u):\n", d->domain_id, d->tot_pages);
> +
> +        for_each_online_node ( i )
> +            page_num_node[i] = 0;
> +
> +        spin_lock(&d->page_alloc_lock);
> +        page_list_for_each(page, &d->page_list)
> +        {
> +            i = phys_to_nid((paddr_t)page_to_mfn(page) << PAGE_SHIFT);
> +            page_num_node[i]++;
> +        }
> +        spin_unlock(&d->page_alloc_lock);
> +
> +        for_each_online_node ( i )
> +            printk("    Node %u: %u\n", i, page_num_node[i]);
> +    }
> +
> +    rcu_read_unlock(&domlist_read_lock);
>  }
>
>  static struct keyhandler dump_numa_keyhandler = {
> -       .diagnostic = 1,
> -       .u.fn = dump_numa,
> -       .desc = "dump numa info"
> +    .diagnostic = 1,
> +    .u.fn = dump_numa,
> +    .desc = "dump numa info"
>  };
>
>  static __init int register_numa_trigger(void)
>  {
> -       register_keyhandler('u', &dump_numa_keyhandler);
> -       return 0;
> +    register_keyhandler('u', &dump_numa_keyhandler);
> +    return 0;
>  }
>  __initcall(register_numa_trigger);
>
> --
> 1.7.10.4
>

Jan

Will this be a better version of the conversion to Xen coding style?

-- 
Elena

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] numa.c: convert to xen coding style
  2014-09-04  4:04 [PATCH] numa.c: convert to xen coding style Elena Ufimtseva
  2014-09-04  4:23 ` Elena Ufimtseva
@ 2014-09-04 10:43 ` Dario Faggioli
  2014-09-04 13:00   ` Elena Ufimtseva
  1 sibling, 1 reply; 7+ messages in thread
From: Dario Faggioli @ 2014-09-04 10:43 UTC (permalink / raw)
  To: Elena Ufimtseva
  Cc: keir, Ian.Campbell, george.dunlap, ian.jackson, xen-devel, JBeulich


[-- Attachment #1.1: Type: text/plain, Size: 1883 bytes --]

On gio, 2014-09-04 at 00:04 -0400, Elena Ufimtseva wrote:
> Convert to Xen coding style from mixed one.
> 
Ah... Much better, thanks Elena for doing this! :-)

I only have one nit.

> --- a/xen/arch/x86/numa.c
> +++ b/xen/arch/x86/numa.c

>  void __init numa_init_array(void)
>  {
> -	int rr, i;
> -	/* There are unfortunately some poorly designed mainboards around
> -	   that only connect memory to a single CPU. This breaks the 1:1 cpu->node
> -	   mapping. To avoid this fill in the mapping for all possible
> -	   CPUs, as the number of CPUs is not known yet. 
> -	   We round robin the existing nodes. */
> -	rr = first_node(node_online_map);
> -	for (i = 0; i < nr_cpu_ids; i++) {
> -		if (cpu_to_node[i] != NUMA_NO_NODE)
> -			continue;
> - 		numa_set_node(i, rr);
> -		rr = next_node(rr, node_online_map);
> -		if (rr == MAX_NUMNODES)
> -			rr = first_node(node_online_map);
> -	}
> -
> +    int rr, i;
> +    /* There are unfortunately some poorly designed mainboards around
> +       that only connect memory to a single CPU. This breaks the 1:1 cpu->node
> +       mapping. To avoid this fill in the mapping for all possible
> +       CPUs, as the number of CPUs is not known yet.
> +       We round robin the existing nodes. */
>
We are not super consistent when it comes to code comments, but I think
this should have become something like this:

/*
 * Example, multi-line comment block.
 *
 * Note beginning and end markers on separate lines and leading '*'.
 */

With this fixed,

Reviewed-by: Dario Faggioli <dario.faggioli@citrix.com>

Regards,
Dario

-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)


[-- Attachment #1.2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 181 bytes --]

[-- Attachment #2: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] numa.c: convert to xen coding style
  2014-09-04 10:43 ` Dario Faggioli
@ 2014-09-04 13:00   ` Elena Ufimtseva
  2014-09-04 14:32     ` Jan Beulich
  0 siblings, 1 reply; 7+ messages in thread
From: Elena Ufimtseva @ 2014-09-04 13:00 UTC (permalink / raw)
  To: Dario Faggioli
  Cc: Keir Fraser, Ian Campbell, George Dunlap, Ian Jackson, xen-devel,
	Jan Beulich

On Thu, Sep 4, 2014 at 6:43 AM, Dario Faggioli
<dario.faggioli@citrix.com> wrote:
> On gio, 2014-09-04 at 00:04 -0400, Elena Ufimtseva wrote:
>> Convert to Xen coding style from mixed one.
>>
> Ah... Much better, thanks Elena for doing this! :-)
>
> I only have one nit.
>
>> --- a/xen/arch/x86/numa.c
>> +++ b/xen/arch/x86/numa.c
>
>>  void __init numa_init_array(void)
>>  {
>> -     int rr, i;
>> -     /* There are unfortunately some poorly designed mainboards around
>> -        that only connect memory to a single CPU. This breaks the 1:1 cpu->node
>> -        mapping. To avoid this fill in the mapping for all possible
>> -        CPUs, as the number of CPUs is not known yet.
>> -        We round robin the existing nodes. */
>> -     rr = first_node(node_online_map);
>> -     for (i = 0; i < nr_cpu_ids; i++) {
>> -             if (cpu_to_node[i] != NUMA_NO_NODE)
>> -                     continue;
>> -             numa_set_node(i, rr);
>> -             rr = next_node(rr, node_online_map);
>> -             if (rr == MAX_NUMNODES)
>> -                     rr = first_node(node_online_map);
>> -     }
>> -
>> +    int rr, i;
>> +    /* There are unfortunately some poorly designed mainboards around
>> +       that only connect memory to a single CPU. This breaks the 1:1 cpu->node
>> +       mapping. To avoid this fill in the mapping for all possible
>> +       CPUs, as the number of CPUs is not known yet.
>> +       We round robin the existing nodes. */
>>
> We are not super consistent when it comes to code comments, but I think
> this should have become something like this:
>
> /*
>  * Example, multi-line comment block.
>  *
>  * Note beginning and end markers on separate lines and leading '*'.
>  */
>
> With this fixed,
>
> Reviewed-by: Dario Faggioli <dario.faggioli@citrix.com>
>
> Regards,
> Dario
>
> --
> <<This happens because I choose it to happen!>> (Raistlin Majere)
> -----------------------------------------------------------------
> Dario Faggioli, Ph.D, http://about.me/dario.faggioli
> Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)
>

Thanks Jan, Dario.

Sorry for missing 'i'.

Jan, do you want me to re-send with what Dario mentioned?

Elena


-- 
Elena

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] numa.c: convert to xen coding style
  2014-09-04 13:00   ` Elena Ufimtseva
@ 2014-09-04 14:32     ` Jan Beulich
  2014-09-04 14:33       ` Elena Ufimtseva
  2014-09-04 14:48       ` Dario Faggioli
  0 siblings, 2 replies; 7+ messages in thread
From: Jan Beulich @ 2014-09-04 14:32 UTC (permalink / raw)
  To: Dario Faggioli, Elena Ufimtseva
  Cc: Keir Fraser, Ian Campbell, George Dunlap, Ian Jackson, xen-devel

>>> On 04.09.14 at 15:00, <ufimtseva@gmail.com> wrote:
> Jan, do you want me to re-send with what Dario mentioned?

As said in an earlier mail, this got applied already after having fixed the
build issue.

Jan

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] numa.c: convert to xen coding style
  2014-09-04 14:32     ` Jan Beulich
@ 2014-09-04 14:33       ` Elena Ufimtseva
  2014-09-04 14:48       ` Dario Faggioli
  1 sibling, 0 replies; 7+ messages in thread
From: Elena Ufimtseva @ 2014-09-04 14:33 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Keir Fraser, Ian Campbell, George Dunlap, Dario Faggioli,
	Ian Jackson, xen-devel

On Thu, Sep 4, 2014 at 10:32 AM, Jan Beulich <JBeulich@suse.com> wrote:
>>>> On 04.09.14 at 15:00, <ufimtseva@gmail.com> wrote:
>> Jan, do you want me to re-send with what Dario mentioned?
>
> As said in an earlier mail, this got applied already after having fixed the
> build issue.
>
> Jan
>

Thanks Jan, got it.

-- 
Elena

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] numa.c: convert to xen coding style
  2014-09-04 14:32     ` Jan Beulich
  2014-09-04 14:33       ` Elena Ufimtseva
@ 2014-09-04 14:48       ` Dario Faggioli
  1 sibling, 0 replies; 7+ messages in thread
From: Dario Faggioli @ 2014-09-04 14:48 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Keir Fraser, Ian Campbell, George Dunlap, Ian Jackson, xen-devel,
	Elena Ufimtseva


[-- Attachment #1.1: Type: text/plain, Size: 672 bytes --]

On gio, 2014-09-04 at 15:32 +0100, Jan Beulich wrote:
> >>> On 04.09.14 at 15:00, <ufimtseva@gmail.com> wrote:
> > Jan, do you want me to re-send with what Dario mentioned?
> 
> As said in an earlier mail, this got applied already after having fixed the
> build issue.
> 
My bad! I was not Cc-ed to that part of the thread, and (assuming I was)
I missed it on the list.

Sorry and Regards,
Dario

-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)


[-- Attachment #1.2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 181 bytes --]

[-- Attachment #2: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2014-09-04 14:48 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-09-04  4:04 [PATCH] numa.c: convert to xen coding style Elena Ufimtseva
2014-09-04  4:23 ` Elena Ufimtseva
2014-09-04 10:43 ` Dario Faggioli
2014-09-04 13:00   ` Elena Ufimtseva
2014-09-04 14:32     ` Jan Beulich
2014-09-04 14:33       ` Elena Ufimtseva
2014-09-04 14:48       ` Dario Faggioli

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.