* Host Numa information in dom0
@ 2010-01-29 23:05 ` Kamble, Nitin A
  2010-01-30  8:09   ` Keir Fraser
                     ` (3 more replies)
  0 siblings, 4 replies; 14+ messages in thread
From: Kamble, Nitin A @ 2010-01-29 23:05 UTC (permalink / raw)
  To: xen-devel


[-- Attachment #1.1: Type: text/plain, Size: 421 bytes --]

Hi Keir,
   Attached is the patch which exposes the host NUMA information to dom0. With the patch, the "xm info" command now also reports the CPU topology and host NUMA information. This will later be used to build guest NUMA support.
The patch changes the physinfo sysctl, adds the topologyinfo and numainfo sysctls, and updates the Python and libxc code accordingly.
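
For reference, a quick, untested Python sketch of how the new bindings could be consumed (the dictionary keys are the ones returned by pyxc_topologyinfo and pyxc_numainfo in the attached patch; node_memsize/node_memfree values are already in MB there):

    from xen.lowlevel import xc

    x = xc.xc()

    tinfo = x.topologyinfo()
    for cpu in range(tinfo['max_cpu_index']):
        print "cpu%3d: core %d  socket %d  node %d" % (
            cpu, tinfo['cpu_to_core'][cpu],
            tinfo['cpu_to_socket'][cpu], tinfo['cpu_to_node'][cpu])

    ninfo = x.numainfo()
    for node in range(ninfo['max_node_index']):
        print "node%d: %dMB total, %dMB free" % (
            node, ninfo['node_memsize'][node], ninfo['node_memfree'][node])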

Please apply.

Thanks & Regards,
Nitin




[-- Attachment #2: numactl_patch_20100128_1.diff --]
[-- Type: application/octet-stream, Size: 30175 bytes --]

diff -r 2636e5619708 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c	Tue Jan 26 15:54:40 2010 +0000
+++ b/tools/libxc/xc_misc.c	Thu Jan 28 14:17:32 2010 -0800
@@ -80,6 +80,43 @@
     return 0;
 }
 
+int xc_topologyinfo(int xc_handle,
+                xc_topologyinfo_t *put_info)
+{
+    int ret;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_topologyinfo;
+
+    memcpy(&sysctl.u.topologyinfo, put_info, sizeof(*put_info));
+
+    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
+        return ret;
+
+    memcpy(put_info, &sysctl.u.topologyinfo, sizeof(*put_info));
+
+    return 0;
+}
+
+int xc_numainfo(int xc_handle,
+                xc_numainfo_t *put_info)
+{
+    int ret;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_numainfo;
+
+    memcpy(&sysctl.u.numainfo, put_info, sizeof(*put_info));
+
+    if ((ret = do_sysctl(xc_handle, &sysctl)) != 0)
+        return ret;
+
+    memcpy(put_info, &sysctl.u.numainfo, sizeof(*put_info));
+
+    return 0;
+}
+
+
 int xc_sched_id(int xc_handle,
                 int *sched_id)
 {
diff -r 2636e5619708 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Tue Jan 26 15:54:40 2010 +0000
+++ b/tools/libxc/xenctrl.h	Thu Jan 28 14:17:32 2010 -0800
@@ -609,9 +609,19 @@
 int xc_send_debug_keys(int xc_handle, char *keys);
 
 typedef xen_sysctl_physinfo_t xc_physinfo_t;
+typedef xen_sysctl_topologyinfo_t xc_topologyinfo_t;
+typedef xen_sysctl_numainfo_t xc_numainfo_t;
+
 typedef uint32_t xc_cpu_to_node_t;
-int xc_physinfo(int xc_handle,
-                xc_physinfo_t *info);
+typedef uint32_t xc_cpu_to_socket_t;
+typedef uint32_t xc_cpu_to_core_t;
+typedef uint64_t xc_node_to_memsize_t;
+typedef uint64_t xc_node_to_memfree_t;
+typedef uint32_t xc_node_to_node_dist_t;
+
+int xc_physinfo(int xc_handle, xc_physinfo_t *info);
+int xc_topologyinfo(int xc_handle, xc_topologyinfo_t *info);
+int xc_numainfo(int xc_handle, xc_numainfo_t *info);
 
 int xc_sched_id(int xc_handle,
                 int *sched_id);
diff -r 2636e5619708 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Tue Jan 26 15:54:40 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Jan 28 14:17:32 2010 -0800
@@ -1097,105 +1097,178 @@
     return PyLong_FromUnsignedLong(pages_to_kib(pages));
 }
 
-
 static PyObject *pyxc_physinfo(XcObject *self)
 {
-#define MAX_CPU_ID 255
-    xc_physinfo_t info;
+    xc_physinfo_t pinfo;
     char cpu_cap[128], virt_caps[128], *p;
-    int i, j, max_cpu_id, nr_nodes = 0;
-    uint64_t free_heap;
-    PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj;
-    PyObject *node_to_dma32_mem_obj;
-    xc_cpu_to_node_t map[MAX_CPU_ID + 1];
+    int i;
     const char *virtcap_names[] = { "hvm", "hvm_directio" };
 
-    set_xen_guest_handle(info.cpu_to_node, map);
-    info.max_cpu_id = MAX_CPU_ID;
-
-    if ( xc_physinfo(self->xc_handle, &info) != 0 )
+    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
         return pyxc_error_to_exception();
 
     p = cpu_cap;
     *p = '\0';
-    for ( i = 0; i < sizeof(info.hw_cap)/4; i++ )
-        p += sprintf(p, "%08x:", info.hw_cap[i]);
+    for ( i = 0; i < sizeof(pinfo.hw_cap)/4; i++ )
+        p += sprintf(p, "%08x:", pinfo.hw_cap[i]);
     *(p-1) = 0;
 
     p = virt_caps;
     *p = '\0';
     for ( i = 0; i < 2; i++ )
-        if ( (info.capabilities >> i) & 1 )
+        if ( (pinfo.capabilities >> i) & 1 )
           p += sprintf(p, "%s ", virtcap_names[i]);
     if ( p != virt_caps )
       *(p-1) = '\0';
 
-    max_cpu_id = info.max_cpu_id;
-    if ( max_cpu_id > MAX_CPU_ID )
-        max_cpu_id = MAX_CPU_ID;
+    return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s,s:s}",
+                            "nr_nodes",         pinfo.nr_nodes,
+                            "threads_per_core", pinfo.threads_per_core,
+                            "cores_per_socket", pinfo.cores_per_socket,
+                            "sockets_per_node", pinfo.sockets_per_node,
+                            "nr_cpus",          pinfo.nr_cpus, 
+                            "total_memory",     pages_to_kib(pinfo.total_pages),
+                            "free_memory",      pages_to_kib(pinfo.free_pages),
+                            "scrub_memory",     pages_to_kib(pinfo.scrub_pages),
+                            "cpu_khz",          pinfo.cpu_khz,
+                            "hw_caps",          cpu_cap,
+                            "virt_caps",        virt_caps);
+}
+
+static PyObject *pyxc_topologyinfo(XcObject *self)
+{
+#define MAX_CPU_INDEX 255
+    xc_topologyinfo_t tinfo;
+    int i, max_cpu_index;
+    PyObject *ret_obj;
+    PyObject *cpu_to_core_obj, *cpu_to_socket_obj, *cpu_to_node_obj;
+    xc_cpu_to_core_t coremap[MAX_CPU_INDEX + 1];
+    xc_cpu_to_socket_t socketmap[MAX_CPU_INDEX + 1];
+    xc_cpu_to_node_t nodemap[MAX_CPU_INDEX + 1];
+
+
+    set_xen_guest_handle(tinfo.cpu_to_core, coremap);
+    set_xen_guest_handle(tinfo.cpu_to_socket, socketmap);
+    set_xen_guest_handle(tinfo.cpu_to_node, nodemap);
+    tinfo.max_cpu_index = MAX_CPU_INDEX;
+
+    if ( xc_topologyinfo(self->xc_handle, &tinfo) != 0 )
+        return pyxc_error_to_exception();
+
+    max_cpu_index = tinfo.max_cpu_index;
+    if ( max_cpu_index > MAX_CPU_INDEX )
+        max_cpu_index = MAX_CPU_INDEX;
+
+    /* Construct cpu-to-* lists. */
+    cpu_to_core_obj = PyList_New(0);
+    cpu_to_socket_obj = PyList_New(0);
+    cpu_to_node_obj = PyList_New(0);
+    for ( i = 0; i < max_cpu_index; i++ )
+    {
+        PyObject *pyint;
+
+        pyint = PyInt_FromLong(coremap[i]);
+        PyList_Append(cpu_to_core_obj, pyint);
+        Py_DECREF(pyint);
+
+        pyint = PyInt_FromLong(socketmap[i]);
+        PyList_Append(cpu_to_socket_obj, pyint);
+        Py_DECREF(pyint);
+
+        pyint = PyInt_FromLong(nodemap[i]);
+        PyList_Append(cpu_to_node_obj, pyint);
+        Py_DECREF(pyint);
+    }
+
+    ret_obj = Py_BuildValue("{s:i}", "max_cpu_index", max_cpu_index);
+
+    PyDict_SetItemString(ret_obj, "cpu_to_core", cpu_to_core_obj);
+    Py_DECREF(cpu_to_core_obj);
+
+    PyDict_SetItemString(ret_obj, "cpu_to_socket", cpu_to_socket_obj);
+    Py_DECREF(cpu_to_socket_obj);
+ 
+    PyDict_SetItemString(ret_obj, "cpu_to_node", cpu_to_node_obj);
+    Py_DECREF(cpu_to_node_obj);
+ 
+    return ret_obj;
+#undef MAX_CPU_INDEX
+}
+
+static PyObject *pyxc_numainfo(XcObject *self)
+{
+#define MAX_NODE_INDEX 31
+    xc_numainfo_t ninfo;
+    int i, j, max_node_index;
+    uint64_t free_heap;
+    PyObject *ret_obj;
+    PyObject *node_to_memsize_obj, *node_to_memfree_obj;
+    PyObject *node_to_dma32_mem_obj, *node_to_node_dist_obj;
+    xc_node_to_memsize_t node_memsize[MAX_NODE_INDEX + 1];
+    xc_node_to_memfree_t node_memfree[MAX_NODE_INDEX + 1];
+    xc_node_to_node_dist_t nodes_dist[(MAX_NODE_INDEX * MAX_NODE_INDEX) + 1];
+
+    set_xen_guest_handle(ninfo.node_to_memsize, node_memsize);
+    set_xen_guest_handle(ninfo.node_to_memfree, node_memfree);
+    set_xen_guest_handle(ninfo.node_to_node_distance, nodes_dist);
+    ninfo.max_node_index = MAX_NODE_INDEX;
+    if( xc_numainfo(self->xc_handle, &ninfo) != 0 )
+        return pyxc_error_to_exception();
+
+    max_node_index = ninfo.max_node_index;
+    if ( max_node_index > MAX_NODE_INDEX )
+        max_node_index = MAX_NODE_INDEX;
 
     /* Construct node-to-* lists. */
-    node_to_cpu_obj = PyList_New(0);
-    node_to_memory_obj = PyList_New(0);
+    node_to_memsize_obj = PyList_New(0);
+    node_to_memfree_obj = PyList_New(0);
     node_to_dma32_mem_obj = PyList_New(0);
-    for ( i = 0; i <= info.max_node_id; i++ )
+    node_to_node_dist_obj = PyList_New(0);
+    for ( i = 0; i < max_node_index; i++ )
     {
-        int node_exists = 0;
         PyObject *pyint;
 
-        /* CPUs. */
-        PyObject *cpus = PyList_New(0);
-        for ( j = 0; j <= max_cpu_id; j++ )
-        {
-            if ( i != map[j] )
-                continue;
-            pyint = PyInt_FromLong(j);
-            PyList_Append(cpus, pyint);
-            Py_DECREF(pyint);
-            node_exists = 1;
-        }
-        PyList_Append(node_to_cpu_obj, cpus); 
-        Py_DECREF(cpus);
+        /* Total Memory */
+        pyint = PyInt_FromLong(node_memsize[i] >> 20); /* MB */
+        PyList_Append(node_to_memsize_obj, pyint);
+        Py_DECREF(pyint);
 
-        /* Memory. */
-        xc_availheap(self->xc_handle, 0, 0, i, &free_heap);
-        node_exists = node_exists || (free_heap != 0);
-        pyint = PyInt_FromLong(free_heap / 1024);
-        PyList_Append(node_to_memory_obj, pyint);
+        /* Free Memory */
+        pyint = PyInt_FromLong(node_memfree[i] >> 20); /* MB */
+        PyList_Append(node_to_memfree_obj, pyint);
         Py_DECREF(pyint);
 
         /* DMA memory. */
         xc_availheap(self->xc_handle, 0, 32, i, &free_heap);
-        pyint = PyInt_FromLong(free_heap / 1024);
+        pyint = PyInt_FromLong(free_heap >> 20); /* MB */
         PyList_Append(node_to_dma32_mem_obj, pyint);
         Py_DECREF(pyint);
 
-        if ( node_exists )
-            nr_nodes++;
+        /* Node to Node Distance */
+        for ( j = 0; j < ninfo.max_node_index; j++ )
+        {
+            pyint = PyInt_FromLong(nodes_dist[(i * ninfo.max_node_index) + j]);
+            PyList_Append(node_to_node_dist_obj, pyint);
+            Py_DECREF(pyint);
+        }
     }
 
-    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s:s:s}",
-                            "nr_nodes",         nr_nodes,
-                            "max_node_id",      info.max_node_id,
-                            "max_cpu_id",       info.max_cpu_id,
-                            "threads_per_core", info.threads_per_core,
-                            "cores_per_socket", info.cores_per_socket,
-                            "nr_cpus",          info.nr_cpus, 
-                            "total_memory",     pages_to_kib(info.total_pages),
-                            "free_memory",      pages_to_kib(info.free_pages),
-                            "scrub_memory",     pages_to_kib(info.scrub_pages),
-                            "cpu_khz",          info.cpu_khz,
-                            "hw_caps",          cpu_cap,
-                            "virt_caps",        virt_caps);
-    PyDict_SetItemString(ret_obj, "node_to_cpu", node_to_cpu_obj);
-    Py_DECREF(node_to_cpu_obj);
-    PyDict_SetItemString(ret_obj, "node_to_memory", node_to_memory_obj);
-    Py_DECREF(node_to_memory_obj);
+    ret_obj = Py_BuildValue("{s:i}", "max_node_index", max_node_index);
+
+    PyDict_SetItemString(ret_obj, "node_memsize", node_to_memsize_obj);
+    Py_DECREF(node_to_memsize_obj);
+
+    PyDict_SetItemString(ret_obj, "node_memfree", node_to_memfree_obj);
+    Py_DECREF(node_to_memfree_obj);
+
     PyDict_SetItemString(ret_obj, "node_to_dma32_mem", node_to_dma32_mem_obj);
     Py_DECREF(node_to_dma32_mem_obj);
+
+    PyDict_SetItemString(ret_obj, "node_to_node_dist", node_to_node_dist_obj);
+    Py_DECREF(node_to_node_dist_obj);
  
     return ret_obj;
-#undef MAX_CPU_ID
+#undef MAX_NODE_INDEX
 }
 
 static PyObject *pyxc_xeninfo(XcObject *self)
@@ -2004,6 +2077,20 @@
       "Returns [dict]: information about the hardware"
       "        [None]: on failure.\n" },
 
+    { "topologyinfo",
+      (PyCFunction)pyxc_topologyinfo,
+      METH_NOARGS, "\n"
+      "Get information about the cpu topology on the host machine\n"
+      "Returns [dict]: information about the cpu topology on host"
+      "        [None]: on failure.\n" },
+
+    { "numainfo",
+      (PyCFunction)pyxc_numainfo,
+      METH_NOARGS, "\n"
+      "Get NUMA information on the host machine\n"
+      "Returns [dict]: NUMA information on host"
+      "        [None]: on failure.\n" },
+
     { "xeninfo",
       (PyCFunction)pyxc_xeninfo,
       METH_NOARGS, "\n"
diff -r 2636e5619708 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py	Tue Jan 26 15:54:40 2010 +0000
+++ b/tools/python/xen/xend/XendNode.py	Thu Jan 28 14:17:32 2010 -0800
@@ -874,66 +874,71 @@
     def list_to_strrange(self,list):
         return self.format_pairs(self.list_to_rangepairs(list))
 
-    def format_node_to_cpu(self, pinfo):
-        str=''
-        whitespace=''
+    def format_cpu_to_core_socket_node(self, tinfo):
         try:
-            node_to_cpu=pinfo['node_to_cpu']
-            for i in range(0, pinfo['max_node_id']+1):
-                str+='%snode%d:%s\n' % (whitespace,
-                                        i, 
-                                      self.list_to_strrange(node_to_cpu[i]))
-                whitespace='%25s' % ''        
-        except:
-            str='none\n'
-        return str[:-1];
-    def format_node_to_memory(self, pinfo, key):
-        str=''
-        whitespace=''
-        try:
-            node_to_memory=pinfo[key]
-            for i in range(0, pinfo['max_node_id']+1):
-                str+='%snode%d:%d\n' % (whitespace,
-                                        i,
-                                        node_to_memory[i] / 1024)
-                whitespace='%25s' % ''
+            nr_cpus=tinfo['max_cpu_index']
+            str='\ncpu:    core    socket     node\n'
+            for i in range(0, nr_cpus):
+                str+='%3d:%8d %8d %8d\n' % (i, 
+                                          tinfo['cpu_to_core'][i],
+                                          tinfo['cpu_to_socket'][i],
+                                          tinfo['cpu_to_node'][i])
         except:
             str='none\n'
         return str[:-1];
 
+    def format_numa_info(self, ninfo):
+        try:
+            nr_nodes=ninfo['max_node_index']
+            str='\nnode: TotalMemory FreeMemory dma32Memory NodeDist:'
+            for i in range(0, nr_nodes):
+                str+='%4d ' % i
+            str+='\n'
+            for i in range(0, nr_nodes):
+                str+='%4d:  %8dMB %8dMB  %8dMB         :' % (i, 
+                                      ninfo['node_memsize'][i],
+                                      ninfo['node_memfree'][i],
+                                      ninfo['node_to_dma32_mem'][i])
+                for j in range(0, nr_nodes):
+                    str+='%4d ' % ninfo['node_to_node_dist'][(i*nr_nodes)+j]
+                str+='\n'
+        except:
+            str='none\n'
+        return str[:-1];
 
     def physinfo(self):
         info = self.xc.physinfo()
+        tinfo = self.xc.topologyinfo()
+        ninfo = self.xc.numainfo()
 
         info['cpu_mhz'] = info['cpu_khz'] / 1000
         
         # physinfo is in KiB, need it in MiB
         info['total_memory'] = info['total_memory'] / 1024
         info['free_memory']  = info['free_memory'] / 1024
-        info['node_to_cpu']  = self.format_node_to_cpu(info)
-        info['node_to_memory'] = \
-            self.format_node_to_memory(info, 'node_to_memory')
-        info['node_to_dma32_mem'] = \
-            self.format_node_to_memory(info, 'node_to_dma32_mem')
+
+        info['cpu_topology']  = \
+             self.format_cpu_to_core_socket_node(tinfo)
+
+        info['numa_info']  = \
+             self.format_numa_info(ninfo)
 
         ITEM_ORDER = ['nr_cpus',
                       'nr_nodes',
                       'cores_per_socket',
                       'threads_per_core',
+                      'sockets_per_node',
                       'cpu_mhz',
                       'hw_caps',
                       'virt_caps',
                       'total_memory',
                       'free_memory',
-                      'node_to_cpu',
-                      'node_to_memory',
-                      'node_to_dma32_mem',
-                      'max_node_id'
+                      'cpu_topology',
+                      'numa_info',
                       ]
 
         return [[k, info[k]] for k in ITEM_ORDER]
 
-
     def pciinfo(self):
         from xen.xend.server.pciif import get_all_assigned_pci_devices
         assigned_devs = get_all_assigned_pci_devices()
diff -r 2636e5619708 tools/python/xen/xend/balloon.py
--- a/tools/python/xen/xend/balloon.py	Tue Jan 26 15:54:40 2010 +0000
+++ b/tools/python/xen/xend/balloon.py	Thu Jan 28 14:17:32 2010 -0800
@@ -184,15 +184,11 @@
             waitscrub = 1
             vcpus = dominfo.info['cpus'][0]
             for vcpu in vcpus:
-                nodenum = 0
-                for node in physinfo['node_to_cpu']:
-                    for cpu in node:
-                        if vcpu == cpu:
-                            if oldnode == -1:
-                                oldnode = nodenum
-                            elif oldnode != nodenum:
-                                waitscrub = 0
-                    nodenum = nodenum + 1
+                nodenum = xc.numainfo()['cpu_to_node'][cpu]
+                if oldnode == -1:
+                    oldnode = nodenum
+                elif oldnode != nodenum:
+                    waitscrub = 0
 
             if waitscrub == 1 and scrub_mem > 0:
                 log.debug("wait for scrub %s", scrub_mem)
diff -r 2636e5619708 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c	Tue Jan 26 15:54:40 2010 +0000
+++ b/xen/arch/x86/sysctl.c	Thu Jan 28 14:17:32 2010 -0800
@@ -35,6 +35,8 @@
     return cpu_down(cpu);
 }
 
+extern int __node_distance(int a, int b);
+
 long arch_do_sysctl(
     struct xen_sysctl *sysctl, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 {
@@ -45,25 +47,22 @@
 
     case XEN_SYSCTL_physinfo:
     {
-        uint32_t i, max_array_ent;
-        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
-
         xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
 
         ret = xsm_physinfo();
         if ( ret )
             break;
 
-        max_array_ent = pi->max_cpu_id;
-        cpu_to_node_arr = pi->cpu_to_node;
 
         memset(pi, 0, sizeof(*pi));
-        pi->cpu_to_node = cpu_to_node_arr;
         pi->threads_per_core =
             cpus_weight(per_cpu(cpu_sibling_map, 0));
         pi->cores_per_socket =
             cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
         pi->nr_cpus = (u32)num_online_cpus();
+        pi->nr_nodes = (u32)num_online_nodes();
+        pi->sockets_per_node =  pi->nr_cpus / 
+                     (pi->nr_nodes * pi->cores_per_socket * pi->threads_per_core);
         pi->total_pages = total_pages;
         pi->free_pages = avail_domheap_pages();
         pi->scrub_pages = 0;
@@ -74,15 +73,56 @@
         if ( iommu_enabled )
             pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
 
-        pi->max_node_id = last_node(node_online_map);
-        pi->max_cpu_id = last_cpu(cpu_online_map);
-        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
+        if ( copy_to_guest(u_sysctl, sysctl, 1) )
+            ret = -EFAULT;
+    }
+    break;
+        
+    case XEN_SYSCTL_topologyinfo:
+    {
+        uint32_t i, max_cpu_index;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_core_arr;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_socket_arr;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
+
+        xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
+
+        max_cpu_index = ti->max_cpu_index;
+        cpu_to_core_arr = ti->cpu_to_core;
+        cpu_to_socket_arr = ti->cpu_to_socket;
+        cpu_to_node_arr = ti->cpu_to_node;
+
+        memset(ti, 0, sizeof(*ti));
+        ti->cpu_to_core = cpu_to_core_arr;
+        ti->cpu_to_socket = cpu_to_socket_arr;
+        ti->cpu_to_node = cpu_to_node_arr;
+
+        max_cpu_index = min_t(uint32_t, max_cpu_index, num_online_cpus());
+        ti->max_cpu_index = max_cpu_index;
 
         ret = 0;
 
-        if ( !guest_handle_is_null(cpu_to_node_arr) )
+        for ( i = 0; i < max_cpu_index; i++ )
         {
-            for ( i = 0; i <= max_array_ent; i++ )
+            if ( !guest_handle_is_null(cpu_to_core_arr) )
+            {
+                uint32_t core = cpu_online(i) ? cpu_to_core(i) : ~0u;
+                if ( copy_to_guest_offset(cpu_to_core_arr, i, &core, 1) )
+                {
+                    ret = -EFAULT;
+                    break;
+                }
+            }
+            if ( !guest_handle_is_null(cpu_to_socket_arr) )
+            {
+                uint32_t socket = cpu_online(i) ? cpu_to_socket(i) : ~0u;
+                if ( copy_to_guest_offset(cpu_to_socket_arr, i, &socket, 1) )
+                {
+                    ret = -EFAULT;
+                    break;
+                }
+            }
+            if ( !guest_handle_is_null(cpu_to_node_arr) )
             {
                 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
                 if ( copy_to_guest_offset(cpu_to_node_arr, i, &node, 1) )
@@ -93,6 +133,82 @@
             }
         }
 
+        if (ret)
+            break;
+ 
+        if ( copy_to_guest(u_sysctl, sysctl, 1) )
+            ret = -EFAULT;
+    }
+    break;
+
+    case XEN_SYSCTL_numainfo:
+    {
+        uint32_t i, max_node_index;
+        XEN_GUEST_HANDLE_64(uint64) node_to_memsize_arr;
+        XEN_GUEST_HANDLE_64(uint64) node_to_memfree_arr;
+        XEN_GUEST_HANDLE_64(uint32) node_to_node_distance_arr;
+
+        xen_sysctl_numainfo_t *ni = &sysctl->u.numainfo;
+
+        max_node_index = ni->max_node_index;
+        node_to_memsize_arr = ni->node_to_memsize;
+        node_to_memfree_arr = ni->node_to_memfree;
+        node_to_node_distance_arr = ni->node_to_node_distance;
+
+        memset(ni, 0, sizeof(*ni));
+        ni->node_to_memsize = node_to_memsize_arr;
+        ni->node_to_memfree = node_to_memfree_arr;
+        ni->node_to_node_distance = node_to_node_distance_arr;
+
+        max_node_index = min_t(uint32_t, max_node_index, num_online_nodes());
+        ni->max_node_index = max_node_index;
+
+        ret = 0;
+
+        for ( i = 0; i < max_node_index; i++ )
+        {
+            if ( !guest_handle_is_null(node_to_memsize_arr) )
+            {
+                uint64_t memsize = node_online(i) ? 
+                                   node_spanned_pages(i) << PAGE_SHIFT : 0ul;
+                if ( copy_to_guest_offset(node_to_memsize_arr, i, &memsize, 1) )
+                {
+                    ret = -EFAULT;
+                    break;
+                }
+            }
+            if ( !guest_handle_is_null(node_to_memfree_arr) )
+            {
+                uint64_t memfree = node_online(i) ? 
+                                   avail_node_heap_pages(i) << PAGE_SHIFT : 0ul;
+                if ( copy_to_guest_offset(node_to_memfree_arr, i, &memfree, 1) )
+                {
+                    ret = -EFAULT;
+                    break;
+                }
+            }
+
+            if ( !guest_handle_is_null(node_to_node_distance_arr) )
+	    {
+                int j;
+                for ( j = 0; j < max_node_index; j++)
+                {
+                    uint32_t distance = ~0u;
+                    if (node_online(i) && node_online (j)) 
+                        distance = __node_distance(i, j);
+                    
+                    if ( copy_to_guest_offset(node_to_node_distance_arr, 
+                         (i * max_node_index + j), &distance, 1) )
+                    {
+                        ret = -EFAULT;
+                        break;
+                    }
+                }
+            }
+        }
+        if (ret)
+            break;
+
         if ( copy_to_guest(u_sysctl, sysctl, 1) )
             ret = -EFAULT;
     }
diff -r 2636e5619708 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c	Tue Jan 26 15:54:40 2010 +0000
+++ b/xen/common/page_alloc.c	Thu Jan 28 14:17:32 2010 -0800
@@ -1211,6 +1211,12 @@
                             -1);
 }
 
+unsigned long avail_node_heap_pages(unsigned int nodeid)
+{
+    return avail_heap_pages(MEMZONE_XEN, NR_ZONES -1, nodeid);
+}
+
+
 static void pagealloc_info(unsigned char key)
 {
     unsigned int zone = MEMZONE_XEN;
diff -r 2636e5619708 xen/include/asm-x86/numa.h
--- a/xen/include/asm-x86/numa.h	Tue Jan 26 15:54:40 2010 +0000
+++ b/xen/include/asm-x86/numa.h	Thu Jan 28 14:17:32 2010 -0800
@@ -73,6 +73,7 @@
 #define NODE_DATA(nid)		(&(node_data[nid]))
 
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
+#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
 #define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn + \
 				 NODE_DATA(nid)->node_spanned_pages)
 
diff -r 2636e5619708 xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h	Tue Jan 26 15:54:40 2010 +0000
+++ b/xen/include/public/sysctl.h	Thu Jan 28 14:17:32 2010 -0800
@@ -34,7 +34,7 @@
 #include "xen.h"
 #include "domctl.h"
 
-#define XEN_SYSCTL_INTERFACE_VERSION 0x00000007
+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000008
 
 /*
  * Read console content from Xen buffer ring.
@@ -93,30 +93,15 @@
 struct xen_sysctl_physinfo {
     uint32_t threads_per_core;
     uint32_t cores_per_socket;
+    uint32_t sockets_per_node;
     uint32_t nr_cpus;
-    uint32_t max_node_id;
+    uint32_t nr_nodes;
     uint32_t cpu_khz;
     uint64_aligned_t total_pages;
     uint64_aligned_t free_pages;
     uint64_aligned_t scrub_pages;
     uint32_t hw_cap[8];
 
-    /*
-     * IN: maximum addressable entry in the caller-provided cpu_to_node array.
-     * OUT: largest cpu identifier in the system.
-     * If OUT is greater than IN then the cpu_to_node array is truncated!
-     */
-    uint32_t max_cpu_id;
-    /*
-     * If not NULL, this array is filled with node identifier for each cpu.
-     * If a cpu has no node information (e.g., cpu not present) then the
-     * sentinel value ~0u is written.
-     * The size of this array is specified by the caller in @max_cpu_id.
-     * If the actual @max_cpu_id is smaller than the array then the trailing
-     * elements of the array will not be written by the sysctl.
-     */
-    XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
-
     /* XEN_SYSCTL_PHYSCAP_??? */
     uint32_t capabilities;
 };
@@ -486,6 +471,73 @@
 typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
 
+#define XEN_SYSCTL_topologyinfo         16 
+struct xen_sysctl_topologyinfo {
+
+    /*
+     * IN: maximum addressable entry in the caller-provided cpu_to_core, 
+     * cpu_to_socket & cpu_to_node arrays.
+     * OUT: largest cpu identifier in the system.
+     * If OUT is greater than IN then the cpu_to_node array is truncated!
+     */
+    uint32_t max_cpu_index;
+
+    /*
+     * If not NULL, this array is filled with core/socket/node identifier for 
+     * each cpu.
+     * If a cpu has no core/socket/node information (e.g., cpu not present) 
+     * then the sentinel value ~0u is written.
+     * The size of this array is specified by the caller in @max_cpu_index.
+     * If the actual @max_cpu_index is smaller than the array then the trailing
+     * elements of the array will not be written by the sysctl.
+     */
+    XEN_GUEST_HANDLE_64(uint32) cpu_to_core;
+    XEN_GUEST_HANDLE_64(uint32) cpu_to_socket;
+    XEN_GUEST_HANDLE_64(uint32) cpu_to_node;  /* node_number */
+
+};
+typedef struct xen_sysctl_topologyinfo xen_sysctl_topologyinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
+
+#define XEN_SYSCTL_numainfo          17	
+struct xen_sysctl_numainfo {
+    /*
+     * IN: maximum addressable entry in the caller-provided node_numbers, 
+     * node_to_memsize & node_to_memfree arrays.
+     * OUT: largest possible node index for the system.
+     * If OUT is greater than IN then these arrays are truncated!
+     */
+    uint32_t max_node_index;
+
+    /* For node_to_memsize & node_to_memfree arrays, the 
+     * entry with the same index corresponds to the same node.
+     * If an entry has no node information (e.g., node not present) then the 
+     * sentinel value ~0u is written for_node_number, and value 0u is written 
+     * for node_to_memsize & node_to_memfree.
+     * The size of this array is specified by the caller in @max_node_index. 
+     * If the actual @max_node_index is smaller than the array then the 
+     * trailing elements of the array will not be written by the sysctl.
+     */
+    XEN_GUEST_HANDLE_64(uint64) node_to_memsize;
+    XEN_GUEST_HANDLE_64(uint64) node_to_memfree;
+
+
+    /* node_to_node_distance is array of size (nr_nodes * nr_nodes) listing
+     * memory access distances between nodes. i'th  entry in the array 
+     * specifies distance between node (i / nr_nodes) & node (i % nr_nodes)
+     * If an entry has no node distance information (e.g., node not present) 
+     * then the sentinel value ~0u is written.
+     * The size of this array is specified by the caller in 
+     * @max_node_distance_index. If the max_node_index*max_node_index is 
+     * smaller than the array then the trailing elements of the array will 
+     * not be written by the sysctl.
+     */
+    XEN_GUEST_HANDLE_64(uint32) node_to_node_distance;
+};
+typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
+
+
 struct xen_sysctl {
     uint32_t cmd;
     uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
@@ -493,6 +545,8 @@
         struct xen_sysctl_readconsole       readconsole;
         struct xen_sysctl_tbuf_op           tbuf_op;
         struct xen_sysctl_physinfo          physinfo;
+        struct xen_sysctl_topologyinfo      topologyinfo;
+        struct xen_sysctl_numainfo          numainfo;
         struct xen_sysctl_sched_id          sched_id;
         struct xen_sysctl_perfc_op          perfc_op;
         struct xen_sysctl_getdomaininfolist getdomaininfolist;
diff -r 2636e5619708 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h	Tue Jan 26 15:54:40 2010 +0000
+++ b/xen/include/xen/mm.h	Thu Jan 28 14:17:32 2010 -0800
@@ -57,6 +57,7 @@
 unsigned long avail_domheap_pages_region(
     unsigned int node, unsigned int min_width, unsigned int max_width);
 unsigned long avail_domheap_pages(void);
+unsigned long avail_node_heap_pages(unsigned int);
 #define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
 #define free_domheap_page(p)  (free_domheap_pages(p,0))
 unsigned int online_page(unsigned long mfn, uint32_t *status);

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel


* Re: Host Numa information in dom0
  2010-01-29 23:05 ` Host Numa information in dom0 Kamble, Nitin A
@ 2010-01-30  8:09   ` Keir Fraser
  2010-02-01  2:21     ` Kamble, Nitin A
  2010-02-01 10:23   ` Andre Przywara
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 14+ messages in thread
From: Keir Fraser @ 2010-01-30  8:09 UTC (permalink / raw)
  To: Kamble, Nitin A, xen-devel

I'll apply post 4.0.0. Please also supply a signed-off-by line.

 -- Keir

On 29/01/2010 23:05, "Kamble, Nitin A" <nitin.a.kamble@intel.com> wrote:

> Hi Keir,
>    Attached is the patch which exposes the host numa information to dom0. With
> the patch “xm info” command now also gives the cpu topology & host numa
> information. This will be later used to build guest numa support.
> The patch basically changes physinfo sysctl, and adds topology_info &
> numa_info sysctls, and also changes the python & libxc code accordingly.
>  
> Please apply.
>  
> Thanks & Regards,
> Nitin
>  
>  
> 


* RE: Host Numa information in dom0
  2010-01-30  8:09   ` Keir Fraser
@ 2010-02-01  2:21     ` Kamble, Nitin A
  0 siblings, 0 replies; 14+ messages in thread
From: Kamble, Nitin A @ 2010-02-01  2:21 UTC (permalink / raw)
  To: Keir Fraser, xen-devel

Thanks Keir,
Signed-Off-by: Nitin A Kamble <nitin.a.kamble@intel.com>

Regards,
Nitin

-----Original Message-----
From: Keir Fraser [mailto:keir.fraser@eu.citrix.com] 
Sent: Saturday, January 30, 2010 12:09 AM
To: Kamble, Nitin A; xen-devel@lists.xensource.com
Subject: Re: [Xen-devel] Host Numa information in dom0

I'll apply post 4.0.0. Please also supply a signed-off-by line.

 -- Keir

On 29/01/2010 23:05, "Kamble, Nitin A" <nitin.a.kamble@intel.com> wrote:

> Hi Keir,
>    Attached is the patch which exposes the host numa information to dom0. With
> the patch “xm info” command now also gives the cpu topology & host numa
> information. This will be later used to build guest numa support.
> The patch basically changes physinfo sysctl, and adds topology_info &
> numa_info sysctls, and also changes the python & libxc code accordingly.
>  
> Please apply.
>  
> Thanks & Regards,
> Nitin
>  
>  
> 


* Re: Host Numa information in dom0
  2010-01-29 23:05 ` Host Numa information in dom0 Kamble, Nitin A
  2010-01-30  8:09   ` Keir Fraser
@ 2010-02-01 10:23   ` Andre Przywara
  2010-02-01 17:53     ` Dulloor
  2010-02-05 17:39   ` Ian Pratt
  2010-05-26 17:31   ` Bruce Edge
  3 siblings, 1 reply; 14+ messages in thread
From: Andre Przywara @ 2010-02-01 10:23 UTC (permalink / raw)
  To: Kamble, Nitin A; +Cc: xen-devel, Keir Fraser

Kamble, Nitin A wrote:
> Hi Keir,
> 
>    Attached is the patch which exposes the host numa information to 
> dom0. With the patch “xm info” command now also gives the cpu topology & 
> host numa information. This will be later used to build guest numa support.
What information are you missing from the current physinfo? As far as I 
can see, only the total amount of memory per node is not provided. But 
one could get this info from parsing the SRAT table in Dom0, which is at 
least mapped into Dom0's memory.
Or do you want to provide NUMA information to all PV guests (but then it 
cannot be a sysctl)? This would be helpful, as it would avoid having to 
enable ACPI parsing in PV Linux for NUMA guest support.

Besides that, I have to oppose the introduction of sockets_per_node again. 
Future AMD processors will feature _two_ nodes on _one_ socket, so this 
variable should hold 1/2, but this will be rounded to zero. I think this 
information is pretty useless anyway, as the number of sockets is mostly 
interesting for licensing purposes, where a single number is sufficient. 
  For scheduling purposes cache topology is more important.
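
To spell the rounding problem out with hypothetical numbers, using the formula the patch adds to xen/arch/x86/sysctl.c:

    # Hypothetical single-socket part with two on-die nodes, 12 cores, no SMT:
    nr_cpus, nr_nodes, cores_per_socket, threads_per_core = 12, 2, 12, 1
    sockets_per_node = nr_cpus // (nr_nodes * cores_per_socket * threads_per_core)
    # Integer division, as in the C code: 12 // 24 == 0, although the
    # "right" answer for such a part would be 1/2.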

My NUMA guest patches (currently for HVM only) are doing fine; I will 
try to send out RFC patches this week. I think they don't interfere 
with this patch, but if you have other patches in development, we should 
sync on this.
The scope of my patches is to let the user (or xend) describe a guest's 
  topology (either by specifying only the number of guest nodes in the 
config file or by explicitly describing the whole NUMA topology). Some 
code will assign host nodes to the guest nodes (I am not sure yet 
whether this really belongs in xend, as it currently does, or is better 
done in libxc, where libxenlight would also benefit).
Then libxc's hvm_build_* will pass that info into the hvm_info_table, 
where code in the hvmloader will generate an appropriate SRAT table.
An extension of this would be to let Xen automatically decide whether a 
split of the resources is necessary (because there is not enough memory 
available (anymore) on one node).

Looking forward to comments...

Regards,
Andre.

-- 
Andre Przywara
AMD-Operating System Research Center (OSRC), Dresden, Germany
Tel: +49 351 448 3567 12
----to satisfy European Law for business letters:
Advanced Micro Devices GmbH
Karl-Hammerschmidt-Str. 34, 85609 Dornach b. Muenchen
Geschaeftsfuehrer: Andrew Bowd; Thomas M. McCoy; Giuliano Meroni
Sitz: Dornach, Gemeinde Aschheim, Landkreis Muenchen
Registergericht Muenchen, HRB Nr. 43632


* Re: Host Numa information in dom0
  2010-02-01 10:23   ` Andre Przywara
@ 2010-02-01 17:53     ` Dulloor
  2010-02-01 21:39       ` Andre Przywara
  0 siblings, 1 reply; 14+ messages in thread
From: Dulloor @ 2010-02-01 17:53 UTC (permalink / raw)
  To: Andre Przywara; +Cc: xen-devel, Keir Fraser

> Beside that I have to oppose the introduction of sockets_per_node again.
> Future AMD processors will feature _two_ nodes on _one_ socket, so this
> variable should hold 1/2, but this will be rounded to zero. I think this
> information is pretty useless anyway, as the number of sockets is mostly
> interesting for licensing purposes, where a single number is sufficient.

I sent a similar patch (I was using it to list pcpu-tuples and in
vcpu-pin/unpin) and didn't pursue it because of this same argument.
When we talk of CPU topology, that's how it is currently:
nodes-socket-cpu-core. Don't sockets also figure in the cache and
interconnect hierarchy?
What would the hierarchy be in those future AMD processors? Even Keir
and Ian Pratt initially wanted the pcpu-tuples
to be listed that way. So it would be helpful to make a call and move ahead.

-dulloor


On Mon, Feb 1, 2010 at 5:23 AM, Andre Przywara <andre.przywara@amd.com> wrote:
> Kamble, Nitin A wrote:
>>
>> Hi Keir,
>>
>>   Attached is the patch which exposes the host numa information to dom0.
>> With the patch “xm info” command now also gives the cpu topology & host numa
>> information. This will be later used to build guest numa support.
>
> What information are you missing from the current physinfo? As far as I can
> see, only the total amount of memory per node is not provided. But one could
> get this info from parsing the SRAT table in Dom0, which is at least mapped
> into Dom0's memory.
> Or do you want to provide NUMA information to all PV guests (but then it
> cannot be a sysctl)? This would be helpful, as this would avoid to enable
> ACPI parsing in PV Linux for NUMA guest support.
>
> Beside that I have to oppose the introduction of sockets_per_node again.
> Future AMD processors will feature _two_ nodes on _one_ socket, so this
> variable should hold 1/2, but this will be rounded to zero. I think this
> information is pretty useless anyway, as the number of sockets is mostly
> interesting for licensing purposes, where a single number is sufficient.
>  For scheduling purposes cache topology is more important.
>
> My NUMA guest patches (currently for HVM only) are doing fine, I will try to
> send out a RFC patches this week. I think they don't interfere with this
> patch, but if you have other patches in development, we should sync on this.
> The scope of my patches is to let the user (or xend) describe a guest's
>  topology (either by specifying only the number of guest nodes in the config
> file or by explicitly describing the whole NUMA topology). Some code will
> assign host nodes to the guest nodes (I am not sure yet whether this really
> belongs into xend as it currently does, or is better done in libxc, where
> libxenlight would also benefit).
> Then libxc's hvm_build_* will pass that info into the hvm_info_table, where
> code in the hvmloader will generate an appropriate SRAT table.
> An extension of this would be to let Xen automatically decide whether a
> split of the resources is necessary (because there is not enough memory
> available (anymore) on one node).
>
> Looking forward to comments...
>
> Regards,
> Andre.
>
> --
> Andre Przywara
> AMD-Operating System Research Center (OSRC), Dresden, Germany
> Tel: +49 351 448 3567 12
> ----to satisfy European Law for business letters:
> Advanced Micro Devices GmbH
> Karl-Hammerschmidt-Str. 34, 85609 Dornach b. Muenchen
> Geschaeftsfuehrer: Andrew Bowd; Thomas M. McCoy; Giuliano Meroni
> Sitz: Dornach, Gemeinde Aschheim, Landkreis Muenchen
> Registergericht Muenchen, HRB Nr. 43632
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel
>


* Re: Host Numa information in dom0
  2010-02-01 17:53     ` Dulloor
@ 2010-02-01 21:39       ` Andre Przywara
  2010-02-01 23:21         ` Kamble, Nitin A
  0 siblings, 1 reply; 14+ messages in thread
From: Andre Przywara @ 2010-02-01 21:39 UTC (permalink / raw)
  To: Dulloor; +Cc: xen-devel, Keir Fraser

Dulloor wrote:
>> Beside that I have to oppose the introduction of sockets_per_node again.
>> Future AMD processors will feature _two_ nodes on _one_ socket, so this
>> variable should hold 1/2, but this will be rounded to zero. I think this
>> information is pretty useless anyway, as the number of sockets is mostly
>> interesting for licensing purposes, where a single number is sufficient.
> 
> I sent a similar patch (was using to enlist pcpu-tuples and in
> vcpu-pin/unpin) and I didn't pursue it because of this same argument.
> When we talk of cpu topology, that's how it is currently :
> nodes-socket-cpu-core. Don't sockets also figure in the cache and
> interconnect hierarchy ?
Not necessarily. Think of Intel's Core2Quad: it has two separate L2 
caches, each associated with two of the four cores in one socket. If you 
move from core0 to core2 then AFAIK the cost would be very similar to 
moving to another processor socket. So in fact the term socket does not 
help here.
The situation is similar for the new AMD CPUs, except that "L2 cache" is 
replaced by "node" (i.e. a shared memory controller, which also matches 
the shared L3 cache). In fact the cost of moving from one node to its 
neighbour in the same socket is exactly the same as moving to another 
socket.
> What would be the hierarchy in those future AMD processors ? Even Keir
> and Ian Pratt initially wanted the pcpu-tuples
> to be listed that way. So, it would be helpful to make a call and move ahead.
You could create variables like cores_per_socket and cores_per_node; 
this would solve the issue for now. Better still would be an array 
mapping cores (or threads) to {nodes,sockets,L[123]_caches}, as this 
would allow asymmetrical configurations (useful for guests; see the 
sketch below).
In the past there was once a sockets_per_node value in physinfo, but it 
has been removed. It was not used anywhere, and multiplying the whole 
chain of x_per_y values sometimes ended up with wrong results anyway.
In any case, if you insist on this value, it will hold bogus values for 
the upcoming processors: if it is zero, you end up in trouble when 
multiplying or dividing by it, and setting it to one is also wrong.
I am sorry to spoil the game, but that's how it is.
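
For illustration, a hedged sketch of such a per-cpu mapping, extending the cpu_to_core/cpu_to_socket/cpu_to_node arrays from the patch with purely hypothetical cache-level maps:

    # Illustrative only: a 4-CPU box with one socket containing two nodes.
    # The cpu_to_l2/cpu_to_l3 keys are not in the patch; they show how cache
    # topology could be expressed in the same per-cpu array style.
    topology = {
        'max_cpu_index': 4,
        'cpu_to_core':   [0, 1, 0, 1],
        'cpu_to_socket': [0, 0, 0, 0],   # a single physical socket
        'cpu_to_node':   [0, 0, 1, 1],   # two nodes (memory controllers)
        'cpu_to_l3':     [0, 0, 1, 1],   # L3 shared per node
        'cpu_to_l2':     [0, 1, 2, 3],   # private L2 per core
    }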

If you or Nitin can show me how the sockets_per_node variable should be 
used, maybe we can find a pleasing solution.

Regards,
Andre.
> 
> On Mon, Feb 1, 2010 at 5:23 AM, Andre Przywara <andre.przywara@amd.com> wrote:
>> Kamble, Nitin A wrote:
>>> Hi Keir,
>>>
>>>   Attached is the patch which exposes the host numa information to dom0.
>>> With the patch “xm info” command now also gives the cpu topology & host numa
>>> information. This will be later used to build guest numa support.
>> What information are you missing from the current physinfo? As far as I can
>> see, only the total amount of memory per node is not provided. But one could
>> get this info from parsing the SRAT table in Dom0, which is at least mapped
>> into Dom0's memory.
>> Or do you want to provide NUMA information to all PV guests (but then it
>> cannot be a sysctl)? This would be helpful, as this would avoid to enable
>> ACPI parsing in PV Linux for NUMA guest support.
>>
>> Beside that I have to oppose the introduction of sockets_per_node again.
>> Future AMD processors will feature _two_ nodes on _one_ socket, so this
>> variable should hold 1/2, but this will be rounded to zero. I think this
>> information is pretty useless anyway, as the number of sockets is mostly
>> interesting for licensing purposes, where a single number is sufficient.
>>  For scheduling purposes cache topology is more important.
>>
>> My NUMA guest patches (currently for HVM only) are doing fine, I will try to
>> send out a RFC patches this week. I think they don't interfere with this
>> patch, but if you have other patches in development, we should sync on this.
>> The scope of my patches is to let the user (or xend) describe a guest's
>>  topology (either by specifying only the number of guest nodes in the config
>> file or by explicitly describing the whole NUMA topology). Some code will
>> assign host nodes to the guest nodes (I am not sure yet whether this really
>> belongs into xend as it currently does, or is better done in libxc, where
>> libxenlight would also benefit).
>> Then libxc's hvm_build_* will pass that info into the hvm_info_table, where
>> code in the hvmloader will generate an appropriate SRAT table.
>> An extension of this would be to let Xen automatically decide whether a
>> split of the resources is necessary (because there is not enough memory
>> available (anymore) on one node).
>>
>> Looking forward to comments...
>>
>> Regards,
>> Andre.
>>


* RE: Host Numa information in dom0
  2010-02-01 21:39       ` Andre Przywara
@ 2010-02-01 23:21         ` Kamble, Nitin A
  0 siblings, 0 replies; 14+ messages in thread
From: Kamble, Nitin A @ 2010-02-01 23:21 UTC (permalink / raw)
  To: Andre Przywara, Dulloor; +Cc: xen-devel, Keir Fraser

Andre, Dulloor,
  Some of us are also busy cooking guest NUMA patches for Xen. I think we should sync up so that it works well for both.
  And sockets_per_node can be taken out if it is an issue for you. It was added to assist the user in specifying the NUMA topology for the guest. It is not strictly required and can be taken out without any harm.

Thanks & Regards,
Nitin



-----Original Message-----
From: Andre Przywara [mailto:andre.przywara@amd.com] 
Sent: Monday, February 01, 2010 1:40 PM
To: Dulloor
Cc: Kamble, Nitin A; xen-devel@lists.xensource.com; Keir Fraser
Subject: Re: [Xen-devel] Host Numa information in dom0

Dulloor wrote:
>> Beside that I have to oppose the introduction of sockets_per_node again.
>> Future AMD processors will feature _two_ nodes on _one_ socket, so this
>> variable should hold 1/2, but this will be rounded to zero. I think this
>> information is pretty useless anyway, as the number of sockets is mostly
>> interesting for licensing purposes, where a single number is sufficient.
> 
> I sent a similar patch (was using to enlist pcpu-tuples and in
> vcpu-pin/unpin) and I didn't pursue it because of this same argument.
> When we talk of cpu topology, that's how it is currently :
> nodes-socket-cpu-core. Don't sockets also figure in the cache and
> interconnect hierarchy ?
Not necessarily. Think of Intel's Core2Quad, they have two separate L2 
caches each associated to two of the four cores in one socket. If you 
move from core0 to core2 then AFAIK the cost would be very similar to 
moving to another processor socket. So in fact the term socket does not 
help here.
The situation is similar to the new AMD CPUs, just that it replaces "L2 
cache" with "node" (aka shared memory controller, which also matches 
shared L3 cache). In fact the cost of moving from one node to the 
neighbor in the same socket is exactly the same as moving to another 
socket.
> What would be the hierarchy in those future AMD processors ? Even Keir
> and Ian Pratt initially wanted the pcpu-tuples
> to be listed that way. So, it would be helpful to make a call and move ahead.
You could create variables like cores_per_socket and cores_per_node, 
this would solve this issue for now. Actually better would be an array 
mapping cores (or threads) to {nodes,sockets,L[123]_caches}, as this 
would allow asymmetrical configurations (useful for guests).
In the past there once was a socket_per_node value in physinfo, but it 
has been removed. It was not used anywhere, and multiplying the whole 
chain of x_per_y sometimes ended up in wrong values anyway.
Anyway, if you insist on this value it will hold bogus values for the 
upcoming processors. If it will be zero, you end up in trouble when 
multiplying or dividing with it, and letting it be one is also wrong.
I am sorry to spoil this whole game, but that it's how it is.

If you or Nitin show me how the socket_per_node variable should be used, 
we can maybe find a pleasing solution.

Regards,
Andre.
> 
> On Mon, Feb 1, 2010 at 5:23 AM, Andre Przywara <andre.przywara@amd.com> wrote:
>> Kamble, Nitin A wrote:
>>> Hi Keir,
>>>
>>>   Attached is the patch which exposes the host numa information to dom0.
>>> With the patch "xm info" command now also gives the cpu topology & host numa
>>> information. This will be later used to build guest numa support.
>> What information are you missing from the current physinfo? As far as I can
>> see, only the total amount of memory per node is not provided. But one could
>> get this info from parsing the SRAT table in Dom0, which is at least mapped
>> into Dom0's memory.
>> Or do you want to provide NUMA information to all PV guests (but then it
>> cannot be a sysctl)? This would be helpful, as this would avoid to enable
>> ACPI parsing in PV Linux for NUMA guest support.
>>
>> Beside that I have to oppose the introduction of sockets_per_node again.
>> Future AMD processors will feature _two_ nodes on _one_ socket, so this
>> variable should hold 1/2, but this will be rounded to zero. I think this
>> information is pretty useless anyway, as the number of sockets is mostly
>> interesting for licensing purposes, where a single number is sufficient.
>>  For scheduling purposes cache topology is more important.
>>
>> My NUMA guest patches (currently for HVM only) are doing fine, I will try to
>> send out a RFC patches this week. I think they don't interfere with this
>> patch, but if you have other patches in development, we should sync on this.
>> The scope of my patches is to let the user (or xend) describe a guest's
>>  topology (either by specifying only the number of guest nodes in the config
>> file or by explicitly describing the whole NUMA topology). Some code will
>> assign host nodes to the guest nodes (I am not sure yet whether this really
>> belongs into xend as it currently does, or is better done in libxc, where
>> libxenlight would also benefit).
>> Then libxc's hvm_build_* will pass that info into the hvm_info_table, where
>> code in the hvmloader will generate an appropriate SRAT table.
>> An extension of this would be to let Xen automatically decide whether a
>> split of the resources is necessary (because there is not enough memory
>> available (anymore) on one node).
>>
>> Looking forward to comments...
>>
>> Regards,
>> Andre.
>>


* RE: Host Numa information in dom0
  2010-01-29 23:05 ` Host Numa information in dom0 Kamble, Nitin A
  2010-01-30  8:09   ` Keir Fraser
  2010-02-01 10:23   ` Andre Przywara
@ 2010-02-05 17:39   ` Ian Pratt
  2010-02-05 20:33     ` Dan Magenheimer
  2010-02-09 22:56     ` Nakajima, Jun
  2010-05-26 17:31   ` Bruce Edge
  3 siblings, 2 replies; 14+ messages in thread
From: Ian Pratt @ 2010-02-05 17:39 UTC (permalink / raw)
  To: Kamble, Nitin A, xen-devel; +Cc: Ian Pratt

>    Attached is the patch which exposes the host numa information to dom0.
> With the patch "xm info" command now also gives the cpu topology & host
> numa information. This will be later used to build guest numa support.
> 
> The patch basically changes physinfo sysctl, and adds topology_info &
> numa_info sysctls, and also changes the python & libxc code accordingly.


It would be good to have a discussion about how we should expose NUMA information to guests. 

I believe we can control the desired allocation of memory from nodes and creation of guest NUMA tables using VCPU affinity masks combined with a new boolean option to enable exposure of NUMA information to guests.

For each guest VCPU, we should inspect its affinity mask to see which nodes the VCPU is able to run on, thus building a set of 'allowed node' masks. We should then compare all the 'allowed node' masks to see how many unique node masks there are -- this corresponds to the number of NUMA nodes that we wish to expose to the guest if this guest has NUMA enabled. We would apportion the guest's pseudo-physical memory equally between these virtual NUMA nodes.

If guest NUMA is disabled, we just use a single node mask which is the union of the per-VCPU node masks.

Where allowed node masks span more than one physical node, we should allocate memory to the guest's virtual node by pseudo-randomly striping memory allocations (in 2MB chunks) across the specified physical nodes. [pseudo-random is probably better than round-robin]

Make sense? I can provide some worked examples.
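
A hedged sketch of that grouping (vcpu_affinity is assumed to be a list with one set of physical CPU numbers per VCPU, and cpu_to_node the per-cpu map from the topologyinfo sysctl; illustration only, not the actual toolstack code):

    def virtual_nodes(vcpu_affinity, cpu_to_node, guest_numa=True):
        # Per-VCPU 'allowed node' masks derived from the affinity masks.
        masks = [frozenset(cpu_to_node[c] for c in cpus)
                 for cpus in vcpu_affinity]
        if not guest_numa:
            # Guest NUMA disabled: a single node mask, the union of all
            # per-VCPU node masks.
            return [frozenset().union(*masks)]
        # One virtual NUMA node per unique allowed-node mask; the guest's
        # pseudo-physical memory is then apportioned equally between them.
        return list(set(masks))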

As regards the socket vs node terminology, I agree the variables are probably badly named and would perhaps best be called 'node' and 'supernode'. The key thing is that the toolstack should allow hierarchy to be expressed when specifying CPUs (using a dotted notation) rather than having to specify the enumerated CPU number.


Best,
Ian


* RE: RE: Host Numa information in dom0
  2010-02-05 17:39   ` Ian Pratt
@ 2010-02-05 20:33     ` Dan Magenheimer
  2010-02-09 22:03       ` Nakajima, Jun
  2010-02-09 22:56     ` Nakajima, Jun
  1 sibling, 1 reply; 14+ messages in thread
From: Dan Magenheimer @ 2010-02-05 20:33 UTC (permalink / raw)
  To: Ian Pratt, Kamble, Nitin A, xen-devel, Andre Przywara

It would be good if the discussion includes how guest NUMA
works with (or is exclusive of) migration/save/restore.  Also,
the discussion should include the interaction (or exclusivity
from) the various Xen RAM utilization technologies -- tmem,
page sharing/swapping, and PoD.  Obviously it would be great
if Xen could provide both optimal affinity/performance and optimal
flexibility and resource utilization, but I suspect that will
be a VERY difficult combination.

> -----Original Message-----
> From: Ian Pratt [mailto:Ian.Pratt@eu.citrix.com]
> Sent: Friday, February 05, 2010 10:39 AM
> To: Kamble, Nitin A; xen-devel@lists.xensource.com
> Cc: Ian Pratt
> Subject: [Xen-devel] RE: Host Numa information in dom0
> 
> >    Attached is the patch which exposes the host numa information to
> dom0.
> > With the patch "xm info" command now also gives the cpu topology &
> host
> > numa information. This will be later used to build guest numa
> support.
> >
> > The patch basically changes physinfo sysctl, and adds topology_info &
> > numa_info sysctls, and also changes the python & libxc code
> accordingly.
> 
> 
> It would be good to have a discussion about how we should expose NUMA
> information to guests.
> 
> I believe we can control the desired allocation of memory from nodes
> and creation of guest NUMA tables using VCPU affinity masks combined
> with a new boolean option to enable exposure of NUMA information to
> guests.
> 
> For each guest VCPU, we should inspect its affinity mask to see which
> nodes the VCPU is able to run on, thus building a set of 'allowed node'
> masks. We should then compare all the 'allowed node' masks to see how
> many unique node masks there are -- this corresponds to the number of
> NUMA nodes that we wish to expose to the guest if this guest has NUMA
> enabled. We would aportion the guest's pseudo-physical memory equally
> between these virtual NUMA nodes.
> 
> If guest NUMA is disabled, we just use a single node mask which is the
> union of the per-VCPU node masks.
> 
> Where allowed node masks span more than one physical node, we should
> allocate memory to the guest's virtual node by pseudo randomly striping
> memory allocations (in 2MB chunks) from across the specified physical
> nodes. [pseudo random is probably better than round robin]
> 
> Make sense? I can provide some worked exampled.
> 
> As regards the socket vs node terminology, I agree the variables are
> probably badly named and would perhaps best be called 'node' and
> 'supernode'. The key thing is that the toolstack should allow hierarchy
> to be expressed when specifying CPUs (using a dotted notation) rather
> than having to specify the enumerated CPU number.
> 
> 
> Best,
> Ian
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: RE: Host Numa informtion in dom0
  2010-02-05 20:33     ` Dan Magenheimer
@ 2010-02-09 22:03       ` Nakajima, Jun
  2010-02-10  3:25         ` Dan Magenheimer
  0 siblings, 1 reply; 14+ messages in thread
From: Nakajima, Jun @ 2010-02-09 22:03 UTC (permalink / raw)
  To: Dan Magenheimer, Ian Pratt, Kamble, Nitin A,
	xen-devel@lists.xensource.com

Dan Magenheimer wrote on Fri, 5 Feb 2010 at 12:33:19:

> It would be good if the discussion includes how guest NUMA
> works with (or is exclusive of) migration/save/restore.  Also,
> the discussion should include the interaction (or exclusivity
> from) the various Xen RAM utilization technologies -- tmem,
> page sharing/swapping, and PoD.  Obviously it would be great
> if Xen could provide both optimal affinity/performance and optimal
> flexibility and resource utilization, but I suspect that will
> be a VERY difficult combination.
> 

I think migration/save/restore should be excluded at this point, to keep the design/implementation simple; it's a performance/scalability feature.

Jun
___
Intel Open Source Technology Center

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: Host Numa informtion in dom0
  2010-02-05 17:39   ` Ian Pratt
  2010-02-05 20:33     ` Dan Magenheimer
@ 2010-02-09 22:56     ` Nakajima, Jun
  2010-02-11 15:21       ` Ian Pratt
  1 sibling, 1 reply; 14+ messages in thread
From: Nakajima, Jun @ 2010-02-09 22:56 UTC (permalink / raw)
  To: Ian Pratt, Kamble, Nitin A, xen-devel

Ian Pratt wrote on Fri, 5 Feb 2010 at 09:39:09:

>>    Attached is the patch which exposes the host numa information to
> dom0.
>> With the patch "xm info" command now also gives the cpu topology & host
>> numa information. This will be later used to build guest numa support.
>> 
>> The patch basically changes physinfo sysctl, and adds topology_info &
>> numa_info sysctls, and also changes the python & libxc code
> accordingly.
> 
> 
> It would be good to have a discussion about how we should expose NUMA
> information to guests.
> 
> I believe we can control the desired allocation of memory from nodes and
> creation of guest NUMA tables using VCPU affinity masks combined with a
> new boolean option to enable exposure of NUMA information to guests.
> 

I agree. 

> For each guest VCPU, we should inspect its affinity mask to see which
> nodes the VCPU is able to run on, thus building a set of 'allowed node'
> masks. We should then compare all the 'allowed node' masks to see how
> many unique node masks there are -- this corresponds to the number of
> NUMA nodes that we wish to expose to the guest if this guest has NUMA
> enabled. We would apportion the guest's pseudo-physical memory equally
> between these virtual NUMA nodes.
> 

Right.
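
For concreteness, my reading of that step as a rough sketch (illustrative
Python, not actual toolstack code; cpu_to_node stands in for whatever the
topology sysctl reports):

    def vnuma_layout(vcpu_affinities, cpu_to_node, guest_mem_mb):
        # vcpu_affinities: one set of physical CPUs per VCPU (from the
        # affinity masks); cpu_to_node: physical CPU -> node map.
        masks = [frozenset(cpu_to_node[c] for c in cpus)
                 for cpus in vcpu_affinities]
        vnodes = set(masks)                # unique masks = virtual NUMA nodes
        mem_per_vnode = guest_mem_mb // len(vnodes)
        union = frozenset().union(*masks)  # single node if guest NUMA is off
        return masks, vnodes, mem_per_vnode, union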

> If guest NUMA is disabled, we just use a single node mask which is the
> union of the per-VCPU node masks.
> 
> Where allowed node masks span more than one physical node, we should
> allocate memory to the guest's virtual node by pseudo randomly striping
> memory allocations (in 2MB chunks) from across the specified physical
> nodes. [pseudo random is probably better than round robin]

Do we really want to support this? I don't think the allowed node masks should span more than one physical NUMA node. We also need to look at I/O devices.

> 
> Make sense? I can provide some worked examples.
> 

Examples are appreciated.

Thanks,
Jun
___
Intel Open Source Technology Center

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: RE: Host Numa informtion in dom0
  2010-02-09 22:03       ` Nakajima, Jun
@ 2010-02-10  3:25         ` Dan Magenheimer
  0 siblings, 0 replies; 14+ messages in thread
From: Dan Magenheimer @ 2010-02-10  3:25 UTC (permalink / raw)
  To: Nakajima, Jun, Ian Pratt, Kamble, Nitin A, xen-devel, Andre Przywara

[-- Attachment #1: Type: text/plain, Size: 1847 bytes --]

While I am in agreement in general, my point is that we
need to avoid misleading virtualization users: we should
somehow make it clear that "pinning NUMA memory" to get
performance advantages results in significant losses
in flexibility.  For example, it won't be intuitive
to users/admins that starting guest A and then starting
guest B may result in a very different performance
profile for A's applications than starting guest B
and then starting guest A.

This may be obvious for other flexibility limiters such
as PCI passthrough, but I suspect the vast majority of
users (at least outside of the HPC community) for the next
few years are not going to accept that one chunk of memory 
is *that* different from another chunk of memory.

> -----Original Message-----
> From: Nakajima, Jun [mailto:jun.nakajima@intel.com]
> Sent: Tuesday, February 09, 2010 3:03 PM
> To: Dan Magenheimer; Ian Pratt; Kamble, Nitin A; xen-
> devel@lists.xensource.com; Andre Przywara
> Subject: RE: [Xen-devel] RE: Host Numa informtion in dom0
> 
> Dan Magenheimer wrote on Fri, 5 Feb 2010 at 12:33:19:
> 
> > It would be good if the discussion includes how guest NUMA
> > works with (or is exclusive of) migration/save/restore.  Also,
> > the discussion should include the interaction (or exclusivity
> > from) the various Xen RAM utilization technologies -- tmem,
> > page sharing/swapping, and PoD.  Obviously it would be great
> > if Xen could provide both optimal affinity/performance and optimal
> > flexibility and resource utilization, but I suspect that will
> > be a VERY difficult combination.
> >
> 
> I think migration/save/restore should be excluded at this point, to
> keep the design/implementation simple; it's a performance/scalability
> feature.
> 
> Jun
> ___
> Intel Open Source Technology Center
> 
> 
> 

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: Host Numa informtion in dom0
  2010-02-09 22:56     ` Nakajima, Jun
@ 2010-02-11 15:21       ` Ian Pratt
  0 siblings, 0 replies; 14+ messages in thread
From: Ian Pratt @ 2010-02-11 15:21 UTC (permalink / raw)
  To: Nakajima, Jun, Kamble, Nitin A, xen-devel; +Cc: Ian Pratt

> > If guest NUMA is disabled, we just use a single node mask which is the
> > union of the per-VCPU node masks.
> >
> > Where allowed node masks span more than one physical node, we should
> > allocate memory to the guest's virtual node by pseudo randomly striping
> > memory allocations (in 2MB chunks) from across the specified physical
> > nodes. [pseudo random is probably better than round robin]
> 
> Do we really want to support this? I don't think the allowed node masks
> should span more than one physical NUMA node. We also need to look at I/O
> devices as well.

Given that we definitely need this striping code in the case where the guest is non-NUMA, I'd be inclined to still allow it to be used even if the guest has multiple NUMA nodes. It could come in handy where there is a hierarchy between physical NUMA nodes, enabling, for example, striping to be used between a pair of 'close' nodes while exposing the higher-level topology of the paired node sets to the guest.
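
As a rough illustration of that pairing step (the greedy nearest-neighbour
policy and the distance table format are just assumptions for the sketch;
the distances would come from something like the numa_info node-to-node
table):

    def pair_close_nodes(distance):
        # distance[i][j]: inter-node distance.  Greedily pair each node
        # with its nearest unpaired neighbour; each pair becomes one
        # guest-visible node whose memory is striped across both members.
        unpaired, pairs = set(range(len(distance))), []
        while unpaired:
            a = min(unpaired)
            unpaired.remove(a)
            if not unpaired:
                pairs.append((a,))
                break
            b = min(unpaired, key=lambda n: distance[a][n])
            unpaired.remove(b)
            pairs.append((a, b))
        return pairs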

Ian

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: Host Numa informtion in dom0
  2010-01-29 23:05 ` Host Numa informtion in dom0 Kamble, Nitin A
                     ` (2 preceding siblings ...)
  2010-02-05 17:39   ` Ian Pratt
@ 2010-05-26 17:31   ` Bruce Edge
  3 siblings, 0 replies; 14+ messages in thread
From: Bruce Edge @ 2010-05-26 17:31 UTC (permalink / raw)
  Cc: xen-devel

I'm getting a python traceback when I try to start a VM, which I've
tracked down (I think) to this patch:

Traceback (most recent call last):
  File "/usr/local/lib/python2.6/dist-packages/xen/util/xmlrpclib2.py",
line 131, in _marshaled_dispatch
    response = self._dispatch(method, params)
  File "/usr/lib/python2.6/SimpleXMLRPCServer.py", line 418, in _dispatch
    return func(*params)
  File "/usr/local/lib/python2.6/dist-packages/xen/xend/server/XMLRPCServer.py",
line 80, in domain_create
    info = XendDomain.instance().domain_create(config)
  File "/usr/local/lib/python2.6/dist-packages/xen/xend/XendDomain.py",
line 982, in domain_create
    dominfo = XendDomainInfo.create(config)
  File "/usr/local/lib/python2.6/dist-packages/xen/xend/XendDomainInfo.py",
line 106, in create
    vm.start()
  File "/usr/local/lib/python2.6/dist-packages/xen/xend/XendDomainInfo.py",
line 470, in start
    XendTask.log_progress(0, 30, self._constructDomain)
  File "/usr/local/lib/python2.6/dist-packages/xen/xend/XendTask.py",
line 209, in log_progress
    retval = func(*args, **kwds)
  File "/usr/local/lib/python2.6/dist-packages/xen/xend/XendDomainInfo.py",
line 2530, in _constructDomain
    balloon.free(16*1024, self) # 16MB should be plenty
  File "/usr/local/lib/python2.6/dist-packages/xen/xend/balloon.py",
line 187, in free
    nodenum = xc.numainfo()['cpu_to_node'][cpu]
KeyError: 'cpu_to_node'


release                : 2.6.32.12
version                : #1 SMP Wed May 5 21:52:23 PDT 2010
machine                : x86_64
nr_cpus                : 16
nr_nodes               : 2
cores_per_socket       : 4
threads_per_core       : 2
cpu_mhz                : 2533
hw_caps                :
bfebfbff:28100800:00000000:00001b40:009ce3bd:00000000:00000001:00000000
virt_caps              : hvm hvm_directio
total_memory           : 12277
free_memory            : 11629
free_cpus              : 0
xen_major              : 4
xen_minor              : 1
xen_extra              : -unstable
xen_caps               : xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32
hvm-3.0-x86_32p hvm-3.0-x86_64
xen_scheduler          : credit
xen_pagesize           : 4096
platform_params        : virt_start=0xffff800000000000
xen_changeset          : Sat May 22 06:36:41 2010 +0100 21446:93410e5e4ad8
xen_commandline        : dummy=dummy console=com1 115200,8n1
dom0_mem=512M dom0_max_vcpus=1 dom0_vcpus_pin=true
iommu=1,passthrough,no-intremap loglvl=all loglvl_guest=all loglevl=10
debug acpi=force apic=on apic_verbosity=verbose numa=on
cc_compiler            : gcc version 4.3.3 (Ubuntu 4.3.3-5ubuntu4)
cc_compile_by          : bedge
cc_compile_domain      :
cc_compile_date        : Tue May 25 14:51:02 PDT 2010
xend_config_format     : 4
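
In case it helps narrow this down: if the cpu_to_node map now comes from
the topology sysctl rather than the numa one (I haven't verified that, and
I'm assuming the python bindings expose a matching xc.topologyinfo()), a
defensive lookup along these lines in balloon.py avoids the KeyError:

    info = xc.numainfo()
    if 'cpu_to_node' not in info:
        info = xc.topologyinfo()   # assumption: the map moved here
    nodenum = info['cpu_to_node'][cpu] if 'cpu_to_node' in info else 0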

-Bruce


On Fri, Jan 29, 2010 at 4:05 PM, Kamble, Nitin A
<nitin.a.kamble@intel.com> wrote:
>
> Hi Keir,
>
>    Attached is the patch which exposes the host numa information to dom0. With the patch “xm info” command now also gives the cpu topology & host numa information. This will be later used to build guest numa support.
>
> The patch basically changes physinfo sysctl, and adds topology_info & numa_info sysctls, and also changes the python & libxc code accordingly.
>
>
>
> Please apply.
>
>
>
> Thanks & Regards,
>
> Nitin
>
>
>
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel
>

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2010-05-26 17:31 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <AcqhN4DFeKntxDxZTHGQrdKilppA4Q==>
2010-01-29 23:05 ` Host Numa informtion in dom0 Kamble, Nitin A
2010-01-30  8:09   ` Keir Fraser
2010-02-01  2:21     ` Kamble, Nitin A
2010-02-01 10:23   ` Andre Przywara
2010-02-01 17:53     ` Dulloor
2010-02-01 21:39       ` Andre Przywara
2010-02-01 23:21         ` Kamble, Nitin A
2010-02-05 17:39   ` Ian Pratt
2010-02-05 20:33     ` Dan Magenheimer
2010-02-09 22:03       ` Nakajima, Jun
2010-02-10  3:25         ` Dan Magenheimer
2010-02-09 22:56     ` Nakajima, Jun
2010-02-11 15:21       ` Ian Pratt
2010-05-26 17:31   ` Bruce Edge
