* [PATCH 0/6] hugetlb: V6 constrain allocation/free based on task mempolicy
From: Lee Schermerhorn @ 2009-09-09 16:31 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
PATCH 0/6 hugetlb: numa control of persistent huge pages alloc/free
Against: 2.6.31-rc7-mmotm-090827-1651
This is V6 of a series of patches to provide control over the location
of the allocation and freeing of persistent huge pages on a NUMA
platform. Please consider V6 [patches 1-6] for merging into mmotm.
This series uses two mechanisms to constrain the nodes from which
persistent huge pages are allocated: 1) the task NUMA mempolicy of
the task modifying "nr_hugepages", based on a suggestion by Mel Gorman;
and 2) a subset of the hugepages hstate sysfs attributes has been
added [in V4] to each node system device under:
/sys/devices/system/node/node[0-9]*/hugepages.
The per node attributes allow direct assignment of a huge page
count on a specific node, regardless of the task's mempolicy or
cpuset constraints.
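[ Illustrative sketch of using a per node attribute, once the per node
attributes patch later in this series is applied; the "hugepages-2048kB"
directory name assumes 2MB huge pages:

	# allocate 8 huge pages on node 2, ignoring mempolicy/cpusets:
	echo 8 > /sys/devices/system/node/node2/hugepages/hugepages-2048kB/nr_hugepages
]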
V5 addressed review comments -- changes described in patch descriptions.
V6 addresses more review comments, described in the patches.
Attached to V6, I'm sending a 3-patch series that implements an
enhancement suggested by David Rientjes: the default huge page nodes
allowed mask will be the nodes with memory rather than all on-line nodes.
The "nodes with memory" state already tracks memory/node hot-plug.
Further, we will allocate per node hstate attributes only for nodes with
memory. This requires that we register a memory on/off-line notifier
and [un]register the attributes on transitions to/from memoryless state.
Because of the interaction with memory hotplug, these 3 patches will
likely require more work and testing before merging. The first six
patches do not depend on these 3 and, IMO, need not wait for them.
* [PATCH 1/6] hugetlb: rework hstate_next_node_* functions
From: Lee Schermerhorn @ 2009-09-09 16:31 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
[PATCH 1/6] hugetlb: rework hstate_next_node* functions
Against: 2.6.31-rc7-mmotm-090827-1651
V2: + cleaned up comments, removed some deemed unnecessary,
added some suggested by review
+ removed check for !current in huge_mpol_nodes_allowed().
+ added 'current->comm' to warning message in huge_mpol_nodes_allowed().
+ added VM_BUG_ON() assertion in hugetlb.c next_node_allowed() to
catch out of range node id.
+ add examples to patch description
V3: + factored this "cleanup" patch out of V2 patch 2/3
+ moved ahead of patch to add nodes_allowed mask to alloc funcs
as this patch is somewhat independent from using task mempolicy
to control huge page allocation and freeing.
Modify the hstate_next_node* functions to allow them to be called to
obtain the "start_nid". Whereas prior to this patch we unconditionally
called hstate_next_node_to_{alloc|free}() whether or not we
successfully allocated/freed a huge page on the node, we now call
these functions only on failure, to advance to the next allowed node.
Factor out the next_node_allowed() function to handle wrap at end
of node_online_map. In this version, the allowed nodes include all
of the online nodes.
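[ Illustrative example of the wrap semantics, assuming nodes 0-3 are
the only online nodes:

	next_node_allowed(2) == 3
	next_node_allowed(3) == 0	/* wrapped to first_node(node_online_map) */
]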
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
mm/hugetlb.c | 70 +++++++++++++++++++++++++++++++++++++----------------------
1 file changed, 45 insertions(+), 25 deletions(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/mm/hugetlb.c 2009-09-09 11:57:32.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c 2009-09-09 11:57:34.000000000 -0400
@@ -622,6 +622,20 @@ static struct page *alloc_fresh_huge_pag
}
/*
+ * common helper function for hstate_next_node_to_{alloc|free}.
+ * return next node in node_online_map, wrapping at end.
+ */
+static int next_node_allowed(int nid)
+{
+ nid = next_node(nid, node_online_map);
+ if (nid == MAX_NUMNODES)
+ nid = first_node(node_online_map);
+ VM_BUG_ON(nid >= MAX_NUMNODES);
+
+ return nid;
+}
+
+/*
* Use a helper variable to find the next node and then
* copy it back to next_nid_to_alloc afterwards:
* otherwise there's a window in which a racer might
@@ -634,12 +648,12 @@ static struct page *alloc_fresh_huge_pag
*/
static int hstate_next_node_to_alloc(struct hstate *h)
{
- int next_nid;
- next_nid = next_node(h->next_nid_to_alloc, node_online_map);
- if (next_nid == MAX_NUMNODES)
- next_nid = first_node(node_online_map);
+ int nid, next_nid;
+
+ nid = h->next_nid_to_alloc;
+ next_nid = next_node_allowed(nid);
h->next_nid_to_alloc = next_nid;
- return next_nid;
+ return nid;
}
static int alloc_fresh_huge_page(struct hstate *h)
@@ -649,15 +663,17 @@ static int alloc_fresh_huge_page(struct
int next_nid;
int ret = 0;
- start_nid = h->next_nid_to_alloc;
+ start_nid = hstate_next_node_to_alloc(h);
next_nid = start_nid;
do {
page = alloc_fresh_huge_page_node(h, next_nid);
- if (page)
+ if (page) {
ret = 1;
+ break;
+ }
next_nid = hstate_next_node_to_alloc(h);
- } while (!page && next_nid != start_nid);
+ } while (next_nid != start_nid);
if (ret)
count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -668,17 +684,19 @@ static int alloc_fresh_huge_page(struct
}
/*
- * helper for free_pool_huge_page() - find next node
- * from which to free a huge page
+ * helper for free_pool_huge_page() - return the next node
+ * from which to free a huge page. Advance the next node id
+ * whether or not we find a free huge page to free so that the
+ * next attempt to free addresses the next node.
*/
static int hstate_next_node_to_free(struct hstate *h)
{
- int next_nid;
- next_nid = next_node(h->next_nid_to_free, node_online_map);
- if (next_nid == MAX_NUMNODES)
- next_nid = first_node(node_online_map);
+ int nid, next_nid;
+
+ nid = h->next_nid_to_free;
+ next_nid = next_node_allowed(nid);
h->next_nid_to_free = next_nid;
- return next_nid;
+ return nid;
}
/*
@@ -693,7 +711,7 @@ static int free_pool_huge_page(struct hs
int next_nid;
int ret = 0;
- start_nid = h->next_nid_to_free;
+ start_nid = hstate_next_node_to_free(h);
next_nid = start_nid;
do {
@@ -715,9 +733,10 @@ static int free_pool_huge_page(struct hs
}
update_and_free_page(h, page);
ret = 1;
+ break;
}
next_nid = hstate_next_node_to_free(h);
- } while (!ret && next_nid != start_nid);
+ } while (next_nid != start_nid);
return ret;
}
@@ -1028,10 +1047,9 @@ int __weak alloc_bootmem_huge_page(struc
void *addr;
addr = __alloc_bootmem_node_nopanic(
- NODE_DATA(h->next_nid_to_alloc),
+ NODE_DATA(hstate_next_node_to_alloc(h)),
huge_page_size(h), huge_page_size(h), 0);
- hstate_next_node_to_alloc(h);
if (addr) {
/*
* Use the beginning of the huge page to store the
@@ -1167,29 +1185,31 @@ static int adjust_pool_surplus(struct hs
VM_BUG_ON(delta != -1 && delta != 1);
if (delta < 0)
- start_nid = h->next_nid_to_alloc;
+ start_nid = hstate_next_node_to_alloc(h);
else
- start_nid = h->next_nid_to_free;
+ start_nid = hstate_next_node_to_free(h);
next_nid = start_nid;
do {
int nid = next_nid;
if (delta < 0) {
- next_nid = hstate_next_node_to_alloc(h);
/*
* To shrink on this node, there must be a surplus page
*/
- if (!h->surplus_huge_pages_node[nid])
+ if (!h->surplus_huge_pages_node[nid]) {
+ next_nid = hstate_next_node_to_alloc(h);
continue;
+ }
}
if (delta > 0) {
- next_nid = hstate_next_node_to_free(h);
/*
* Surplus cannot exceed the total number of pages
*/
if (h->surplus_huge_pages_node[nid] >=
- h->nr_huge_pages_node[nid])
+ h->nr_huge_pages_node[nid]) {
+ next_nid = hstate_next_node_to_free(h);
continue;
+ }
}
h->surplus_huge_pages += delta;
* [PATCH 2/6] hugetlb: add nodemask arg to huge page alloc, free and surplus adjust fcns
From: Lee Schermerhorn @ 2009-09-09 16:31 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
[PATCH 2/6] hugetlb: add nodemask arg to huge page alloc, free and surplus adjust fcns
Against: 2.6.31-rc7-mmotm-090827-1651
V3: + moved this patch to after the "rework" of hstate_next_node_to_...
functions as this patch is more specific to using task mempolicy
to control huge page allocation and freeing.
V5: + removed now unneeded 'nextnid' from hstate_next_node_to_{alloc|free}
and updated the stale comments.
V6: + move defaulting of nodes_allowed [to &node_online_map] up to
set_max_huge_pages(). Eliminate from hstate_next_node_*()
functions. [David Rientjes' suggestion].
+ renamed "this_node_allowed()" to "get_valid_node_allowed()"
[for David]
In preparation for constraining huge page allocation and freeing by the
controlling task's numa mempolicy, add a "nodes_allowed" nodemask pointer
to the allocate, free and surplus adjustment functions. For now, pass
NULL to indicate default behavior--i.e., use node_online_map. A
subsequent patch will derive a non-default mask from the controlling
task's numa mempolicy.
Note that this method of updating the global hstate nr_hugepages under
the constraint of a nodemask simplifies keeping the global state
consistent--especially the number of persistent and surplus pages
relative to reservations and overcommit limits. There are undoubtedly
other ways to do this, but this works for both interfaces: mempolicy
and per node attributes.
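[ For example, the call sites in the hunks below become:

	alloc_fresh_huge_page(h, &node_online_map);	/* explicit default */
	free_pool_huge_page(h, nodes_allowed, 0);	/* constrained free */

where a subsequent patch passes a mempolicy-derived mask in place of
&node_online_map. ]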
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Reviewed-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: David Rientjes <rientjes@google.com>
mm/hugetlb.c | 120 ++++++++++++++++++++++++++++++++++-------------------------
1 file changed, 71 insertions(+), 49 deletions(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/mm/hugetlb.c 2009-09-09 11:57:34.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c 2009-09-09 11:57:34.000000000 -0400
@@ -622,48 +622,56 @@ static struct page *alloc_fresh_huge_pag
}
/*
- * common helper function for hstate_next_node_to_{alloc|free}.
- * return next node in node_online_map, wrapping at end.
+ * common helper functions for hstate_next_node_to_{alloc|free}.
+ * We may have allocated or freed a huge page based on a different
+ * nodes_allowed previously, so h->next_node_to_{alloc|free} might
+ * be outside of *nodes_allowed. Ensure that we use an allowed
+ * node for alloc or free.
*/
-static int next_node_allowed(int nid)
+static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
- nid = next_node(nid, node_online_map);
+ nid = next_node(nid, *nodes_allowed);
if (nid == MAX_NUMNODES)
- nid = first_node(node_online_map);
+ nid = first_node(*nodes_allowed);
VM_BUG_ON(nid >= MAX_NUMNODES);
return nid;
}
+static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
+{
+ if (!node_isset(nid, *nodes_allowed))
+ nid = next_node_allowed(nid, nodes_allowed);
+ return nid;
+}
+
/*
- * Use a helper variable to find the next node and then
- * copy it back to next_nid_to_alloc afterwards:
- * otherwise there's a window in which a racer might
- * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
- * But we don't need to use a spin_lock here: it really
- * doesn't matter if occasionally a racer chooses the
- * same nid as we do. Move nid forward in the mask even
- * if we just successfully allocated a hugepage so that
- * the next caller gets hugepages on the next node.
+ * returns the previously saved node ["this node"] from which to
+ * allocate a persistent huge page for the pool and advance the
+ * next node from which to allocate, handling wrap at end of node
+ * mask.
*/
-static int hstate_next_node_to_alloc(struct hstate *h)
+static int hstate_next_node_to_alloc(struct hstate *h,
+ nodemask_t *nodes_allowed)
{
- int nid, next_nid;
+ int nid;
+
+ VM_BUG_ON(!nodes_allowed);
+
+ nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
+ h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
- nid = h->next_nid_to_alloc;
- next_nid = next_node_allowed(nid);
- h->next_nid_to_alloc = next_nid;
return nid;
}
-static int alloc_fresh_huge_page(struct hstate *h)
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
struct page *page;
int start_nid;
int next_nid;
int ret = 0;
- start_nid = hstate_next_node_to_alloc(h);
+ start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
next_nid = start_nid;
do {
@@ -672,7 +680,7 @@ static int alloc_fresh_huge_page(struct
ret = 1;
break;
}
- next_nid = hstate_next_node_to_alloc(h);
+ next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
} while (next_nid != start_nid);
if (ret)
@@ -684,18 +692,20 @@ static int alloc_fresh_huge_page(struct
}
/*
- * helper for free_pool_huge_page() - return the next node
- * from which to free a huge page. Advance the next node id
- * whether or not we find a free huge page to free so that the
- * next attempt to free addresses the next node.
+ * helper for free_pool_huge_page() - return the previously saved
+ * node ["this node"] from which to free a huge page. Advance the
+ * next node id whether or not we find a free huge page to free so
+ * that the next attempt to free addresses the next node.
*/
-static int hstate_next_node_to_free(struct hstate *h)
+static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
- int nid, next_nid;
+ int nid;
+
+ VM_BUG_ON(!nodes_allowed);
+
+ nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
+ h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
- nid = h->next_nid_to_free;
- next_nid = next_node_allowed(nid);
- h->next_nid_to_free = next_nid;
return nid;
}
@@ -705,13 +715,14 @@ static int hstate_next_node_to_free(stru
* balanced over allowed nodes.
* Called with hugetlb_lock locked.
*/
-static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
+static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
+ bool acct_surplus)
{
int start_nid;
int next_nid;
int ret = 0;
- start_nid = hstate_next_node_to_free(h);
+ start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
@@ -735,7 +746,7 @@ static int free_pool_huge_page(struct hs
ret = 1;
break;
}
- next_nid = hstate_next_node_to_free(h);
+ next_nid = hstate_next_node_to_free(h, nodes_allowed);
} while (next_nid != start_nid);
return ret;
@@ -937,7 +948,7 @@ static void return_unused_surplus_pages(
* on-line nodes for us and will handle the hstate accounting.
*/
while (nr_pages--) {
- if (!free_pool_huge_page(h, 1))
+ if (!free_pool_huge_page(h, &node_online_map, 1))
break;
}
}
@@ -1047,7 +1058,7 @@ int __weak alloc_bootmem_huge_page(struc
void *addr;
addr = __alloc_bootmem_node_nopanic(
- NODE_DATA(hstate_next_node_to_alloc(h)),
+ NODE_DATA(hstate_next_node_to_alloc(h, &node_online_map)),
huge_page_size(h), huge_page_size(h), 0);
if (addr) {
@@ -1102,7 +1113,7 @@ static void __init hugetlb_hstate_alloc_
if (h->order >= MAX_ORDER) {
if (!alloc_bootmem_huge_page(h))
break;
- } else if (!alloc_fresh_huge_page(h))
+ } else if (!alloc_fresh_huge_page(h, &node_online_map))
break;
}
h->max_huge_pages = i;
@@ -1144,16 +1155,22 @@ static void __init report_hugepages(void
}
#ifdef CONFIG_HIGHMEM
-static void try_to_free_low(struct hstate *h, unsigned long count)
+static void try_to_free_low(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
int i;
if (h->order >= MAX_ORDER)
return;
+ if (!nodes_allowed)
+ nodes_allowed = &node_online_map;
+
for (i = 0; i < MAX_NUMNODES; ++i) {
struct page *page, *next;
struct list_head *freel = &h->hugepage_freelists[i];
+ if (!node_isset(i, *nodes_allowed))
+ continue;
list_for_each_entry_safe(page, next, freel, lru) {
if (count >= h->nr_huge_pages)
return;
@@ -1167,7 +1184,8 @@ static void try_to_free_low(struct hstat
}
}
#else
-static inline void try_to_free_low(struct hstate *h, unsigned long count)
+static inline void try_to_free_low(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
}
#endif
@@ -1177,7 +1195,8 @@ static inline void try_to_free_low(struc
* balanced by operating on them in a round-robin fashion.
* Returns 1 if an adjustment was made.
*/
-static int adjust_pool_surplus(struct hstate *h, int delta)
+static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
+ int delta)
{
int start_nid, next_nid;
int ret = 0;
@@ -1185,9 +1204,9 @@ static int adjust_pool_surplus(struct hs
VM_BUG_ON(delta != -1 && delta != 1);
if (delta < 0)
- start_nid = hstate_next_node_to_alloc(h);
+ start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
else
- start_nid = hstate_next_node_to_free(h);
+ start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
@@ -1197,7 +1216,8 @@ static int adjust_pool_surplus(struct hs
* To shrink on this node, there must be a surplus page
*/
if (!h->surplus_huge_pages_node[nid]) {
- next_nid = hstate_next_node_to_alloc(h);
+ next_nid = hstate_next_node_to_alloc(h,
+ nodes_allowed);
continue;
}
}
@@ -1207,7 +1227,8 @@ static int adjust_pool_surplus(struct hs
*/
if (h->surplus_huge_pages_node[nid] >=
h->nr_huge_pages_node[nid]) {
- next_nid = hstate_next_node_to_free(h);
+ next_nid = hstate_next_node_to_free(h,
+ nodes_allowed);
continue;
}
}
@@ -1225,6 +1246,7 @@ static int adjust_pool_surplus(struct hs
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
unsigned long min_count, ret;
+ nodemask_t *nodes_allowed = &node_online_map;
if (h->order >= MAX_ORDER)
return h->max_huge_pages;
@@ -1242,7 +1264,7 @@ static unsigned long set_max_huge_pages(
*/
spin_lock(&hugetlb_lock);
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
- if (!adjust_pool_surplus(h, -1))
+ if (!adjust_pool_surplus(h, nodes_allowed, -1))
break;
}
@@ -1253,7 +1275,7 @@ static unsigned long set_max_huge_pages(
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
- ret = alloc_fresh_huge_page(h);
+ ret = alloc_fresh_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
@@ -1277,13 +1299,13 @@ static unsigned long set_max_huge_pages(
*/
min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
min_count = max(count, min_count);
- try_to_free_low(h, min_count);
+ try_to_free_low(h, min_count, nodes_allowed);
while (min_count < persistent_huge_pages(h)) {
- if (!free_pool_huge_page(h, 0))
+ if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
}
while (count < persistent_huge_pages(h)) {
- if (!adjust_pool_surplus(h, 1))
+ if (!adjust_pool_surplus(h, nodes_allowed, 1))
break;
}
out:
* [PATCH 3/6] hugetlb: introduce alloc_nodemask_of_node
From: Lee Schermerhorn @ 2009-09-09 16:31 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
[PATCH 3/6] - hugetlb: introduce alloc_nodemask_of_node()
Against: 2.6.31-rc7-mmotm-090827-1651
New in V5 of series
V6: + rename 'init_nodemask_of_nodes()' to 'init_nodemask_of_node()'
+ redefine init_nodemask_of_node() as static inline fcn
+ move this patch back 1 in series
Introduce nodemask macro to allocate a nodemask and
initialize it to contain a single node, using the macro
init_nodemask_of_node() factored out of the nodemask_of_node()
macro.
alloc_nodemask_of_node() coded as a macro to avoid header
dependency hell.
This will be used to construct the huge pages "nodes_allowed"
nodemask for a single node when basing nodes_allowed on a
preferred/local mempolicy or when a persistent huge page
pool page count is modified via a per node sysfs attribute.
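[ Usage sketch; the actual caller arrives in patch 5 of the series,
in set_max_huge_pages():

	nodemask_t *nodes_allowed = alloc_nodemask_of_node(nid);

	if (nodes_allowed) {
		/* constrain pool adjustment to 'nid' ... */
		kfree(nodes_allowed);	/* caller must free */
	}
]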
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
include/linux/nodemask.h | 22 ++++++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/include/linux/nodemask.h
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/include/linux/nodemask.h 2009-09-09 11:57:26.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/include/linux/nodemask.h 2009-09-09 11:57:35.000000000 -0400
@@ -245,18 +245,36 @@ static inline int __next_node(int n, con
return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
+static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+{
+ nodes_clear(*(mask));
+ node_set((node), *(mask));
+}
+
#define nodemask_of_node(node) \
({ \
typeof(_unused_nodemask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
m.bits[0] = 1UL<<(node); \
} else { \
- nodes_clear(m); \
- node_set((node), m); \
+ init_nodemask_of_node(&m, (node)); \
} \
m; \
})
+/*
+ * returns pointer to kmalloc()'d nodemask initialized to contain the
+ * specified node. Caller must free with kfree().
+ */
+#define alloc_nodemask_of_node(node) \
+({ \
+ typeof(_unused_nodemask_arg_) *nmp; \
+ nmp = kmalloc(sizeof(*nmp), GFP_KERNEL); \
+ if (nmp) \
+ init_nodemask_of_node(nmp, (node)); \
+ nmp; \
+})
+
#define first_unset_node(mask) __first_unset_node(&(mask))
static inline int __first_unset_node(const nodemask_t *maskp)
{
* Re: [PATCH 3/6] hugetlb: introduce alloc_nodemask_of_node
From: Andrew Morton @ 2009-09-10 23:05 UTC (permalink / raw)
To: Lee Schermerhorn
Cc: linux-mm, linux-numa, mel, randy.dunlap, nacc, rientjes, agl,
apw, eric.whitney
On Wed, 09 Sep 2009 12:31:46 -0400
Lee Schermerhorn <lee.schermerhorn@hp.com> wrote:
> [PATCH 3/6] - hugetlb: introduce alloc_nodemask_of_node()
>
> Against: 2.6.31-rc7-mmotm-090827-1651
>
> New in V5 of series
>
> V6: + rename 'init_nodemask_of_nodes()' to 'init_nodemask_of_node()'
> + redefine init_nodemask_of_node() as static inline fcn
> + move this patch back 1 in series
>
> Introduce nodemask macro to allocate a nodemask and
> initialize it to contain a single node, using the macro
> init_nodemask_of_node() factored out of the nodemask_of_node()
> macro.
>
> alloc_nodemask_of_node() coded as a macro to avoid header
> dependency hell.
>
> This will be used to construct the huge pages "nodes_allowed"
> nodemask for a single node when basing nodes_allowed on a
> preferred/local mempolicy or when a persistent huge page
> pool page count is modified via a per node sysfs attribute.
>
> ...
>
> +/*
> + * returns pointer to kmalloc()'d nodemask initialized to contain the
> + * specified node. Caller must free with kfree().
> + */
> +#define alloc_nodemask_of_node(node) \
> +({ \
> + typeof(_unused_nodemask_arg_) *nmp; \
> + nmp = kmalloc(sizeof(*nmp), GFP_KERNEL); \
> + if (nmp) \
> + init_nodemask_of_node(nmp, (node)); \
> + nmp; \
> +})
All right, I give up. What's with this `typeof(_unused_nodemask_arg_)'
stuff?
Was there a reason why this had to be implemented as a macro? One
which evaluates its arg either one or zero times, btw?
hm. "to avoid header dependency hell". What hell? Self-inflicted?
alloc_nodemask_of_node() has no callers, so I can think of a good fix
for these problems. If it _did_ have a caller then I might ask "can't
we fix this by moving alloc_nodemask_of_node() into the .c file". But
it doesn't so I can't.
It's a bit rude to assume that the caller wanted to use GFP_KERNEL.
* Re: [PATCH 3/6] hugetlb: introduce alloc_nodemask_of_node
From: David Rientjes @ 2009-09-10 23:17 UTC (permalink / raw)
To: Andrew Morton
Cc: Lee Schermerhorn, linux-mm, linux-numa, mel, randy.dunlap, nacc,
agl, apw, eric.whitney
On Thu, 10 Sep 2009, Andrew Morton wrote:
> alloc_nodemask_of_node() has no callers, so I can think of a good fix
> for these problems. If it _did_ have a caller then I might ask "can't
> we fix this by moving alloc_nodemask_of_node() into the .c file". But
> it doesn't so I can't.
>
It gets a caller in patch 5 of the series in set_max_huge_pages().
My early criticism of both alloc_nodemask_of_node() and
alloc_nodemask_of_mempolicy() was that for small CONFIG_NODES_SHIFT (say,
6 or less, which covers all defconfigs except ia64), it is perfectly
reasonable to allocate 64 bytes on the stack in the caller.
* Re: [PATCH 3/6] hugetlb: introduce alloc_nodemask_of_node
From: Andrew Morton @ 2009-09-10 23:36 UTC (permalink / raw)
To: David Rientjes
Cc: lee.schermerhorn, linux-mm, linux-numa, mel, randy.dunlap, nacc,
agl, apw, eric.whitney
On Thu, 10 Sep 2009 16:17:22 -0700 (PDT)
David Rientjes <rientjes@google.com> wrote:
> On Thu, 10 Sep 2009, Andrew Morton wrote:
>
> > alloc_nodemask_of_node() has no callers, so I can think of a good fix
> > for these problems. If it _did_ have a caller then I might ask "can't
> > we fix this by moving alloc_nodemask_of_node() into the .c file". But
> > it doesn't so I can't.
> >
>
> It gets a caller in patch 5 of the series in set_max_huge_pages().
ooh, there it is.
So alloc_nodemask_of_node() could be moved into mm/hugetlb.c.
> My early criticism of both alloc_nodemask_of_node() and
> alloc_nodemask_of_mempolicy() was that for small CONFIG_NODES_SHIFT (say,
> 6 or less, which covers all defconfigs except ia64), it is perfectly
> reasonable to allocate 64 bytes on the stack in the caller.
Spose so. But this stuff is only called when userspace reconfigures
via sysfs, so it'll be low bandwidth (one sincerely hopes).
* Re: [PATCH 3/6] hugetlb: introduce alloc_nodemask_of_node
From: David Rientjes @ 2009-09-10 23:43 UTC (permalink / raw)
To: Andrew Morton
Cc: lee.schermerhorn, linux-mm, linux-numa, mel, randy.dunlap, nacc,
agl, apw, eric.whitney
On Thu, 10 Sep 2009, Andrew Morton wrote:
> > > alloc_nodemask_of_node() has no callers, so I can think of a good fix
> > > for these problems. If it _did_ have a caller then I might ask "can't
> > > we fix this by moving alloc_nodemask_of_node() into the .c file". But
> > > it doesn't so I can't.
> > >
> >
> > It gets a caller in patch 5 of the series in set_max_huge_pages().
>
> ooh, there it is.
>
> So alloc_nodemask_of_node() could be moved into mm/hugetlb.c.
>
We discussed that, but the consensus was that it is specific to
mempolicies, not hugepages. Perhaps someday it will gain another caller.
> > My early criticism of both alloc_nodemask_of_node() and
> > alloc_nodemask_of_mempolicy() was that for small CONFIG_NODES_SHIFT (say,
> > 6 or less, which covers all defconfigs except ia64), it is perfectly
> > reasonable to allocate 64 bytes on the stack in the caller.
>
> Spose so. But this stuff is only called when userspace reconfigures
> via sysfs, so it'll be low bandwidth (one sincerely hopes).
>
True, but order-0 GFP_KERNEL allocations will loop forever in the page
allocator and kill off tasks if it can't allocate memory. That wouldn't
necessarily be a cause for concern other than the fact that this tunable
is already frequently written when memory is low to reclaim pages.
[ If we're really tailoring it only for its current use case, though, the
stack could easily support even NODES_SHIFT of 10. ]
* Re: [PATCH 3/6] hugetlb: introduce alloc_nodemask_of_node
From: Lee Schermerhorn @ 2009-09-11 13:11 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-mm, linux-numa, mel, randy.dunlap, nacc, rientjes, agl,
apw, eric.whitney
On Thu, 2009-09-10 at 16:05 -0700, Andrew Morton wrote:
> On Wed, 09 Sep 2009 12:31:46 -0400
> Lee Schermerhorn <lee.schermerhorn@hp.com> wrote:
>
> > [PATCH 3/6] - hugetlb: introduce alloc_nodemask_of_node()
> >
> > Against: 2.6.31-rc7-mmotm-090827-1651
> >
> > New in V5 of series
> >
> > V6: + rename 'init_nodemask_of_nodes()' to 'init_nodemask_of_node()'
> > + redefine init_nodemask_of_node() as static inline fcn
> > + move this patch back 1 in series
> >
> > Introduce nodemask macro to allocate a nodemask and
> > initialize it to contain a single node, using the macro
> > init_nodemask_of_node() factored out of the nodemask_of_node()
> > macro.
> >
> > alloc_nodemask_of_node() coded as a macro to avoid header
> > dependency hell.
> >
> > This will be used to construct the huge pages "nodes_allowed"
> > nodemask for a single node when basing nodes_allowed on a
> > preferred/local mempolicy or when a persistent huge page
> > pool page count is modified via a per node sysfs attribute.
> >
> > ...
> >
> > +/*
> > + * returns pointer to kmalloc()'d nodemask initialized to contain the
> > + * specified node. Caller must free with kfree().
> > + */
> > +#define alloc_nodemask_of_node(node) \
> > +({ \
> > + typeof(_unused_nodemask_arg_) *nmp; \
> > + nmp = kmalloc(sizeof(*nmp), GFP_KERNEL); \
> > + if (nmp) \
> > + init_nodemask_of_node(nmp, (node)); \
> > + nmp; \
> > +})
>
> All right, I give up. What's with this `typeof(_unused_nodemask_arg_)'
> stuff?
You got me. I would have used a bare nodemask_t, but I was following the
style of nodemask_of_node() in the same header.
>
>
> Was there a reason why this had to be implemented as a macro? One
> which evaluates its arg either one or zero times, btw?
Well, one, unless the alloc fails.
>
> hm. "to avoid header dependency hell". What hell? Self-inflicted?
Well, I tried to make it a static inline function, but nodemask.h gets
included, indirectly, in various places where, e.g., kmalloc() is not
defined. I tried including slab.h, but that had problems with other
missing definitions. I didn't want to end up with the entire
include/linux directory included in nodemask.h.
I would have put it in a .c file, but there is no, e.g., nodemask.c.
Guess I could have created alloc_bitmap_of_bit() in bitmap.c with a
wrapper in nodemask.h. Would that be preferable?
>
> alloc_nodemask_of_node() has no callers, so I can think of a good fix
> for these problems. If it _did_ have a caller then I might ask "can't
> we fix this by moving alloc_nodemask_of_node() into the .c file". But
> it doesn't so I can't.
This patch was a later addition. The function is used by the following
patch. Originally, I had a private function in hugetlb.c that
kmalloc()'d and initialized the nodes_allowed mask. Mel suggested that
I use the generic nodemask_of_node(). That didn't have the semantics I
wanted, so I created this variant.
>
> It's a bit rude to assume that the caller wanted to use GFP_KERNEL.
I can add a gfp_t parameter to the macro, but I'll still need to select
value in the caller. Do you have a suggested alternative to GFP_KERNEL
[for both here and in alloc_nodemask_of_mempolicy()]? We certainly
don't want to loop forever, killing off tasks, as David mentioned.
Silently failing is OK. We handle that.
Lee
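[ Sketch of the macro with a gfp_t parameter, as discussed above;
illustrative only, not part of the posted series:

	#define alloc_nodemask_of_node(node, gfp)		\
	({							\
		typeof(_unused_nodemask_arg_) *nmp;		\
		nmp = kmalloc(sizeof(*nmp), (gfp));		\
		if (nmp)					\
			init_nodemask_of_node(nmp, (node));	\
		nmp;						\
	})
]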
* Re: [PATCH 3/6] hugetlb: introduce alloc_nodemask_of_node
From: David Rientjes @ 2009-09-11 22:38 UTC (permalink / raw)
To: Lee Schermerhorn
Cc: Andrew Morton, linux-mm, linux-numa, mel, randy.dunlap, nacc,
agl, apw, eric.whitney
On Fri, 11 Sep 2009, Lee Schermerhorn wrote:
> > It's a bit rude to assume that the caller wanted to use GFP_KERNEL.
>
> I can add a gfp_t parameter to the macro, but I'll still need to select
> value in the caller. Do you have a suggested alternative to GFP_KERNEL
> [for both here and in alloc_nodemask_of_mempolicy()]? We certainly
> don't want to loop forever, killing off tasks, as David mentioned.
> Silently failing is OK. We handle that.
>
Dynamically allocating the nodemask_t for small NODES_SHIFT and failing to
find adequate memory isn't as troublesome as I may have made it sound;
it's only a problem if we're low on memory and can't do order-0 GFP_KERNEL
allocations and the kmalloc cache for that size is full. That's going to
be extremely rare, but the first requirement, being low on memory, is one
of the reasons why people traditionally free hugepages via the tunable.
As far as the software engineering of alloc_nodemask_of_node() goes, I'd
defer back to my previous suggestion of modifying NODEMASK_ALLOC() which
has very much the same purpose. It's also only used with mempolicies
because we're frequently dealing with the same issue; this is not unique
only to hugetlb, which is probably why it was made generic in the first
place.
It has the added benefit of also incorporating my other suggestion, which
was to allocate these on the stack when NODES_SHIFT is small, which it
defaults to for all architectures other than ia64. I think it would be
nice to avoid the slab allocator for relatively small (<= 256 bytes?)
amounts of memory that could otherwise be stack allocated. That's more of
a general statement with regard to the entire kernel, but I don't think
you'll find much benefit in always allocating them from slab for code
clarity when NODEMASK_ALLOC() exists for the same purpose such as
set_mempolicy(), mbind(), etc.
So I'd ask that you reconsider using NODEMASK_ALLOC() by making it more
general (i.e. not just allocating "structs of <name>" but rather pass in
the entire type such as "nodemask_t" or "struct nodemask_scratch") and
then using it to dynamically allocate your hugetlb nodemasks when
necessary because of their size.
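[ Sketch of the suggested generalization--pass the complete type
rather than a struct name; illustrative only, not part of the posted
series:

	#if NODES_SHIFT > 8		/* large nodemask_t: kmalloc() it */
	#define NODEMASK_ALLOC(type, name)	\
			type *name = kmalloc(sizeof(*name), GFP_KERNEL)
	#define NODEMASK_FREE(m)	kfree(m)
	#else				/* small enough for the stack */
	#define NODEMASK_ALLOC(type, name)	type _##name, *name = &_##name
	#define NODEMASK_FREE(m)	do { } while (0)
	#endif

	NODEMASK_ALLOC(nodemask_t, nodes_allowed);
]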
* [PATCH 4/6] hugetlb: derive huge pages nodes allowed from task mempolicy
From: Lee Schermerhorn @ 2009-09-09 16:31 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
[PATCH 4/6] hugetlb: derive huge pages nodes allowed from task mempolicy
Against: 2.6.31-rc7-mmotm-090827-1651
V2: + cleaned up comments, removed some deemed unnecessary,
added some suggested by review
+ removed check for !current in huge_mpol_nodes_allowed().
+ added 'current->comm' to warning message in huge_mpol_nodes_allowed().
+ added VM_BUG_ON() assertion in hugetlb.c next_node_allowed() to
catch out of range node id.
+ add examples to patch description
V3: Factored this patch from V2 patch 2/3
V4: added back missing "kfree(nodes_allowed)" in set_max_nr_hugepages()
V5: remove internal '\n' from printk in huge_mpol_nodes_allowed()
V6: + rename 'huge_mpol_nodes_allowed()" to "alloc_nodemask_of_mempolicy()"
+ move the printk() when we can't kmalloc() a nodemask_t to
set_max_huge_pages(), as alloc_nodemask_of_mempolicy() is no longer
hugepage specific.
+ handle movement of nodes_allowed initialization:
++ Don't kfree() nodes_allowed when it points at node_online_map.
This patch derives a "nodes_allowed" node mask from the numa
mempolicy of the task modifying the number of persistent huge
pages to control the allocation, freeing and adjusting of surplus
huge pages. This mask is derived as follows:
* For "default" [NULL] task mempolicy, a NULL nodemask_t pointer
is produced. This will cause the hugetlb subsystem to use
node_online_map as the "nodes_allowed". This preserves the
behavior before this patch.
* For "preferred" mempolicy, including explicit local allocation,
a nodemask with the single preferred node will be produced.
"local" policy will NOT track any internode migrations of the
task adjusting nr_hugepages.
* For "bind" and "interleave" policy, the mempolicy's nodemask
will be used.
* Other than to inform the construction of the nodes_allowed node
mask, the actual mempolicy mode is ignored. That is, all modes
behave like interleave over the resulting nodes_allowed mask
with no "fallback".
Notes:
1) This patch introduces a subtle change in behavior: huge page
allocation and freeing will be constrained by any mempolicy
that the task adjusting the huge page pool inherits from its
parent. This policy could come from a distant ancestor. The
administrator adjusting the huge page pool without explicitly
specifying a mempolicy via numactl might be surprised by this.
Additionally, any mempolicy specified by numactl will be
constrained by the cpuset in which numactl is invoked.
Using sysfs per node hugepages attributes to adjust the per
node persistent huge pages count [subsequent patch] ignores
mempolicy and cpuset constraints.
2) Hugepages allocated at boot time use the node_online_map.
An additional patch could implement a temporary boot time
huge pages nodes_allowed command line parameter.
3) Using mempolicy to control persistent huge page allocation
and freeing requires no change to hugeadm when invoking
it via numactl, as shown in the examples below. However,
hugeadm could be enhanced to take the allowed nodes as an
argument and set its task mempolicy itself. This would allow
it to detect and warn about any non-default mempolicy that it
inherited from its parent, thus alleviating the issue described
in Note 1 above.
See the updated documentation [next patch] for more information
about the implications of this patch.
Examples:
Starting with:
Node 0 HugePages_Total: 0
Node 1 HugePages_Total: 0
Node 2 HugePages_Total: 0
Node 3 HugePages_Total: 0
Default behavior [with or without this patch] balances persistent
hugepage allocation across nodes [with sufficient contiguous memory]:
hugeadm --pool-pages-min=2048Kb:32
yields:
Node 0 HugePages_Total: 8
Node 1 HugePages_Total: 8
Node 2 HugePages_Total: 8
Node 3 HugePages_Total: 8
Applying mempolicy--e.g., with numactl [using '-m' a.k.a.
'--membind' because it allows multiple nodes to be specified
and it's easy to type]--we can allocate huge pages on
individual nodes or sets of nodes. So, starting from the
condition above, with 8 huge pages per node:
numactl -m 2 hugeadm --pool-pages-min=2048Kb:+8
yields:
Node 0 HugePages_Total: 8
Node 1 HugePages_Total: 8
Node 2 HugePages_Total: 16
Node 3 HugePages_Total: 8
The incremental 8 huge pages were restricted to node 2 by the
specified mempolicy.
Similarly, we can use mempolicy to free persistent huge pages
from specified nodes:
numactl -m 0,1 hugeadm --pool-pages-min=2048Kb:-8
yields:
Node 0 HugePages_Total: 4
Node 1 HugePages_Total: 4
Node 2 HugePages_Total: 16
Node 3 HugePages_Total: 8
The 8 huge pages freed were balanced over nodes 0 and 1.
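[ The per node counts shown in these examples can be read from the
node meminfo files, assuming the standard sysfs node layout:

	grep HugePages_Total /sys/devices/system/node/node*/meminfo
]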
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
include/linux/mempolicy.h | 3 ++
mm/hugetlb.c | 12 +++++++++-
mm/mempolicy.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 65 insertions(+), 1 deletion(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/mm/mempolicy.c
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/mm/mempolicy.c 2009-09-09 11:57:26.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/mm/mempolicy.c 2009-09-09 11:57:36.000000000 -0400
@@ -1564,6 +1564,57 @@ struct zonelist *huge_zonelist(struct vm
}
return zl;
}
+
+/*
+ * alloc_nodemask_of_mempolicy
+ *
+ * Returns a [pointer to a] nodelist based on the current task's mempolicy.
+ *
+ * If the task's mempolicy is "default" [NULL], return NULL for default
+ * behavior. Otherwise, extract the policy nodemask for 'bind'
+ * or 'interleave' policy or construct a nodemask for 'preferred' or
+ * 'local' policy and return a pointer to a kmalloc()ed nodemask_t.
+ *
+ * N.B., it is the caller's responsibility to free a returned nodemask.
+ */
+nodemask_t *alloc_nodemask_of_mempolicy(void)
+{
+ nodemask_t *nodes_allowed = NULL;
+ struct mempolicy *mempolicy;
+ int nid;
+
+ if (!current->mempolicy)
+ return NULL;
+
+ mpol_get(current->mempolicy);
+ nodes_allowed = kmalloc(sizeof(*nodes_allowed), GFP_KERNEL);
+ if (!nodes_allowed)
+ return NULL; /* silently default */
+
+ nodes_clear(*nodes_allowed);
+ mempolicy = current->mempolicy;
+ switch (mempolicy->mode) {
+ case MPOL_PREFERRED:
+ if (mempolicy->flags & MPOL_F_LOCAL)
+ nid = numa_node_id();
+ else
+ nid = mempolicy->v.preferred_node;
+ node_set(nid, *nodes_allowed);
+ break;
+
+ case MPOL_BIND:
+ /* Fall through */
+ case MPOL_INTERLEAVE:
+ *nodes_allowed = mempolicy->v.nodes;
+ break;
+
+ default:
+ BUG();
+ }
+
+ mpol_put(current->mempolicy);
+ return nodes_allowed;
+}
#endif
/* Allocate a page in interleaved policy.
Index: linux-2.6.31-rc7-mmotm-090827-1651/include/linux/mempolicy.h
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/include/linux/mempolicy.h 2009-09-09 11:57:26.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/include/linux/mempolicy.h 2009-09-09 11:57:36.000000000 -0400
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(str
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
+extern nodemask_t *alloc_nodemask_of_mempolicy(void);
extern unsigned slab_node(struct mempolicy *policy);
extern enum zone_type policy_zone;
@@ -328,6 +329,8 @@ static inline struct zonelist *huge_zone
return node_zonelist(0, gfp_flags);
}
+static inline nodemask_t *alloc_nodemask_of_mempolicy(void) { return NULL; }
+
static inline int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes,
const nodemask_t *to_nodes, int flags)
Index: linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/mm/hugetlb.c 2009-09-09 11:57:34.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c 2009-09-09 11:57:36.000000000 -0400
@@ -1246,11 +1246,19 @@ static int adjust_pool_surplus(struct hs
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
unsigned long min_count, ret;
- nodemask_t *nodes_allowed = &node_online_map;
+ nodemask_t *nodes_allowed;
if (h->order >= MAX_ORDER)
return h->max_huge_pages;
+ nodes_allowed = alloc_nodemask_of_mempolicy();
+ if (!nodes_allowed) {
+ printk(KERN_WARNING "%s unable to allocate nodes allowed mask "
+ "for huge page allocation. Falling back to default.\n",
+ current->comm);
+ nodes_allowed = &node_online_map;
+ }
+
/*
* Increase the pool size
* First take pages out of surplus state. Then make up the
@@ -1311,6 +1319,8 @@ static unsigned long set_max_huge_pages(
out:
ret = persistent_huge_pages(h);
spin_unlock(&hugetlb_lock);
+ if (nodes_allowed != &node_online_map)
+ kfree(nodes_allowed);
return ret;
}
* Re: [PATCH 4/6] hugetlb: derive huge pages nodes allowed from task mempolicy
2009-09-09 16:31 ` [PATCH 4/6] hugetlb: derive huge pages nodes allowed from task mempolicy Lee Schermerhorn
@ 2009-09-10 23:15 ` Andrew Morton
2009-09-11 13:12 ` Lee Schermerhorn
0 siblings, 1 reply; 44+ messages in thread
From: Andrew Morton @ 2009-09-10 23:15 UTC (permalink / raw)
To: Lee Schermerhorn
Cc: linux-mm, linux-numa, mel, randy.dunlap, nacc, rientjes, agl,
apw, eric.whitney
On Wed, 09 Sep 2009 12:31:52 -0400
Lee Schermerhorn <lee.schermerhorn@hp.com> wrote:
> This patch derives a "nodes_allowed" node mask from the numa
> mempolicy of the task modifying the number of persistent huge
> pages to control the allocation, freeing and adjusting of surplus
> huge pages.
>
> ...
>
> Index: linux-2.6.31-rc7-mmotm-090827-1651/mm/mempolicy.c
> ===================================================================
> --- linux-2.6.31-rc7-mmotm-090827-1651.orig/mm/mempolicy.c 2009-09-09 11:57:26.000000000 -0400
> +++ linux-2.6.31-rc7-mmotm-090827-1651/mm/mempolicy.c 2009-09-09 11:57:36.000000000 -0400
> @@ -1564,6 +1564,57 @@ struct zonelist *huge_zonelist(struct vm
> }
> return zl;
> }
> +
> +/*
> + * alloc_nodemask_of_mempolicy
> + *
> + * Returns a [pointer to a] nodelist based on the current task's mempolicy.
> + *
> + * If the task's mempolicy is "default" [NULL], return NULL for default
> + * behavior. Otherwise, extract the policy nodemask for 'bind'
> + * or 'interleave' policy or construct a nodemask for 'preferred' or
> + * 'local' policy and return a pointer to a kmalloc()ed nodemask_t.
> + *
> + * N.B., it is the caller's responsibility to free a returned nodemask.
> + */
> +nodemask_t *alloc_nodemask_of_mempolicy(void)
> +{
> + nodemask_t *nodes_allowed = NULL;
> + struct mempolicy *mempolicy;
> + int nid;
> +
> + if (!current->mempolicy)
> + return NULL;
> +
> + mpol_get(current->mempolicy);
> + nodes_allowed = kmalloc(sizeof(*nodes_allowed), GFP_KERNEL);
Ho hum. I guess a caller which didn't permit GFP_KERNEL would be
pretty lame.
> + if (!nodes_allowed)
> + return NULL; /* silently default */
Missed an mpol_put().
> + nodes_clear(*nodes_allowed);
> + mempolicy = current->mempolicy;
> + switch (mempolicy->mode) {
> + case MPOL_PREFERRED:
> + if (mempolicy->flags & MPOL_F_LOCAL)
> + nid = numa_node_id();
> + else
> + nid = mempolicy->v.preferred_node;
> + node_set(nid, *nodes_allowed);
> + break;
> +
> + case MPOL_BIND:
> + /* Fall through */
> + case MPOL_INTERLEAVE:
> + *nodes_allowed = mempolicy->v.nodes;
> + break;
> +
> + default:
> + BUG();
> + }
> +
> + mpol_put(current->mempolicy);
> + return nodes_allowed;
> +}
Do we actually need the mpol_get()/put here? Can some other process
really come in and trash a process's current->mempolicy when that
process isn't looking?
If so, why the heck isn't the code racy?
static inline void mpol_get(struct mempolicy *pol)
{
if (pol)
atomic_inc(&pol->refcnt);
}
If it's possible for some other task to trash current->mempolicy then
that trashing can happen between the `if' and the `atomic_inc', so
we're screwed.
So either we need some locking here or the mpol_get() isn't needed on
current's mempolicy or the mpol_get() has some secret side-effect?
Fixlets:
--- a/mm/hugetlb.c~hugetlb-derive-huge-pages-nodes-allowed-from-task-mempolicy-fix
+++ a/mm/hugetlb.c
@@ -1253,7 +1253,7 @@ static unsigned long set_max_huge_pages(
nodes_allowed = alloc_nodemask_of_mempolicy();
if (!nodes_allowed) {
- printk(KERN_WARNING "%s unable to allocate nodes allowed mask "
+ printk(KERN_WARNING "%s: unable to allocate nodes allowed mask "
"for huge page allocation. Falling back to default.\n",
current->comm);
nodes_allowed = &node_online_map;
--- a/mm/mempolicy.c~hugetlb-derive-huge-pages-nodes-allowed-from-task-mempolicy-fix
+++ a/mm/mempolicy.c
@@ -1589,7 +1589,7 @@ nodemask_t *alloc_nodemask_of_mempolicy(
mpol_get(current->mempolicy);
nodes_allowed = kmalloc(sizeof(*nodes_allowed), GFP_KERNEL);
if (!nodes_allowed)
- return NULL; /* silently default */
+ goto out; /* silently default */
nodes_clear(*nodes_allowed);
mempolicy = current->mempolicy;
@@ -1611,7 +1611,7 @@ nodemask_t *alloc_nodemask_of_mempolicy(
default:
BUG();
}
-
+out:
mpol_put(current->mempolicy);
return nodes_allowed;
}
--
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [PATCH 4/6] hugetlb: derive huge pages nodes allowed from task mempolicy
2009-09-10 23:15 ` Andrew Morton
@ 2009-09-11 13:12 ` Lee Schermerhorn
0 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-11 13:12 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-mm, linux-numa, mel, randy.dunlap, nacc, rientjes, agl,
apw, eric.whitney
On Thu, 2009-09-10 at 16:15 -0700, Andrew Morton wrote:
> On Wed, 09 Sep 2009 12:31:52 -0400
> Lee Schermerhorn <lee.schermerhorn@hp.com> wrote:
>
> > This patch derives a "nodes_allowed" node mask from the numa
> > mempolicy of the task modifying the number of persistent huge
> > pages to control the allocation, freeing and adjusting of surplus
> > huge pages.
> >
> > ...
> >
>
> > Index: linux-2.6.31-rc7-mmotm-090827-1651/mm/mempolicy.c
> > ===================================================================
> > --- linux-2.6.31-rc7-mmotm-090827-1651.orig/mm/mempolicy.c 2009-09-09 11:57:26.000000000 -0400
> > +++ linux-2.6.31-rc7-mmotm-090827-1651/mm/mempolicy.c 2009-09-09 11:57:36.000000000 -0400
> > @@ -1564,6 +1564,57 @@ struct zonelist *huge_zonelist(struct vm
> > }
> > return zl;
> > }
> > +
> > +/*
> > + * alloc_nodemask_of_mempolicy
> > + *
> > + * Returns a [pointer to a] nodelist based on the current task's mempolicy.
> > + *
> > + * If the task's mempolicy is "default" [NULL], return NULL for default
> > + * behavior. Otherwise, extract the policy nodemask for 'bind'
> > + * or 'interleave' policy or construct a nodemask for 'preferred' or
> > + * 'local' policy and return a pointer to a kmalloc()ed nodemask_t.
> > + *
> > + * N.B., it is the caller's responsibility to free a returned nodemask.
> > + */
> > +nodemask_t *alloc_nodemask_of_mempolicy(void)
> > +{
> > + nodemask_t *nodes_allowed = NULL;
> > + struct mempolicy *mempolicy;
> > + int nid;
> > +
> > + if (!current->mempolicy)
> > + return NULL;
> > +
> > + mpol_get(current->mempolicy);
> > + nodes_allowed = kmalloc(sizeof(*nodes_allowed), GFP_KERNEL);
>
> Ho hum. I guess a caller which didn't permit GFP_KERNEL would be
> pretty lame.
>
> > + if (!nodes_allowed)
> > + return NULL; /* silently default */
>
> Missed an mpol_put().
Ah, yes. But, see below...
>
> > + nodes_clear(*nodes_allowed);
> > + mempolicy = current->mempolicy;
> > + switch (mempolicy->mode) {
> > + case MPOL_PREFERRED:
> > + if (mempolicy->flags & MPOL_F_LOCAL)
> > + nid = numa_node_id();
> > + else
> > + nid = mempolicy->v.preferred_node;
> > + node_set(nid, *nodes_allowed);
> > + break;
> > +
> > + case MPOL_BIND:
> > + /* Fall through */
> > + case MPOL_INTERLEAVE:
> > + *nodes_allowed = mempolicy->v.nodes;
> > + break;
> > +
> > + default:
> > + BUG();
> > + }
> > +
> > + mpol_put(current->mempolicy);
> > + return nodes_allowed;
> > +}
>
> Do we actually need the mpol_get()/put here? Can some other process
> really come in and trash a process's current->mempolicy when that
> process isn't looking?
You're correct. In this context, I can/will eliminate the get/put.
We only really need the reference count in two places:
1) for shared policies [shmem, ...]: one task could be replacing a
shared policy while another task is trying to allocate using its
nodemask.
2) for show_numa_maps(), as we're looking at another task's mempolicy
[when vma policies default to task mempolicy].
But, here, where the current task is examining its own mempolicy, we
don't need the get/put, as only the task itself can change its
mempolicy.
>
> If so, why the heck isn't the code racy?
>
> static inline void mpol_get(struct mempolicy *pol)
> {
> if (pol)
> atomic_inc(&pol->refcnt);
> }
>
> If it's possible for some other task to trash current->mempolicy then
> that trashing can happen between the `if' and the `atomic_inc', so
> we're screwed.
>
> So either we need some locking here or the mpol_get() isn't needed on
> current's mempolicy or the mpol_get() has some secret side-effect?
Good point. Need to revisit this, altho' not in the context of this
series, IMO. May need an atomic_inc_if_nonzero() or such there.
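A minimal sketch of that idea [mpol_tryget() is a made-up name;
atomic_inc_not_zero() does exist]:

static inline int mpol_tryget(struct mempolicy *pol)
{
	/* take a reference only if the policy is still live */
	return pol && atomic_inc_not_zero(&pol->refcnt);
}

A caller that loses the race would just fall back to default policy.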
>
>
> Fixlets:
>
> --- a/mm/hugetlb.c~hugetlb-derive-huge-pages-nodes-allowed-from-task-mempolicy-fix
> +++ a/mm/hugetlb.c
> @@ -1253,7 +1253,7 @@ static unsigned long set_max_huge_pages(
>
> nodes_allowed = alloc_nodemask_of_mempolicy();
> if (!nodes_allowed) {
> - printk(KERN_WARNING "%s unable to allocate nodes allowed mask "
> + printk(KERN_WARNING "%s: unable to allocate nodes allowed mask "
> "for huge page allocation. Falling back to default.\n",
> current->comm);
> nodes_allowed = &node_online_map;
> --- a/mm/mempolicy.c~hugetlb-derive-huge-pages-nodes-allowed-from-task-mempolicy-fix
> +++ a/mm/mempolicy.c
> @@ -1589,7 +1589,7 @@ nodemask_t *alloc_nodemask_of_mempolicy(
> mpol_get(current->mempolicy);
> nodes_allowed = kmalloc(sizeof(*nodes_allowed), GFP_KERNEL);
> if (!nodes_allowed)
> - return NULL; /* silently default */
> + goto out; /* silently default */
>
> nodes_clear(*nodes_allowed);
> mempolicy = current->mempolicy;
> @@ -1611,7 +1611,7 @@ nodemask_t *alloc_nodemask_of_mempolicy(
> default:
> BUG();
> }
> -
> +out:
> mpol_put(current->mempolicy);
> return nodes_allowed;
> }
>
--
^ permalink raw reply [flat|nested] 44+ messages in thread
* [PATCH 5/6] hugetlb: add per node hstate attributes
2009-09-09 16:31 ` Lee Schermerhorn
@ 2009-09-09 16:31 ` Lee Schermerhorn
-1 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-09 16:31 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
[PATCH 5/6] hugetlb: register per node hugepages attributes
Against: 2.6.31-rc7-mmotm-090827-1651
V2: remove dependency on kobject private bitfield. Search
global hstates then all per node hstates for kobject
match in attribute show/store functions.
V3: rebase atop the mempolicy-based hugepage alloc/free;
use custom "nodes_allowed" to restrict alloc/free to
a specific node via per node attributes. Per node
attribute overrides mempolicy. I.e., mempolicy only
applies to global attributes.
V5: Fix issues raised by Mel Gorman:
+ add !NUMA versions of hugetlb_[un]register_node()
+ rename 'hi' to 'i' in kobj_to_node_hstate()
+ rename (count, input) to (len, count) in nr_hugepages_store()
+ moved per node hugepages_kobj and hstate_kobjs[] from the
struct node [sysdev] to hugetlb.c private arrays.
+ changed registration mechanism so that hugetlbfs [a module]
registers its attribute registration callbacks with the node
driver, eliminating the dependency between the node driver
and hugetlbfs. From its init func, hugetlbfs will register
all on-line nodes' hugepage sysfs attributes along with
hugetlbfs' attributes register/unregister functions. The
node driver will use these functions to [un]register nodes
with hugetlbfs on node hot-plug.
+ replaced hugetlb.c private "nodes_allowed_from_node()" with
[new] generic "alloc_nodemask_of_node()".
V5a: + fix !NUMA register_hugetlbfs_with_node(): don't use
keyword 'do' as parameter name!
V6: + Use NUMA_NO_NODE for unspecified node id throughout hugetlb.c
to indicate that we didn't get there via a per node attribute.
Drop redundant "NO_NODEID_SPECIFIED" definition.
+ handle movement of defaulting of nodes_allowed up to
set_max_huge_pages()
This patch adds the per huge page size control/query attributes
to the per node sysdevs:
/sys/devices/system/node/node<ID>/hugepages/hugepages-<size>/
nr_hugepages - r/w
free_huge_pages - r/o
surplus_huge_pages - r/o
The patch attempts to re-use/share as much of the existing
global hstate attribute initialization and handling, and the
"nodes_allowed" constraint processing as possible.
Calling set_max_huge_pages() with no node indicates a change to
global hstate parameters. In this case, any non-default task
mempolicy will be used to generate the nodes_allowed mask. A
valid node id indicates an update to that node's hstate
parameters, and the count argument specifies the target count
for the specified node. From this info, we compute the target
global count for the hstate and construct a nodes_allowed node
mask containing only the specified node.
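For example [numbers illustrative]: if the hstate holds 20 huge pages
globally, 4 of them on node 2, then writing 16 to node 2's
nr_hugepages yields a global target of 20 - 4 + 16 = 32, with
alloc/free restricted to node 2. The single-node mask construction
might look like the sketch below [the real alloc_nodemask_of_node()
is added by a separate patch; this version is illustrative only]:

static nodemask_t *alloc_nodemask_of_node(int nid)
{
	nodemask_t *mask = kmalloc(sizeof(*mask), GFP_KERNEL);

	if (mask) {
		nodes_clear(*mask);	/* start with an empty mask */
		node_set(nid, *mask);	/* allow only 'nid' */
	}
	return mask;			/* caller must kfree() */
}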
Setting the node specific nr_hugepages via the per node attribute
effectively ignores any task mempolicy or cpuset constraints.
With this patch:
(me):ls /sys/devices/system/node/node0/hugepages/hugepages-2048kB
./ ../ free_hugepages nr_hugepages surplus_hugepages
Starting from:
Node 0 HugePages_Total: 0
Node 0 HugePages_Free: 0
Node 0 HugePages_Surp: 0
Node 1 HugePages_Total: 0
Node 1 HugePages_Free: 0
Node 1 HugePages_Surp: 0
Node 2 HugePages_Total: 0
Node 2 HugePages_Free: 0
Node 2 HugePages_Surp: 0
Node 3 HugePages_Total: 0
Node 3 HugePages_Free: 0
Node 3 HugePages_Surp: 0
vm.nr_hugepages = 0
Allocate 16 persistent huge pages on node 2:
(me):echo 16 >/sys/devices/system/node/node2/hugepages/hugepages-2048kB/nr_hugepages
[Note that this is equivalent to:
numactl -m 2 hugeadmin --pool-pages-min 2M:+16
]
Yields:
Node 0 HugePages_Total: 0
Node 0 HugePages_Free: 0
Node 0 HugePages_Surp: 0
Node 1 HugePages_Total: 0
Node 1 HugePages_Free: 0
Node 1 HugePages_Surp: 0
Node 2 HugePages_Total: 16
Node 2 HugePages_Free: 16
Node 2 HugePages_Surp: 0
Node 3 HugePages_Total: 0
Node 3 HugePages_Free: 0
Node 3 HugePages_Surp: 0
vm.nr_hugepages = 16
Global controls work as expected--reduce pool to 8 persistent huge pages:
(me):echo 8 >/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
Node 0 HugePages_Total: 0
Node 0 HugePages_Free: 0
Node 0 HugePages_Surp: 0
Node 1 HugePages_Total: 0
Node 1 HugePages_Free: 0
Node 1 HugePages_Surp: 0
Node 2 HugePages_Total: 8
Node 2 HugePages_Free: 8
Node 2 HugePages_Surp: 0
Node 3 HugePages_Total: 0
Node 3 HugePages_Free: 0
Node 3 HugePages_Surp: 0
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
drivers/base/node.c | 33 +++++++
include/linux/node.h | 8 +
mm/hugetlb.c | 240 ++++++++++++++++++++++++++++++++++++++++++++-------
3 files changed, 251 insertions(+), 30 deletions(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/drivers/base/node.c 2009-09-09 11:57:26.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c 2009-09-09 11:57:37.000000000 -0400
@@ -177,6 +177,37 @@ static ssize_t node_read_distance(struct
}
static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+/*
+ * hugetlbfs per node attributes registration interface:
+ * When/if hugetlb[fs] subsystem initializes [sometime after this module],
+ * it will register its per node attributes for all nodes on-line at that
+ * point. It will also call register_hugetlbfs_with_node(), below, to
+ * register its attribute registration functions with this node driver.
+ * Once these hooks have been initialized, the node driver will call into
+ * the hugetlb module to [un]register attributes for hot-plugged nodes.
+ */
+NODE_REGISTRATION_FUNC __hugetlb_register_node;
+NODE_REGISTRATION_FUNC __hugetlb_unregister_node;
+
+static inline void hugetlb_register_node(struct node *node)
+{
+ if (__hugetlb_register_node)
+ __hugetlb_register_node(node);
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+ if (__hugetlb_unregister_node)
+ __hugetlb_unregister_node(node);
+}
+
+void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC doregister,
+ NODE_REGISTRATION_FUNC unregister)
+{
+ __hugetlb_register_node = doregister;
+ __hugetlb_unregister_node = unregister;
+}
+
/*
* register_node - Setup a sysfs device for a node.
@@ -200,6 +231,7 @@ int register_node(struct node *node, int
sysdev_create_file(&node->sysdev, &attr_distance);
scan_unevictable_register_node(node);
+ hugetlb_register_node(node);
}
return error;
}
@@ -220,6 +252,7 @@ void unregister_node(struct node *node)
sysdev_remove_file(&node->sysdev, &attr_distance);
scan_unevictable_unregister_node(node);
+ hugetlb_unregister_node(node);
sysdev_unregister(&node->sysdev);
}
Index: linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/mm/hugetlb.c 2009-09-09 11:57:36.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c 2009-09-09 11:57:37.000000000 -0400
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <linux/hugetlb.h>
+#include <linux/node.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
@@ -1243,7 +1244,8 @@ static int adjust_pool_surplus(struct hs
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
+static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+ int nid)
{
unsigned long min_count, ret;
nodemask_t *nodes_allowed;
@@ -1251,7 +1253,17 @@ static unsigned long set_max_huge_pages(
if (h->order >= MAX_ORDER)
return h->max_huge_pages;
- nodes_allowed = alloc_nodemask_of_mempolicy();
+ if (nid == NUMA_NO_NODE) {
+ nodes_allowed = alloc_nodemask_of_mempolicy();
+ } else {
+ /*
+ * incoming 'count' is for node 'nid' only, so
+ * adjust count to global, but restrict alloc/free
+ * to the specified node.
+ */
+ count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
+ nodes_allowed = alloc_nodemask_of_node(nid);
+ }
if (!nodes_allowed) {
printk(KERN_WARNING "%s unable to allocate nodes allowed mask "
"for huge page allocation. Falling back to default.\n",
@@ -1334,51 +1346,71 @@ out:
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
-static struct hstate *kobj_to_hstate(struct kobject *kobj)
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
+
+static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
int i;
+
for (i = 0; i < HUGE_MAX_HSTATE; i++)
- if (hstate_kobjs[i] == kobj)
+ if (hstate_kobjs[i] == kobj) {
+ if (nidp)
+ *nidp = NUMA_NO_NODE;
return &hstates[i];
- BUG();
- return NULL;
+ }
+
+ return kobj_to_node_hstate(kobj, nidp);
}
static ssize_t nr_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->nr_huge_pages);
+ struct hstate *h;
+ unsigned long nr_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ nr_huge_pages = h->nr_huge_pages;
+ else
+ nr_huge_pages = h->nr_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", nr_huge_pages);
}
+
static ssize_t nr_hugepages_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+ struct kobj_attribute *attr, const char *buf, size_t len)
{
+ unsigned long count;
+ struct hstate *h;
+ int nid;
int err;
- unsigned long input;
- struct hstate *h = kobj_to_hstate(kobj);
- err = strict_strtoul(buf, 10, &input);
+ err = strict_strtoul(buf, 10, &count);
if (err)
return 0;
- h->max_huge_pages = set_max_huge_pages(h, input);
+ h = kobj_to_hstate(kobj, &nid);
+ h->max_huge_pages = set_max_huge_pages(h, count, nid);
- return count;
+ return len;
}
HSTATE_ATTR(nr_hugepages);
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
+
return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
+
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int err;
unsigned long input;
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
err = strict_strtoul(buf, 10, &input);
if (err)
@@ -1395,15 +1427,24 @@ HSTATE_ATTR(nr_overcommit_hugepages);
static ssize_t free_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->free_huge_pages);
+ struct hstate *h;
+ unsigned long free_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ free_huge_pages = h->free_huge_pages;
+ else
+ free_huge_pages = h->free_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);
static ssize_t resv_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);
@@ -1411,8 +1452,17 @@ HSTATE_ATTR_RO(resv_hugepages);
static ssize_t surplus_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->surplus_huge_pages);
+ struct hstate *h;
+ unsigned long surplus_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ surplus_huge_pages = h->surplus_huge_pages;
+ else
+ surplus_huge_pages = h->surplus_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
@@ -1429,19 +1479,21 @@ static struct attribute_group hstate_att
.attrs = hstate_attrs,
};
-static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
+static int __init hugetlb_sysfs_add_hstate(struct hstate *h,
+ struct kobject *parent,
+ struct kobject **hstate_kobjs,
+ struct attribute_group *hstate_attr_group)
{
int retval;
+ int hi = h - hstates;
- hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
- hugepages_kobj);
- if (!hstate_kobjs[h - hstates])
+ hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
+ if (!hstate_kobjs[hi])
return -ENOMEM;
- retval = sysfs_create_group(hstate_kobjs[h - hstates],
- &hstate_attr_group);
+ retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
if (retval)
- kobject_put(hstate_kobjs[h - hstates]);
+ kobject_put(hstate_kobjs[hi]);
return retval;
}
@@ -1456,17 +1508,143 @@ static void __init hugetlb_sysfs_init(vo
return;
for_each_hstate(h) {
- err = hugetlb_sysfs_add_hstate(h);
+ err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
+ hstate_kobjs, &hstate_attr_group);
if (err)
printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
h->name);
}
}
+#ifdef CONFIG_NUMA
+
+struct node_hstate {
+ struct kobject *hugepages_kobj;
+ struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
+};
+struct node_hstate node_hstates[MAX_NUMNODES];
+
+static struct attribute *per_node_hstate_attrs[] = {
+ &nr_hugepages_attr.attr,
+ &free_hugepages_attr.attr,
+ &surplus_hugepages_attr.attr,
+ NULL,
+};
+
+static struct attribute_group per_node_hstate_attr_group = {
+ .attrs = per_node_hstate_attrs,
+};
+
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+ int nid;
+
+ for (nid = 0; nid < nr_node_ids; nid++) {
+ struct node_hstate *nhs = &node_hstates[nid];
+ int i;
+ for (i = 0; i < HUGE_MAX_HSTATE; i++)
+ if (nhs->hstate_kobjs[i] == kobj) {
+ if (nidp)
+ *nidp = nid;
+ return &hstates[i];
+ }
+ }
+
+ BUG();
+ return NULL;
+}
+
+void hugetlb_unregister_node(struct node *node)
+{
+ struct hstate *h;
+ struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+
+ if (!nhs->hugepages_kobj)
+ return;
+
+ for_each_hstate(h)
+ if (nhs->hstate_kobjs[h - hstates]) {
+ kobject_put(nhs->hstate_kobjs[h - hstates]);
+ nhs->hstate_kobjs[h - hstates] = NULL;
+ }
+
+ kobject_put(nhs->hugepages_kobj);
+ nhs->hugepages_kobj = NULL;
+}
+
+static void hugetlb_unregister_all_nodes(void)
+{
+ int nid;
+
+ for (nid = 0; nid < nr_node_ids; nid++)
+ hugetlb_unregister_node(&node_devices[nid]);
+
+ register_hugetlbfs_with_node(NULL, NULL);
+}
+
+void hugetlb_register_node(struct node *node)
+{
+ struct hstate *h;
+ struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+ int err;
+
+ if (nhs->hugepages_kobj)
+ return; /* already allocated */
+
+ nhs->hugepages_kobj = kobject_create_and_add("hugepages",
+ &node->sysdev.kobj);
+ if (!nhs->hugepages_kobj)
+ return;
+
+ for_each_hstate(h) {
+ err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
+ nhs->hstate_kobjs,
+ &per_node_hstate_attr_group);
+ if (err) {
+ printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
+ " for node %d\n",
+ h->name, node->sysdev.id);
+ hugetlb_unregister_node(node);
+ break;
+ }
+ }
+}
+
+static void hugetlb_register_all_nodes(void)
+{
+ int nid;
+
+ for (nid = 0; nid < nr_node_ids; nid++) {
+ struct node *node = &node_devices[nid];
+ if (node->sysdev.id == nid)
+ hugetlb_register_node(node);
+ }
+
+ register_hugetlbfs_with_node(hugetlb_register_node,
+ hugetlb_unregister_node);
+}
+#else /* !CONFIG_NUMA */
+
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+ BUG();
+ if (nidp)
+ *nidp = -1;
+ return NULL;
+}
+
+static void hugetlb_unregister_all_nodes(void) { }
+
+static void hugetlb_register_all_nodes(void) { }
+
+#endif
+
static void __exit hugetlb_exit(void)
{
struct hstate *h;
+ hugetlb_unregister_all_nodes();
+
for_each_hstate(h) {
kobject_put(hstate_kobjs[h - hstates]);
}
@@ -1501,6 +1679,8 @@ static int __init hugetlb_init(void)
hugetlb_sysfs_init();
+ hugetlb_register_all_nodes();
+
return 0;
}
module_init(hugetlb_init);
@@ -1603,7 +1783,7 @@ int hugetlb_sysctl_handler(struct ctl_ta
proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (write)
- h->max_huge_pages = set_max_huge_pages(h, tmp);
+ h->max_huge_pages = set_max_huge_pages(h, tmp, NUMA_NO_NODE);
return 0;
}
Index: linux-2.6.31-rc7-mmotm-090827-1651/include/linux/node.h
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/include/linux/node.h 2009-09-09 11:57:26.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/include/linux/node.h 2009-09-09 11:57:37.000000000 -0400
@@ -28,6 +28,7 @@ struct node {
struct memory_block;
extern struct node node_devices[];
+typedef void (*NODE_REGISTRATION_FUNC)(struct node *);
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
@@ -39,6 +40,8 @@ extern int unregister_cpu_under_node(uns
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
int nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
+extern void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC doregister,
+ NODE_REGISTRATION_FUNC unregister);
#else
static inline int register_one_node(int nid)
{
@@ -65,6 +68,11 @@ static inline int unregister_mem_sect_un
{
return 0;
}
+
+static inline void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC reg,
+ NODE_REGISTRATION_FUNC unreg)
+{
+}
#endif
#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
--
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [PATCH 5/6] hugetlb: add per node hstate attributes
2009-09-09 16:31 ` Lee Schermerhorn
@ 2009-09-10 12:32 ` Mel Gorman
-1 siblings, 0 replies; 44+ messages in thread
From: Mel Gorman @ 2009-09-10 12:32 UTC (permalink / raw)
To: Lee Schermerhorn
Cc: linux-mm, linux-numa, akpm, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
On Wed, Sep 09, 2009 at 12:31:58PM -0400, Lee Schermerhorn wrote:
> [PATCH 5/6] hugetlb: register per node hugepages attributes
>
> V6: + Use NUMA_NO_NODE for unspecified node id throughout hugetlb.c
> to indicate that we didn't get there via a per node attribute.
> Drop redundant "NO_NODEID_SPECIFIED" definition.
> + handle movement of defaulting of nodes_allowed up to
> set_max_huge_pages()
>
ppc64 doesn't define NUMA_NO_NODE so this fails to build. Maybe move the
definition to include/linux/node.h as a pre-requisite patch?
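Something like the following, perhaps [placement and guard are just a
sketch]:

/* include/linux/node.h, as a pre-requisite patch */
#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE	(-1)
#endif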
> This patch adds the per huge page size control/query attributes
> to the per node sysdevs:
>
> /sys/devices/system/node/node<ID>/hugepages/hugepages-<size>/
> nr_hugepages - r/w
> free_huge_pages - r/o
> surplus_huge_pages - r/o
>
> The patch attempts to re-use/share as much of the existing
> global hstate attribute initialization and handling, and the
> "nodes_allowed" constraint processing as possible.
> Calling set_max_huge_pages() with no node indicates a change to
> global hstate parameters. In this case, any non-default task
> mempolicy will be used to generate the nodes_allowed mask. A
> valid node id indicates an update to that node's hstate
> parameters, and the count argument specifies the target count
> for the specified node. From this info, we compute the target
> global count for the hstate and construct a nodes_allowed node
> mask containing only the specified node.
>
> Setting the node specific nr_hugepages via the per node attribute
> effectively ignores any task mempolicy or cpuset constraints.
>
> With this patch:
>
> (me):ls /sys/devices/system/node/node0/hugepages/hugepages-2048kB
> ./ ../ free_hugepages nr_hugepages surplus_hugepages
>
> Starting from:
> Node 0 HugePages_Total: 0
> Node 0 HugePages_Free: 0
> Node 0 HugePages_Surp: 0
> Node 1 HugePages_Total: 0
> Node 1 HugePages_Free: 0
> Node 1 HugePages_Surp: 0
> Node 2 HugePages_Total: 0
> Node 2 HugePages_Free: 0
> Node 2 HugePages_Surp: 0
> Node 3 HugePages_Total: 0
> Node 3 HugePages_Free: 0
> Node 3 HugePages_Surp: 0
> vm.nr_hugepages = 0
>
> Allocate 16 persistent huge pages on node 2:
> (me):echo 16 >/sys/devices/system/node/node2/hugepages/hugepages-2048kB/nr_hugepages
>
> [Note that this is equivalent to:
> numactl -m 2 hugeadmin --pool-pages-min 2M:+16
> ]
>
> Yields:
> Node 0 HugePages_Total: 0
> Node 0 HugePages_Free: 0
> Node 0 HugePages_Surp: 0
> Node 1 HugePages_Total: 0
> Node 1 HugePages_Free: 0
> Node 1 HugePages_Surp: 0
> Node 2 HugePages_Total: 16
> Node 2 HugePages_Free: 16
> Node 2 HugePages_Surp: 0
> Node 3 HugePages_Total: 0
> Node 3 HugePages_Free: 0
> Node 3 HugePages_Surp: 0
> vm.nr_hugepages = 16
>
> Global controls work as expected--reduce pool to 8 persistent huge pages:
> (me):echo 8 >/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
>
> Node 0 HugePages_Total: 0
> Node 0 HugePages_Free: 0
> Node 0 HugePages_Surp: 0
> Node 1 HugePages_Total: 0
> Node 1 HugePages_Free: 0
> Node 1 HugePages_Surp: 0
> Node 2 HugePages_Total: 8
> Node 2 HugePages_Free: 8
> Node 2 HugePages_Surp: 0
> Node 3 HugePages_Total: 0
> Node 3 HugePages_Free: 0
> Node 3 HugePages_Surp: 0
>
> Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
> Acked-by: Mel Gorman <mel@csn.ul.ie>
>
> drivers/base/node.c | 33 +++++++
> include/linux/node.h | 8 +
> mm/hugetlb.c | 240 ++++++++++++++++++++++++++++++++++++++++++++-------
> 3 files changed, 251 insertions(+), 30 deletions(-)
>
> Index: linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c
> ===================================================================
> --- linux-2.6.31-rc7-mmotm-090827-1651.orig/drivers/base/node.c 2009-09-09 11:57:26.000000000 -0400
> +++ linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c 2009-09-09 11:57:37.000000000 -0400
> @@ -177,6 +177,37 @@ static ssize_t node_read_distance(struct
> }
> static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
>
> +/*
> + * hugetlbfs per node attributes registration interface:
> + * When/if hugetlb[fs] subsystem initializes [sometime after this module],
> + * it will register its per node attributes for all nodes on-line at that
> + * point. It will also call register_hugetlbfs_with_node(), below, to
> + * register its attribute registration functions with this node driver.
> + * Once these hooks have been initialized, the node driver will call into
> + * the hugetlb module to [un]register attributes for hot-plugged nodes.
> + */
> +NODE_REGISTRATION_FUNC __hugetlb_register_node;
> +NODE_REGISTRATION_FUNC __hugetlb_unregister_node;
> +
> +static inline void hugetlb_register_node(struct node *node)
> +{
> + if (__hugetlb_register_node)
> + __hugetlb_register_node(node);
> +}
> +
> +static inline void hugetlb_unregister_node(struct node *node)
> +{
> + if (__hugetlb_unregister_node)
> + __hugetlb_unregister_node(node);
> +}
> +
> +void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC doregister,
> + NODE_REGISTRATION_FUNC unregister)
> +{
> + __hugetlb_register_node = doregister;
> + __hugetlb_unregister_node = unregister;
> +}
> +
>
> /*
> * register_node - Setup a sysfs device for a node.
> @@ -200,6 +231,7 @@ int register_node(struct node *node, int
> sysdev_create_file(&node->sysdev, &attr_distance);
>
> scan_unevictable_register_node(node);
> + hugetlb_register_node(node);
> }
> return error;
> }
> @@ -220,6 +252,7 @@ void unregister_node(struct node *node)
> sysdev_remove_file(&node->sysdev, &attr_distance);
>
> scan_unevictable_unregister_node(node);
> + hugetlb_unregister_node(node);
>
> sysdev_unregister(&node->sysdev);
> }
> Index: linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c
> ===================================================================
> --- linux-2.6.31-rc7-mmotm-090827-1651.orig/mm/hugetlb.c 2009-09-09 11:57:36.000000000 -0400
> +++ linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c 2009-09-09 11:57:37.000000000 -0400
> @@ -24,6 +24,7 @@
> #include <asm/io.h>
>
> #include <linux/hugetlb.h>
> +#include <linux/node.h>
> #include "internal.h"
>
> const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
> @@ -1243,7 +1244,8 @@ static int adjust_pool_surplus(struct hs
> }
>
> #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
> -static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
> +static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
> + int nid)
> {
> unsigned long min_count, ret;
> nodemask_t *nodes_allowed;
> @@ -1251,7 +1253,17 @@ static unsigned long set_max_huge_pages(
> if (h->order >= MAX_ORDER)
> return h->max_huge_pages;
>
> - nodes_allowed = alloc_nodemask_of_mempolicy();
> + if (nid == NUMA_NO_NODE) {
> + nodes_allowed = alloc_nodemask_of_mempolicy();
> + } else {
> + /*
> + * incoming 'count' is for node 'nid' only, so
> + * adjust count to global, but restrict alloc/free
> + * to the specified node.
> + */
> + count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
> + nodes_allowed = alloc_nodemask_of_node(nid);
> + }
> if (!nodes_allowed) {
> printk(KERN_WARNING "%s unable to allocate nodes allowed mask "
> "for huge page allocation. Falling back to default.\n",
> @@ -1334,51 +1346,71 @@ out:
> static struct kobject *hugepages_kobj;
> static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
>
> -static struct hstate *kobj_to_hstate(struct kobject *kobj)
> +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
> +
> +static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
> {
> int i;
> +
> for (i = 0; i < HUGE_MAX_HSTATE; i++)
> - if (hstate_kobjs[i] == kobj)
> + if (hstate_kobjs[i] == kobj) {
> + if (nidp)
> + *nidp = NUMA_NO_NODE;
> return &hstates[i];
> - BUG();
> - return NULL;
> + }
> +
> + return kobj_to_node_hstate(kobj, nidp);
> }
>
> static ssize_t nr_hugepages_show(struct kobject *kobj,
> struct kobj_attribute *attr, char *buf)
> {
> - struct hstate *h = kobj_to_hstate(kobj);
> - return sprintf(buf, "%lu\n", h->nr_huge_pages);
> + struct hstate *h;
> + unsigned long nr_huge_pages;
> + int nid;
> +
> + h = kobj_to_hstate(kobj, &nid);
> + if (nid == NUMA_NO_NODE)
> + nr_huge_pages = h->nr_huge_pages;
> + else
> + nr_huge_pages = h->nr_huge_pages_node[nid];
> +
> + return sprintf(buf, "%lu\n", nr_huge_pages);
> }
> +
> static ssize_t nr_hugepages_store(struct kobject *kobj,
> - struct kobj_attribute *attr, const char *buf, size_t count)
> + struct kobj_attribute *attr, const char *buf, size_t len)
> {
> + unsigned long count;
> + struct hstate *h;
> + int nid;
> int err;
> - unsigned long input;
> - struct hstate *h = kobj_to_hstate(kobj);
>
> - err = strict_strtoul(buf, 10, &input);
> + err = strict_strtoul(buf, 10, &count);
> if (err)
> return 0;
>
> - h->max_huge_pages = set_max_huge_pages(h, input);
> + h = kobj_to_hstate(kobj, &nid);
> + h->max_huge_pages = set_max_huge_pages(h, count, nid);
>
> - return count;
> + return len;
> }
> HSTATE_ATTR(nr_hugepages);
>
> static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
> struct kobj_attribute *attr, char *buf)
> {
> - struct hstate *h = kobj_to_hstate(kobj);
> + struct hstate *h = kobj_to_hstate(kobj, NULL);
> +
> return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
> }
> +
> static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
> struct kobj_attribute *attr, const char *buf, size_t count)
> {
> int err;
> unsigned long input;
> - struct hstate *h = kobj_to_hstate(kobj);
> + struct hstate *h = kobj_to_hstate(kobj, NULL);
>
> err = strict_strtoul(buf, 10, &input);
> if (err)
> @@ -1395,15 +1427,24 @@ HSTATE_ATTR(nr_overcommit_hugepages);
> static ssize_t free_hugepages_show(struct kobject *kobj,
> struct kobj_attribute *attr, char *buf)
> {
> - struct hstate *h = kobj_to_hstate(kobj);
> - return sprintf(buf, "%lu\n", h->free_huge_pages);
> + struct hstate *h;
> + unsigned long free_huge_pages;
> + int nid;
> +
> + h = kobj_to_hstate(kobj, &nid);
> + if (nid == NUMA_NO_NODE)
> + free_huge_pages = h->free_huge_pages;
> + else
> + free_huge_pages = h->free_huge_pages_node[nid];
> +
> + return sprintf(buf, "%lu\n", free_huge_pages);
> }
> HSTATE_ATTR_RO(free_hugepages);
>
> static ssize_t resv_hugepages_show(struct kobject *kobj,
> struct kobj_attribute *attr, char *buf)
> {
> - struct hstate *h = kobj_to_hstate(kobj);
> + struct hstate *h = kobj_to_hstate(kobj, NULL);
> return sprintf(buf, "%lu\n", h->resv_huge_pages);
> }
> HSTATE_ATTR_RO(resv_hugepages);
> @@ -1411,8 +1452,17 @@ HSTATE_ATTR_RO(resv_hugepages);
> static ssize_t surplus_hugepages_show(struct kobject *kobj,
> struct kobj_attribute *attr, char *buf)
> {
> - struct hstate *h = kobj_to_hstate(kobj);
> - return sprintf(buf, "%lu\n", h->surplus_huge_pages);
> + struct hstate *h;
> + unsigned long surplus_huge_pages;
> + int nid;
> +
> + h = kobj_to_hstate(kobj, &nid);
> + if (nid == NUMA_NO_NODE)
> + surplus_huge_pages = h->surplus_huge_pages;
> + else
> + surplus_huge_pages = h->surplus_huge_pages_node[nid];
> +
> + return sprintf(buf, "%lu\n", surplus_huge_pages);
> }
> HSTATE_ATTR_RO(surplus_hugepages);
>
> @@ -1429,19 +1479,21 @@ static struct attribute_group hstate_att
> .attrs = hstate_attrs,
> };
>
> -static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
> +static int __init hugetlb_sysfs_add_hstate(struct hstate *h,
> + struct kobject *parent,
> + struct kobject **hstate_kobjs,
> + struct attribute_group *hstate_attr_group)
> {
> int retval;
> + int hi = h - hstates;
>
> - hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
> - hugepages_kobj);
> - if (!hstate_kobjs[h - hstates])
> + hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
> + if (!hstate_kobjs[hi])
> return -ENOMEM;
>
> - retval = sysfs_create_group(hstate_kobjs[h - hstates],
> - &hstate_attr_group);
> + retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
> if (retval)
> - kobject_put(hstate_kobjs[h - hstates]);
> + kobject_put(hstate_kobjs[hi]);
>
> return retval;
> }
> @@ -1456,17 +1508,143 @@ static void __init hugetlb_sysfs_init(vo
> return;
>
> for_each_hstate(h) {
> - err = hugetlb_sysfs_add_hstate(h);
> + err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
> + hstate_kobjs, &hstate_attr_group);
> if (err)
> printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
> h->name);
> }
> }
>
> +#ifdef CONFIG_NUMA
> +
> +struct node_hstate {
> + struct kobject *hugepages_kobj;
> + struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
> +};
> +struct node_hstate node_hstates[MAX_NUMNODES];
> +
> +static struct attribute *per_node_hstate_attrs[] = {
> + &nr_hugepages_attr.attr,
> + &free_hugepages_attr.attr,
> + &surplus_hugepages_attr.attr,
> + NULL,
> +};
> +
> +static struct attribute_group per_node_hstate_attr_group = {
> + .attrs = per_node_hstate_attrs,
> +};
> +
> +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
> +{
> + int nid;
> +
> + for (nid = 0; nid < nr_node_ids; nid++) {
> + struct node_hstate *nhs = &node_hstates[nid];
> + int i;
> + for (i = 0; i < HUGE_MAX_HSTATE; i++)
> + if (nhs->hstate_kobjs[i] == kobj) {
> + if (nidp)
> + *nidp = nid;
> + return &hstates[i];
> + }
> + }
> +
> + BUG();
> + return NULL;
> +}
> +
> +void hugetlb_unregister_node(struct node *node)
> +{
> + struct hstate *h;
> + struct node_hstate *nhs = &node_hstates[node->sysdev.id];
> +
> + if (!nhs->hugepages_kobj)
> + return;
> +
> + for_each_hstate(h)
> + if (nhs->hstate_kobjs[h - hstates]) {
> + kobject_put(nhs->hstate_kobjs[h - hstates]);
> + nhs->hstate_kobjs[h - hstates] = NULL;
> + }
> +
> + kobject_put(nhs->hugepages_kobj);
> + nhs->hugepages_kobj = NULL;
> +}
> +
> +static void hugetlb_unregister_all_nodes(void)
> +{
> + int nid;
> +
> + for (nid = 0; nid < nr_node_ids; nid++)
> + hugetlb_unregister_node(&node_devices[nid]);
> +
> + register_hugetlbfs_with_node(NULL, NULL);
> +}
> +
> +void hugetlb_register_node(struct node *node)
> +{
> + struct hstate *h;
> + struct node_hstate *nhs = &node_hstates[node->sysdev.id];
> + int err;
> +
> + if (nhs->hugepages_kobj)
> + return; /* already allocated */
> +
> + nhs->hugepages_kobj = kobject_create_and_add("hugepages",
> + &node->sysdev.kobj);
> + if (!nhs->hugepages_kobj)
> + return;
> +
> + for_each_hstate(h) {
> + err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
> + nhs->hstate_kobjs,
> + &per_node_hstate_attr_group);
> + if (err) {
> + printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
> + " for node %d\n",
> + h->name, node->sysdev.id);
> + hugetlb_unregister_node(node);
> + break;
> + }
> + }
> +}
> +
> +static void hugetlb_register_all_nodes(void)
> +{
> + int nid;
> +
> + for (nid = 0; nid < nr_node_ids; nid++) {
> + struct node *node = &node_devices[nid];
> + if (node->sysdev.id == nid)
> + hugetlb_register_node(node);
> + }
> +
> + register_hugetlbfs_with_node(hugetlb_register_node,
> + hugetlb_unregister_node);
> +}
> +#else /* !CONFIG_NUMA */
> +
> +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
> +{
> + BUG();
> + if (nidp)
> + *nidp = -1;
> + return NULL;
> +}
> +
> +static void hugetlb_unregister_all_nodes(void) { }
> +
> +static void hugetlb_register_all_nodes(void) { }
> +
> +#endif
> +
> static void __exit hugetlb_exit(void)
> {
> struct hstate *h;
>
> + hugetlb_unregister_all_nodes();
> +
> for_each_hstate(h) {
> kobject_put(hstate_kobjs[h - hstates]);
> }
> @@ -1501,6 +1679,8 @@ static int __init hugetlb_init(void)
>
> hugetlb_sysfs_init();
>
> + hugetlb_register_all_nodes();
> +
> return 0;
> }
> module_init(hugetlb_init);
> @@ -1603,7 +1783,7 @@ int hugetlb_sysctl_handler(struct ctl_ta
> proc_doulongvec_minmax(table, write, buffer, length, ppos);
>
> if (write)
> - h->max_huge_pages = set_max_huge_pages(h, tmp);
> + h->max_huge_pages = set_max_huge_pages(h, tmp, NUMA_NO_NODE);
>
> return 0;
> }
> Index: linux-2.6.31-rc7-mmotm-090827-1651/include/linux/node.h
> ===================================================================
> --- linux-2.6.31-rc7-mmotm-090827-1651.orig/include/linux/node.h 2009-09-09 11:57:26.000000000 -0400
> +++ linux-2.6.31-rc7-mmotm-090827-1651/include/linux/node.h 2009-09-09 11:57:37.000000000 -0400
> @@ -28,6 +28,7 @@ struct node {
>
> struct memory_block;
> extern struct node node_devices[];
> +typedef void (*NODE_REGISTRATION_FUNC)(struct node *);
>
> extern int register_node(struct node *, int, struct node *);
> extern void unregister_node(struct node *node);
> @@ -39,6 +40,8 @@ extern int unregister_cpu_under_node(uns
> extern int register_mem_sect_under_node(struct memory_block *mem_blk,
> int nid);
> extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
> +extern void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC doregister,
> + NODE_REGISTRATION_FUNC unregister);
> #else
> static inline int register_one_node(int nid)
> {
> @@ -65,6 +68,11 @@ static inline int unregister_mem_sect_un
> {
> return 0;
> }
> +
> +static inline void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC reg,
> + NODE_REGISTRATION_FUNC unreg)
> +{
> +}
> #endif
>
> #define to_node(sys_device) container_of(sys_device, struct node, sysdev)
>
--
Mel Gorman
Part-time Phd Student Linux Technology Center
University of Limerick IBM Dublin Software Lab
^ permalink raw reply [flat|nested] 44+ messages in thread
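The register_hugetlbfs_with_node() interface in the patch above boils down
to a hook-registration pattern: the node driver keeps two function pointers
that stay NULL until hugetlb initializes, and that are cleared again at
teardown so the node driver can never call into an unloaded module. A
minimal userspace C sketch of that pattern (illustrative names only, not
the kernel API):

#include <stdio.h>
#include <stddef.h>

struct node { int id; };

typedef void (*node_registration_func_t)(struct node *);

/* hooks start out NULL; the "hugetlb" side fills them in at init time */
static node_registration_func_t register_hook;
static node_registration_func_t unregister_hook;

static void register_hugetlb_hooks(node_registration_func_t reg,
				   node_registration_func_t unreg)
{
	register_hook = reg;
	unregister_hook = unreg;
}

/* "node driver" side: invoked on node hot-add / hot-remove */
static void node_online(struct node *n)
{
	if (register_hook)		/* no-op until hooks registered */
		register_hook(n);
}

static void node_offline(struct node *n)
{
	if (unregister_hook)
		unregister_hook(n);
}

/* "hugetlb" side */
static void hugetlb_reg(struct node *n)
{
	printf("register hugepage attributes for node %d\n", n->id);
}

static void hugetlb_unreg(struct node *n)
{
	printf("unregister hugepage attributes for node %d\n", n->id);
}

int main(void)
{
	struct node n2 = { 2 };

	node_online(&n2);		/* silently ignored: no hooks yet */
	register_hugetlb_hooks(hugetlb_reg, hugetlb_unreg);
	node_online(&n2);		/* dispatches into "hugetlb" */
	node_offline(&n2);
	register_hugetlb_hooks(NULL, NULL);	/* teardown clears hooks */
	node_online(&n2);		/* safe no-op again */
	return 0;
}

The final register_hugetlb_hooks(NULL, NULL) call mirrors the
register_hugetlbfs_with_node(NULL, NULL) in hugetlb_unregister_all_nodes()
above, which detaches the hooks before the module's state goes away.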
* Re: [PATCH 5/6] hugetlb: add per node hstate attributes
2009-09-10 12:32 ` Mel Gorman
@ 2009-09-10 14:26 ` Lee Schermerhorn
-1 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-10 14:26 UTC (permalink / raw)
To: Mel Gorman
Cc: linux-mm, linux-numa, akpm, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
On Thu, 2009-09-10 at 13:32 +0100, Mel Gorman wrote:
> On Wed, Sep 09, 2009 at 12:31:58PM -0400, Lee Schermerhorn wrote:
> > [PATCH 5/6] hugetlb: register per node hugepages attributes
> >
> > V6: + Use NUMA_NO_NODE for unspecified node id throughout hugetlb.c
> > to indicate that we didn't get there via a per node attribute.
> > Drop redundant "NO_NODEID_SPECIFIED" definition.
> > + handle movement of defaulting of nodes_allowed up to
> > set_max_huge_pages()
> >
>
> ppc64 doesn't define NUMA_NO_NODE so this fails to build. Maybe move the
> definition to include/linux/node.h as a pre-requisite patch?
Rats! I should have looked before I leaped. Only ia64 and x86_64 define
NUMA_NO_NODE, both in arch-dependent code, and in different headers to
boot. I don't think node.h is the right place. The ia64/x86_64 arch
code uses it for ACPI and CPU management. How about <linux/numa.h>?
It's currently a minimal header with no external dependencies. The ia64
numa.h [where NUMA_NO_NODE is defined] already includes it, and the
x86_64 can include it.
This patch, inserted before the subject patch [for bisect-ability],
seems to work on x86_64. Can you try it on ppc?
------------------
PATCH 5/7 - hugetlb: promote NUMA_NO_NODE to generic constant
Against: 2.6.31-rc7-mmotm-090827-1651
Move definition of NUMA_NO_NODE from ia64 and x86_64 arch specific
headers to generic header 'linux/numa.h' for use in generic code.
NUMA_NO_NODE replaces bare '-1' where it's used in this series to
indicate "no node id specified". Ultimately, it can be used
to replace the -1 elsewhere where it is used similarly.
Note that in arch/x86/include/asm/topology.h, NUMA_NO_NODE is
now only defined when CONFIG_NUMA is defined. This seems to work
for current usage of NUMA_NO_NODE in x86_64 arch code, with or
without CONFIG_NUMA defined.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
arch/ia64/include/asm/numa.h | 2 --
arch/x86/include/asm/topology.h | 5 ++---
include/linux/numa.h | 2 ++
3 files changed, 4 insertions(+), 5 deletions(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/arch/ia64/include/asm/numa.h
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/arch/ia64/include/asm/numa.h 2009-06-09 23:05:27.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/arch/ia64/include/asm/numa.h 2009-09-10 08:57:40.000000000 -0400
@@ -22,8 +22,6 @@
#include <asm/mmzone.h>
-#define NUMA_NO_NODE -1
-
extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
extern pg_data_t *pgdat_list[MAX_NUMNODES];
Index: linux-2.6.31-rc7-mmotm-090827-1651/arch/x86/include/asm/topology.h
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/arch/x86/include/asm/topology.h 2009-09-09 10:05:28.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/arch/x86/include/asm/topology.h 2009-09-10 09:07:04.000000000 -0400
@@ -35,11 +35,10 @@
# endif
#endif
-/* Node not present */
-#define NUMA_NO_NODE (-1)
-
#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
+#include <linux/numa.h>
+
#include <asm/mpspec.h>
#ifdef CONFIG_X86_32
Index: linux-2.6.31-rc7-mmotm-090827-1651/include/linux/numa.h
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/include/linux/numa.h 2009-09-04 08:47:02.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/include/linux/numa.h 2009-09-10 09:00:10.000000000 -0400
@@ -10,4 +10,6 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)
+#define NUMA_NO_NODE (-1)
+
#endif /* _LINUX_NUMA_H */
^ permalink raw reply [flat|nested] 44+ messages in thread
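With NUMA_NO_NODE promoted to a generic constant, the nid argument added to
set_max_huge_pages() in patch 5/6 works as a clean sentinel: NUMA_NO_NODE
means the count is a global request, while a valid node id means the count
targets that node only and must be scaled up to an equivalent global
target. A minimal userspace sketch of that adjustment, with made-up sample
counts (not kernel code):

#include <stdio.h>

#define NUMA_NO_NODE	(-1)
#define NR_NODES	4

static unsigned long nr_huge_pages = 20;
static unsigned long nr_huge_pages_node[NR_NODES] = { 10, 5, 5, 0 };

/*
 * Mirror the adjustment in set_max_huge_pages(): a per-node target
 * count becomes the equivalent global target, while the nodes_allowed
 * mask (not modeled here) restricts where pages are actually
 * allocated or freed.
 */
static unsigned long global_target(unsigned long count, int nid)
{
	if (nid == NUMA_NO_NODE)
		return count;	/* already a global target */
	return count + nr_huge_pages - nr_huge_pages_node[nid];
}

int main(void)
{
	/* grow node 2 from 5 to 16 pages: global target 20 - 5 + 16 = 31 */
	printf("node 2 -> 16: global target %lu\n", global_target(16, 2));
	/* a global request passes through unchanged */
	printf("global -> 8: global target %lu\n",
	       global_target(8, NUMA_NO_NODE));
	return 0;
}

For a node holding 5 of 20 global pages, asking for 16 on that node yields
a global target of 31; the nodes_allowed mask then confines the actual
allocation to the requested node.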
* Re: [PATCH 5/6] hugetlb: add per node hstate attributes
2009-09-10 14:26 ` Lee Schermerhorn
(?)
@ 2009-09-10 19:50 ` David Rientjes
2009-09-10 19:58 ` Lee Schermerhorn
-1 siblings, 1 reply; 44+ messages in thread
From: David Rientjes @ 2009-09-10 19:50 UTC (permalink / raw)
To: Lee Schermerhorn
Cc: Mel Gorman, linux-mm, linux-numa, Andrew Morton, Randy Dunlap,
Nishanth Aravamudan, Adam Litke, Andy Whitcroft, eric.whitney
On Thu, 10 Sep 2009, Lee Schermerhorn wrote:
> PATCH 5/7 - hugetlb: promote NUMA_NO_NODE to generic constant
>
> Against: 2.6.31-rc7-mmotm-090827-1651
>
> Move definition of NUMA_NO_NODE from ia64 and x86_64 arch specific
> headers to generic header 'linux/numa.h' for use in generic code.
> NUMA_NO_NODE replaces bare '-1' where it's used in this series to
> indicate "no node id specified". Ultimately, it can be used
> to replace the -1 elsewhere where it is used similarly.
>
> Note that in arch/x86/include/asm/topology.h, NUMA_NO_NODE is
> now only defined when CONFIG_NUMA is defined. This seems to work
> for current usage of NUMA_NO_NODE in x86_64 arch code, with or
> without CONFIG_NUMA defined.
>
> Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: David Rientjes <rientjes@google.com>
Thought I recommended this in
http://marc.info/?l=linux-mm&m=125201173730752
You could now convert NID_INVAL to NUMA_NO_NODE and remove the duplicate
constant as I earlier suggested, to clean up the ACPI code.
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [PATCH 5/6] hugetlb: add per node hstate attributes
2009-09-10 19:50 ` David Rientjes
@ 2009-09-10 19:58 ` Lee Schermerhorn
0 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-10 19:58 UTC (permalink / raw)
To: David Rientjes
Cc: Mel Gorman, linux-mm, linux-numa, Andrew Morton, Randy Dunlap,
Nishanth Aravamudan, Adam Litke, Andy Whitcroft, eric.whitney
On Thu, 2009-09-10 at 12:50 -0700, David Rientjes wrote:
> On Thu, 10 Sep 2009, Lee Schermerhorn wrote:
>
> > PATCH 5/7 - hugetlb: promote NUMA_NO_NODE to generic constant
> >
> > Against: 2.6.31-rc7-mmotm-090827-1651
> >
> > Move definition of NUMA_NO_NODE from ia64 and x86_64 arch specific
> > headers to generic header 'linux/numa.h' for use in generic code.
> > NUMA_NO_NODE replaces bare '-1' where it's used in this series to
> > indicate "no node id specified". Ultimately, it can be used
> > to replace the -1 elsewhere where it is used similarly.
> >
> > Note that in arch/x86/include/asm/topology.h, NUMA_NO_NODE is
> > now only defined when CONFIG_NUMA is defined. This seems to work
> > for current usage of NUMA_NO_NODE in x86_64 arch code, with or
> > without CONFIG_NUMA defined.
> >
> > Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
>
> Acked-by: David Rientjes <rientjes@google.com>
>
> Thought I recommended this in
> http://marc.info/?l=linux-mm&m=125201173730752
Yeah, but that was before the long weekend...
>
> You could now convert NID_INVAL to NUMA_NO_NODE and remove the duplicate
> constant as I earlier suggested to cleanup the acpi code.
Could now be done, as you say...
Meanwhile, I'm working on a bit more "clean up".
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [PATCH 5/6] hugetlb: add per node hstate attributes
2009-09-09 16:31 ` Lee Schermerhorn
(?)
(?)
@ 2009-09-10 23:31 ` Andrew Morton
2009-09-11 13:12 ` Lee Schermerhorn
-1 siblings, 1 reply; 44+ messages in thread
From: Andrew Morton @ 2009-09-10 23:31 UTC (permalink / raw)
To: Lee Schermerhorn
Cc: linux-mm, linux-numa, mel, randy.dunlap, nacc, rientjes, agl,
apw, eric.whitney
On Wed, 09 Sep 2009 12:31:58 -0400
Lee Schermerhorn <lee.schermerhorn@hp.com> wrote:
> ...
>
> This patch adds the per huge page size control/query attributes
> to the per node sysdevs:
>
> /sys/devices/system/node/node<ID>/hugepages/hugepages-<size>/
> nr_hugepages - r/w
> free_huge_pages - r/o
> surplus_huge_pages - r/o
>
>
> ...
>
> Index: linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c
> ===================================================================
> --- linux-2.6.31-rc7-mmotm-090827-1651.orig/drivers/base/node.c 2009-09-09 11:57:26.000000000 -0400
> +++ linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c 2009-09-09 11:57:37.000000000 -0400
> @@ -177,6 +177,37 @@ static ssize_t node_read_distance(struct
> }
> static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
>
> +/*
> + * hugetlbfs per node attributes registration interface:
> + * When/if hugetlb[fs] subsystem initializes [sometime after this module],
> + * it will register it's per node attributes for all nodes on-line at that
> + * point. It will also call register_hugetlbfs_with_node(), below, to
> + * register it's attribute registration functions with this node driver.
> + * Once these hooks have been initialized, the node driver will call into
> + * the hugetlb module to [un]register attributes for hot-plugged nodes.
> + */
> +NODE_REGISTRATION_FUNC __hugetlb_register_node;
> +NODE_REGISTRATION_FUNC __hugetlb_unregister_node;
WHAT THE HECK IS THAT THING?
Oh. It's a typedef. It's not a kernel convention to upper-case those.
It is a kernel convention to lower-case them and stick a _t at the
end.
There doesn't appear to have been any reason to make these symbols
global.
>
> +#ifdef CONFIG_NUMA
> +
> +struct node_hstate {
> + struct kobject *hugepages_kobj;
> + struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
> +};
> +struct node_hstate node_hstates[MAX_NUMNODES];
> +
> +static struct attribute *per_node_hstate_attrs[] = {
> + &nr_hugepages_attr.attr,
> + &free_hugepages_attr.attr,
> + &surplus_hugepages_attr.attr,
> + NULL,
> +};
I assume this interface got documented in patch 6/6.
> +static struct attribute_group per_node_hstate_attr_group = {
> + .attrs = per_node_hstate_attrs,
> +};
> +
> +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
> +{
> + int nid;
> +
> + for (nid = 0; nid < nr_node_ids; nid++) {
> + struct node_hstate *nhs = &node_hstates[nid];
> + int i;
> + for (i = 0; i < HUGE_MAX_HSTATE; i++)
> + if (nhs->hstate_kobjs[i] == kobj) {
> + if (nidp)
> + *nidp = nid;
Dammit, another function which has no callers. How am I supposed
to find out if we really need to test for a NULL nidp?
> + return &hstates[i];
> + }
> + }
> +
> + BUG();
> + return NULL;
> +}
>
>
> ...
>
> +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
> +{
> + BUG();
> + if (nidp)
> + *nidp = -1;
> + return NULL;
> +}
strange.
fixlets:
drivers/base/node.c | 14 +++++++-------
hugetlb.c | 0
include/linux/node.h | 10 +++++-----
3 files changed, 12 insertions(+), 12 deletions(-)
diff -puN drivers/base/node.c~hugetlb-add-per-node-hstate-attributes-fix drivers/base/node.c
--- a/drivers/base/node.c~hugetlb-add-per-node-hstate-attributes-fix
+++ a/drivers/base/node.c
@@ -180,14 +180,14 @@ static SYSDEV_ATTR(distance, S_IRUGO, no
/*
* hugetlbfs per node attributes registration interface:
* When/if hugetlb[fs] subsystem initializes [sometime after this module],
- * it will register it's per node attributes for all nodes on-line at that
- * point. It will also call register_hugetlbfs_with_node(), below, to
- * register it's attribute registration functions with this node driver.
+ * it will register its per node attributes for all nodes online at that
+ * time. It will also call register_hugetlbfs_with_node(), below, to
+ * register its attribute registration functions with this node driver.
* Once these hooks have been initialized, the node driver will call into
* the hugetlb module to [un]register attributes for hot-plugged nodes.
*/
-NODE_REGISTRATION_FUNC __hugetlb_register_node;
-NODE_REGISTRATION_FUNC __hugetlb_unregister_node;
+static node_registration_func_t __hugetlb_register_node;
+static node_registration_func_t __hugetlb_unregister_node;
static inline void hugetlb_register_node(struct node *node)
{
@@ -201,8 +201,8 @@ static inline void hugetlb_unregister_no
__hugetlb_unregister_node(node);
}
-void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC doregister,
- NODE_REGISTRATION_FUNC unregister)
+void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister)
{
__hugetlb_register_node = doregister;
__hugetlb_unregister_node = unregister;
diff -puN include/linux/node.h~hugetlb-add-per-node-hstate-attributes-fix include/linux/node.h
--- a/include/linux/node.h~hugetlb-add-per-node-hstate-attributes-fix
+++ a/include/linux/node.h
@@ -28,7 +28,7 @@ struct node {
struct memory_block;
extern struct node node_devices[];
-typedef void (*NODE_REGISTRATION_FUNC)(struct node *);
+typedef void (*node_registration_func_t)(struct node *);
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
@@ -40,8 +40,8 @@ extern int unregister_cpu_under_node(uns
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
int nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
-extern void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC doregister,
- NODE_REGISTRATION_FUNC unregister);
+extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister);
#else
static inline int register_one_node(int nid)
{
@@ -69,8 +69,8 @@ static inline int unregister_mem_sect_un
return 0;
}
-static inline void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC reg,
- NODE_REGISTRATION_FUNC unreg)
+static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
+ node_registration_func_t unreg)
{
}
#endif
diff -puN mm/hugetlb.c~hugetlb-add-per-node-hstate-attributes-fix mm/hugetlb.c
_
^ permalink raw reply [flat|nested] 44+ messages in thread
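On the NULL-nidp question: in the patch hunks earlier in the thread, the
per-node-capable show/store methods pass &nid, while attributes that exist
only globally (resv_hugepages, nr_overcommit_hugepages) pass NULL, so the
lookup must tolerate both. A minimal userspace sketch of the optional
out-parameter convention (table contents are illustrative, not the
kobject machinery):

#include <stdio.h>
#include <stddef.h>

#define NR_NODES	2
#define NR_HSTATES	2
#define NO_NODE		(-1)

/* stand-in for the per-node hstate_kobjs[] pointers */
static int node_keys[NR_NODES][NR_HSTATES] = { { 11, 12 }, { 21, 22 } };

/*
 * Reverse lookup, key -> hstate index, optionally reporting which node
 * owned the key. Callers that only care about the hstate pass
 * nidp == NULL, which is why the kernel function tests it.
 */
static int key_to_hstate(int key, int *nidp)
{
	int nid, i;

	for (nid = 0; nid < NR_NODES; nid++)
		for (i = 0; i < NR_HSTATES; i++)
			if (node_keys[nid][i] == key) {
				if (nidp)
					*nidp = nid;
				return i;
			}
	if (nidp)
		*nidp = NO_NODE;
	return -1;	/* the kernel version BUG()s on a miss instead */
}

int main(void)
{
	int nid;
	int hi = key_to_hstate(21, &nid);

	printf("key 21: hstate %d on node %d\n", hi, nid);
	printf("key 12: hstate %d (node not needed)\n",
	       key_to_hstate(12, NULL));
	return 0;
}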
* Re: [PATCH 5/6] hugetlb: add per node hstate attributes
2009-09-10 23:31 ` Andrew Morton
@ 2009-09-11 13:12 ` Lee Schermerhorn
0 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-11 13:12 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-mm, linux-numa, mel, randy.dunlap, nacc, rientjes, agl,
apw, eric.whitney
On Thu, 2009-09-10 at 16:31 -0700, Andrew Morton wrote:
> On Wed, 09 Sep 2009 12:31:58 -0400
> Lee Schermerhorn <lee.schermerhorn@hp.com> wrote:
>
> > ...
> >
> > This patch adds the per huge page size control/query attributes
> > to the per node sysdevs:
> >
> > /sys/devices/system/node/node<ID>/hugepages/hugepages-<size>/
> > nr_hugepages - r/w
> > free_huge_pages - r/o
> > surplus_huge_pages - r/o
> >
> >
> > ...
> >
> > Index: linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c
> > ===================================================================
> > --- linux-2.6.31-rc7-mmotm-090827-1651.orig/drivers/base/node.c 2009-09-09 11:57:26.000000000 -0400
> > +++ linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c 2009-09-09 11:57:37.000000000 -0400
> > @@ -177,6 +177,37 @@ static ssize_t node_read_distance(struct
> > }
> > static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
> >
> > +/*
> > + * hugetlbfs per node attributes registration interface:
> > + * When/if hugetlb[fs] subsystem initializes [sometime after this module],
> > + * it will register it's per node attributes for all nodes on-line at that
> > + * point. It will also call register_hugetlbfs_with_node(), below, to
> > + * register it's attribute registration functions with this node driver.
> > + * Once these hooks have been initialized, the node driver will call into
> > + * the hugetlb module to [un]register attributes for hot-plugged nodes.
> > + */
> > +NODE_REGISTRATION_FUNC __hugetlb_register_node;
> > +NODE_REGISTRATION_FUNC __hugetlb_unregister_node;
>
> WHAT THE HECK IS THAT THING?
>
> Oh. It's a typedef. It's not a kernel convention to upper-case those.
> It is a kernel convention to lower-case them and stick a _t at the
> end.
Sorry. Old habits die hard. [and checkpatch didn't complain :)]
>
> There doesn't appear to have been any reason to make these symbols
> global.
Nah.
>
> >
> > +#ifdef CONFIG_NUMA
> > +
> > +struct node_hstate {
> > + struct kobject *hugepages_kobj;
> > + struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
> > +};
> > +struct node_hstate node_hstates[MAX_NUMNODES];
> > +
> > +static struct attribute *per_node_hstate_attrs[] = {
> > + &nr_hugepages_attr.attr,
> > + &free_hugepages_attr.attr,
> > + &surplus_hugepages_attr.attr,
> > + NULL,
> > +};
>
> I assume this interface got documented in patch 6/6.
Of course!
>
> > +static struct attribute_group per_node_hstate_attr_group = {
> > + .attrs = per_node_hstate_attrs,
> > +};
> > +
> > +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
> > +{
> > + int nid;
> > +
> > + for (nid = 0; nid < nr_node_ids; nid++) {
> > + struct node_hstate *nhs = &node_hstates[nid];
> > + int i;
> > + for (i = 0; i < HUGE_MAX_HSTATE; i++)
> > + if (nhs->hstate_kobjs[i] == kobj) {
> > + if (nidp)
> > + *nidp = nid;
>
> Dammit, another function which has no callers. How am I supposed
> to find out if we really need to test for a NULL nidp?
It's called from kobj_to_hstate() further up [nearer the front] of this
patch.
>
> > + return &hstates[i];
> > + }
> > + }
> > +
> > + BUG();
> > + return NULL;
> > +}
> >
> >
> > ...
> >
> > +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
> > +{
> > + BUG();
> > + if (nidp)
> > + *nidp = -1;
> > + return NULL;
> > +}
>
> strange.
Yeah. The pre-existing kobj_to_hstate(), for global hstate attributes,
had the BUG() for when it can't match the kobj to an hstate [shouldn't
happen]. And then it returns NULL. I followed suit in the per node
versions, including this !NUMA stub.
>
>
>
> fixlets:
I'll merge these into my working series. I've been preparing V7, mostly
cleanup: adding some comments. Now that you've added the first part of
the V6 series to the mmotm tree, would you prefer incremental patches to
those?
>
> drivers/base/node.c | 14 +++++++-------
> hugetlb.c | 0
> include/linux/node.h | 10 +++++-----
> 3 files changed, 12 insertions(+), 12 deletions(-)
>
> diff -puN drivers/base/node.c~hugetlb-add-per-node-hstate-attributes-fix drivers/base/node.c
> --- a/drivers/base/node.c~hugetlb-add-per-node-hstate-attributes-fix
> +++ a/drivers/base/node.c
> @@ -180,14 +180,14 @@ static SYSDEV_ATTR(distance, S_IRUGO, no
> /*
> * hugetlbfs per node attributes registration interface:
> * When/if hugetlb[fs] subsystem initializes [sometime after this module],
> - * it will register it's per node attributes for all nodes on-line at that
> - * point. It will also call register_hugetlbfs_with_node(), below, to
> - * register it's attribute registration functions with this node driver.
> + * it will register its per node attributes for all nodes online at that
> + * time. It will also call register_hugetlbfs_with_node(), below, to
> + * register its attribute registration functions with this node driver.
> * Once these hooks have been initialized, the node driver will call into
> * the hugetlb module to [un]register attributes for hot-plugged nodes.
> */
> -NODE_REGISTRATION_FUNC __hugetlb_register_node;
> -NODE_REGISTRATION_FUNC __hugetlb_unregister_node;
> +static node_registration_func_t __hugetlb_register_node;
> +static node_registration_func_t __hugetlb_unregister_node;
>
> static inline void hugetlb_register_node(struct node *node)
> {
> @@ -201,8 +201,8 @@ static inline void hugetlb_unregister_no
> __hugetlb_unregister_node(node);
> }
>
> -void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC doregister,
> - NODE_REGISTRATION_FUNC unregister)
> +void register_hugetlbfs_with_node(node_registration_func_t doregister,
> + node_registration_func_t unregister)
> {
> __hugetlb_register_node = doregister;
> __hugetlb_unregister_node = unregister;
> diff -puN include/linux/node.h~hugetlb-add-per-node-hstate-attributes-fix include/linux/node.h
> --- a/include/linux/node.h~hugetlb-add-per-node-hstate-attributes-fix
> +++ a/include/linux/node.h
> @@ -28,7 +28,7 @@ struct node {
>
> struct memory_block;
> extern struct node node_devices[];
> -typedef void (*NODE_REGISTRATION_FUNC)(struct node *);
> +typedef void (*node_registration_func_t)(struct node *);
>
> extern int register_node(struct node *, int, struct node *);
> extern void unregister_node(struct node *node);
> @@ -40,8 +40,8 @@ extern int unregister_cpu_under_node(uns
> extern int register_mem_sect_under_node(struct memory_block *mem_blk,
> int nid);
> extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
> -extern void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC doregister,
> - NODE_REGISTRATION_FUNC unregister);
> +extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
> + node_registration_func_t unregister);
> #else
> static inline int register_one_node(int nid)
> {
> @@ -69,8 +69,8 @@ static inline int unregister_mem_sect_un
> return 0;
> }
>
> -static inline void register_hugetlbfs_with_node(NODE_REGISTRATION_FUNC reg,
> - NODE_REGISTRATION_FUNC unreg)
> +static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
> + node_registration_func_t unreg)
> {
> }
> #endif
> diff -puN mm/hugetlb.c~hugetlb-add-per-node-hstate-attributes-fix mm/hugetlb.c
> _
>
^ permalink raw reply [flat|nested] 44+ messages in thread
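A structural point worth making explicit after the exchange above: the
per_node_hstate_attr_group reuses the very same attribute objects
(nr_hugepages_attr and friends) as the global hstate group, just in a
smaller NULL-terminated array, which is what lets one set of show/store
handlers serve both sysfs trees. A minimal userspace sketch of sharing
entries between two groups (illustrative struct, not the kobject/sysfs
API):

#include <stdio.h>
#include <stddef.h>

struct attr {
	const char *name;
};

static struct attr nr_hugepages_attr      = { "nr_hugepages" };
static struct attr free_hugepages_attr    = { "free_hugepages" };
static struct attr resv_hugepages_attr    = { "resv_hugepages" };
static struct attr surplus_hugepages_attr = { "surplus_hugepages" };

/* global group: everything */
static struct attr *global_attrs[] = {
	&nr_hugepages_attr,
	&free_hugepages_attr,
	&resv_hugepages_attr,
	&surplus_hugepages_attr,
	NULL,
};

/* per-node group: a strict subset, sharing the same attr objects */
static struct attr *per_node_attrs[] = {
	&nr_hugepages_attr,
	&free_hugepages_attr,
	&surplus_hugepages_attr,
	NULL,
};

static void show_group(const char *label, struct attr **group)
{
	printf("%s:", label);
	for (; *group; group++)
		printf(" %s", (*group)->name);
	printf("\n");
}

int main(void)
{
	show_group("global  ", global_attrs);
	show_group("per-node", per_node_attrs);
	return 0;
}

The omission of resv_hugepages and nr_overcommit_hugepages from the
per-node array matches the patch: those two remain global-only.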
* [PATCH 6/6] hugetlb: update hugetlb documentation for mempolicy based management.
2009-09-09 16:31 ` Lee Schermerhorn
@ 2009-09-09 16:32 ` Lee Schermerhorn
-1 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-09 16:32 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
[PATCH 6/6] hugetlb: update hugetlb documentation for mempolicy based management.
Against: 2.6.31-rc7-mmotm-090827-1651
V2: Add brief description of per node attributes.
V6: address review comments
This patch updates the kernel huge tlb documentation to describe the
numa memory policy based huge page management. Additionally, the patch
includes a fair amount of rework to improve consistency, eliminate
duplication and set the context for documenting the memory policy
interaction.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: David Rientjes <rientjes@google.com>
Documentation/vm/hugetlbpage.txt | 263 +++++++++++++++++++++++++--------------
1 file changed, 175 insertions(+), 88 deletions(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/Documentation/vm/hugetlbpage.txt
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/Documentation/vm/hugetlbpage.txt 2009-09-09 11:57:26.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/Documentation/vm/hugetlbpage.txt 2009-09-09 11:57:37.000000000 -0400
@@ -11,23 +11,21 @@ This optimization is more critical now a
(several GBs) are more readily available.
Users can use the huge page support in Linux kernel by either using the mmap
-system call or standard SYSv shared memory system calls (shmget, shmat).
+system call or standard SYSV shared memory system calls (shmget, shmat).
First the Linux kernel needs to be built with the CONFIG_HUGETLBFS
(present under "File systems") and CONFIG_HUGETLB_PAGE (selected
automatically when CONFIG_HUGETLBFS is selected) configuration
options.
-The kernel built with huge page support should show the number of configured
-huge pages in the system by running the "cat /proc/meminfo" command.
+The /proc/meminfo file provides information about the total number of
+persistent hugetlb pages in the kernel's huge page pool. It also displays
+information about the number of free, reserved and surplus huge pages and the
+default huge page size. The huge page size is needed for generating the
+proper alignment and size of the arguments to system calls that map huge page
+regions.
-/proc/meminfo also provides information about the total number of hugetlb
-pages configured in the kernel. It also displays information about the
-number of free hugetlb pages at any time. It also displays information about
-the configured huge page size - this is needed for generating the proper
-alignment and size of the arguments to the above system calls.
-
-The output of "cat /proc/meminfo" will have lines like:
+The output of "cat /proc/meminfo" will include lines like:
.....
HugePages_Total: vvv
@@ -53,59 +51,63 @@ HugePages_Surp is short for "surplus,"
/proc/filesystems should also show a filesystem of type "hugetlbfs" configured
in the kernel.
-/proc/sys/vm/nr_hugepages indicates the current number of configured hugetlb
-pages in the kernel. Super user can dynamically request more (or free some
-pre-configured) huge pages.
-The allocation (or deallocation) of hugetlb pages is possible only if there are
-enough physically contiguous free pages in system (freeing of huge pages is
-possible only if there are enough hugetlb pages free that can be transferred
-back to regular memory pool).
-
-Pages that are used as hugetlb pages are reserved inside the kernel and cannot
-be used for other purposes.
-
-Once the kernel with Hugetlb page support is built and running, a user can
-use either the mmap system call or shared memory system calls to start using
-the huge pages. It is required that the system administrator preallocate
-enough memory for huge page purposes.
-
-The administrator can preallocate huge pages on the kernel boot command line by
-specifying the "hugepages=N" parameter, where 'N' = the number of huge pages
-requested. This is the most reliable method for preallocating huge pages as
-memory has not yet become fragmented.
+/proc/sys/vm/nr_hugepages indicates the current number of "persistent" huge
+pages in the kernel's huge page pool. "Persistent" huge pages will be
+returned to the huge page pool when freed by a task. A user with root
+privileges can dynamically allocate more or free some persistent huge pages
+by increasing or decreasing the value of 'nr_hugepages'.
+
+Pages that are used as huge pages are reserved inside the kernel and cannot
+be used for other purposes. Huge pages cannot be swapped out under
+memory pressure.
+
+Once a number of huge pages have been pre-allocated to the kernel huge page
+pool, a user with appropriate privilege can use either the mmap system call
+or shared memory system calls to use the huge pages. See the discussion of
+Using Huge Pages, below.
+
+The administrator can allocate persistent huge pages on the kernel boot
+command line by specifying the "hugepages=N" parameter, where 'N' = the
+number of huge pages requested. This is the most reliable method of
+allocating huge pages as memory has not yet become fragmented.
-Some platforms support multiple huge page sizes. To preallocate huge pages
+Some platforms support multiple huge page sizes. To allocate huge pages
of a specific size, one must precede the huge pages boot command parameters
with a huge page size selection parameter "hugepagesz=<size>". <size> must
be specified in bytes with optional scale suffix [kKmMgG]. The default huge
page size may be selected with the "default_hugepagesz=<size>" boot parameter.
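+
+For example [an illustrative boot line; sizes and counts depend on the
+platform]:
+
+	hugepagesz=2M hugepages=512 default_hugepagesz=2M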
-/proc/sys/vm/nr_hugepages indicates the current number of configured [default
-size] hugetlb pages in the kernel. Super user can dynamically request more
-(or free some pre-configured) huge pages.
-
-Use the following command to dynamically allocate/deallocate default sized
-huge pages:
+When multiple huge page sizes are supported, /proc/sys/vm/nr_hugepages
+indicates the current number of pre-allocated huge pages of the default size.
+Thus, one can use the following command to dynamically allocate/deallocate
+default sized persistent huge pages:
echo 20 > /proc/sys/vm/nr_hugepages
-This command will try to configure 20 default sized huge pages in the system.
+This command will try to adjust the number of default sized huge pages in the
+huge page pool to 20, allocating or freeing huge pages, as required.
+
On a NUMA platform, the kernel will attempt to distribute the huge page pool
-over the all on-line nodes. These huge pages, allocated when nr_hugepages
-is increased, are called "persistent huge pages".
+over the set of allowed nodes specified by the NUMA memory policy of the
+task that modifies nr_hugepages. The default for the allowed nodes--when the
+task has default memory policy--is all on-line nodes. Allowed nodes with
+insufficient available, contiguous memory for a huge page will be silently
+skipped when allocating persistent huge pages. See the discussion below of
+the interaction of task memory policy, cpusets and per node attributes with
+the allocation and freeing of persistent huge pages.
The success or failure of huge page allocation depends on the amount of
-physically contiguous memory that is preset in system at the time of the
+physically contiguous memory that is present in system at the time of the
allocation attempt. If the kernel is unable to allocate huge pages from
some nodes in a NUMA system, it will attempt to make up the difference by
allocating extra pages on other nodes with sufficient available contiguous
memory, if any.
-System administrators may want to put this command in one of the local rc init
-files. This will enable the kernel to request huge pages early in the boot
-process when the possibility of getting physical contiguous pages is still
-very high. Administrators can verify the number of huge pages actually
-allocated by checking the sysctl or meminfo. To check the per node
+System administrators may want to put this command in one of the local rc
+init files. This will enable the kernel to allocate huge pages early in
+the boot process when the possibility of getting physical contiguous pages
+is still very high. Administrators can verify the number of huge pages
+actually allocated by checking the sysctl or meminfo. To check the per node
distribution of huge pages in a NUMA system, use:
cat /sys/devices/system/node/node*/meminfo | fgrep Huge
@@ -113,39 +115,40 @@ distribution of huge pages in a NUMA sys
/proc/sys/vm/nr_overcommit_hugepages specifies how large the pool of
huge pages can grow, if more huge pages than /proc/sys/vm/nr_hugepages are
requested by applications. Writing any non-zero value into this file
-indicates that the hugetlb subsystem is allowed to try to obtain "surplus"
-huge pages from the buddy allocator, when the normal pool is exhausted. As
-these surplus huge pages go out of use, they are freed back to the buddy
-allocator.
+indicates that the hugetlb subsystem is allowed to try to obtain that
+number of "surplus" huge pages from the kernel's normal page pool, when the
+persistent huge page pool is exhausted. As these surplus huge pages become
+unused, they are freed back to the kernel's normal page pool.
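+
+For example [an illustrative value], the following allows the hugetlb
+subsystem to grow the pool by up to 10 surplus huge pages on demand:
+
+	echo 10 > /proc/sys/vm/nr_overcommit_hugepages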
-When increasing the huge page pool size via nr_hugepages, any surplus
+When increasing the huge page pool size via nr_hugepages, any existing surplus
pages will first be promoted to persistent huge pages. Then, additional
huge pages will be allocated, if necessary and if possible, to fulfill
-the new huge page pool size.
+the new persistent huge page pool size.
-The administrator may shrink the pool of preallocated huge pages for
+The administrator may shrink the pool of persistent huge pages for
the default huge page size by setting the nr_hugepages sysctl to a
smaller value. The kernel will attempt to balance the freeing of huge pages
-across all on-line nodes. Any free huge pages on the selected nodes will
-be freed back to the buddy allocator.
-
-Caveat: Shrinking the pool via nr_hugepages such that it becomes less
-than the number of huge pages in use will convert the balance to surplus
-huge pages even if it would exceed the overcommit value. As long as
-this condition holds, however, no more surplus huge pages will be
-allowed on the system until one of the two sysctls are increased
-sufficiently, or the surplus huge pages go out of use and are freed.
+across all nodes in the memory policy of the task modifying nr_hugepages.
+Any free huge pages on the selected nodes will be freed back to the kernel's
+normal page pool.
+
+Caveat: Shrinking the persistent huge page pool via nr_hugepages such that
+it becomes less than the number of huge pages in use will convert the balance
+of the in-use huge pages to surplus huge pages. This will occur even if
+the number of surplus pages would exceed the overcommit value. As long as
+this condition holds--that is, until nr_hugepages+nr_overcommit_hugepages is
+increased sufficiently, or the surplus huge pages go out of use and are freed--
+no more surplus huge pages will be allowed to be allocated.
With support for multiple huge page pools at run-time available, much of
-the huge page userspace interface has been duplicated in sysfs. The above
-information applies to the default huge page size which will be
-controlled by the /proc interfaces for backwards compatibility. The root
-huge page control directory in sysfs is:
+the huge page userspace interface in /proc/sys/vm has been duplicated in sysfs.
+The /proc interfaces discussed above have been retained for backwards
+compatibility. The root huge page control directory in sysfs is:
/sys/kernel/mm/hugepages
For each huge page size supported by the running kernel, a subdirectory
-will exist, of the form
+will exist, of the form:
hugepages-${size}kB
@@ -159,6 +162,98 @@ Inside each of these directories, the sa
which function as described above for the default huge page-sized case.
+
+Interaction of Task Memory Policy with Huge Page Allocation/Freeing:
+
+Whether huge pages are allocated and freed via the /proc interface or
+the /sysfs interface, the NUMA nodes from which huge pages are allocated
+or freed are controlled by the NUMA memory policy of the task that modifies
+the nr_hugepages parameter. [nr_overcommit_hugepages is a global limit.]
+
+The recommended method to allocate or free huge pages to/from the kernel
+huge page pool, using the nr_hugepages example above, is:
+
+ numactl --interleave <node-list> echo 20 >/proc/sys/vm/nr_hugepages
+
+or, more succinctly:
+
+ numactl -m <node-list> echo 20 >/proc/sys/vm/nr_hugepages
+
+This will allocate or free abs(20 - nr_hugepages) huge pages to or from
+the nodes specified in <node-list>, depending on whether nr_hugepages is
+initially less than or greater than 20, respectively. No huge pages will
+be allocated or freed on any node not included in the specified <node-list>.
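+
+For example [an illustrative sketch], if the pool currently contains 8
+default sized persistent huge pages, then:
+
+	numactl -m 0,2 echo 20 >/proc/sys/vm/nr_hugepages
+
+will attempt to allocate the additional 12 huge pages interleaved across
+nodes 0 and 2 only, subject to sufficient contiguous memory on those nodes.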
+
+Any memory policy mode--bind, preferred, local or interleave--may be
+used. The effect on persistent huge page allocation is as follows:
+
+1) Regardless of mempolicy mode [see Documentation/vm/numa_memory_policy.txt],
+ persistent huge pages will be distributed across the node or nodes
+ specified in the mempolicy as if "interleave" had been specified.
+ However, if a node in the policy does not contain sufficient contiguous
+ memory for a huge page, the allocation will not "fallback" to the nearest
+ neighbor node with sufficient contiguous memory. To do this would cause
+ undesirable imbalance in the distribution of the huge page pool, or
+ possibly, allocation of persistent huge pages on nodes not allowed by
+ the task's memory policy.
+
+2) One or more nodes may be specified with the bind or interleave policy.
+ If more than one node is specified with the preferred policy, only the
+ lowest numeric id will be used. Local policy will select the node where
+ the task is running at the time the nodes_allowed mask is constructed.
+
+3) For local policy to be deterministic, the task must be bound to a cpu or
+ cpus in a single node. Otherwise, the task could be migrated to some
+ other node at any time after launch and the resulting node will be
+ indeterminate. Thus, local policy is not very useful for this purpose.
+ Any of the other mempolicy modes may be used to specify a single node.
+
+4) The nodes allowed mask will be derived from any non-default task mempolicy,
+ whether this policy was set explicitly by the task itself or one of its
+ ancestors, such as numactl. This means that if the task is invoked from a
+ shell with non-default policy, that policy will be used. One can specify a
+ node list of "all" with numactl --interleave or --membind [-m] to achieve
+ interleaving over all nodes in the system or cpuset [see example below].
+
+5) Any task mempolicy specified--e.g., using numactl--will be constrained by
+ the resource limits of any cpuset in which the task runs. Thus, there will
+ be no way for a task with non-default policy running in a cpuset with a
+ subset of the system nodes to allocate huge pages outside the cpuset
+ without first moving to a cpuset that contains all of the desired nodes.
+
+6) Boot-time huge page allocation attempts to distribute the requested number
+ of huge pages over all on-line nodes.
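+
+For example [a sketch, per item 4 above], to interleave the pool over all
+nodes allowed to the task:
+
+	numactl --interleave=all echo 20 >/proc/sys/vm/nr_hugepages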
+
+Per Node Hugepages Attributes
+
+A subset of the contents of the root huge page control directory in sysfs,
+described above, has been replicated under each "node" system device in:
+
+ /sys/devices/system/node/node[0-9]*/hugepages/
+
+Under this directory, the subdirectory for each supported huge page size
+contains the following attribute files:
+
+ nr_hugepages
+ free_hugepages
+ surplus_hugepages
+
+The "free_" and "surplus_" attribute files are read-only. They return the number
+of free and surplus [overcommitted] huge pages, respectively, on the parent
+node.
+
+The nr_hugepages attribute will return the total number of huge pages on the
+specified node. When this attribute is written, the number of persistent huge
+pages on the parent node will be adjusted to the specified value, if sufficient
+resources exist, regardless of the task's mempolicy or cpuset constraints.
+
+Note that the numbers of overcommit and reserve pages remain global quantities,
+as we don't know until fault time, when the faulting task's mempolicy is applied,
+from which node the huge page allocation will be attempted.
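+
+For example, on a platform whose default huge page size is 2MB [a
+hypothetical configuration--substitute the node id and page size for your
+system], the following will attempt to adjust the number of persistent
+huge pages on node 2 to 8, ignoring the writing task's mempolicy and
+cpuset:
+
+	echo 8 > \
+	   /sys/devices/system/node/node2/hugepages/hugepages-2048kB/nr_hugepages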
+
+
+Using Huge Pages:
+
If the user applications are going to request huge pages using mmap system
call, then it is required that system administrator mount a file system of
type hugetlbfs:
@@ -206,9 +301,11 @@ map_hugetlb.c.
* requesting huge pages.
*
* For the ia64 architecture, the Linux kernel reserves Region number 4 for
- * huge pages. That means the addresses starting with 0x800000... will need
- * to be specified. Specifying a fixed address is not required on ppc64,
- * i386 or x86_64.
+ * huge pages. That means that if one requires a fixed address, a huge page
+ * aligned address starting with 0x800000... will be required. If a fixed
+ * address is not required, the kernel will select an address in the proper
+ * range.
+ * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
*
* Note: The default shared memory limit is quite low on many kernels,
* you may need to increase it via:
@@ -237,14 +334,8 @@ map_hugetlb.c.
#define dprintf(x) printf(x)
-/* Only ia64 requires this */
-#ifdef __ia64__
-#define ADDR (void *)(0x8000000000000000UL)
-#define SHMAT_FLAGS (SHM_RND)
-#else
-#define ADDR (void *)(0x0UL)
+#define ADDR (void *)(0x0UL) /* let kernel choose address */
#define SHMAT_FLAGS (0)
-#endif
int main(void)
{
@@ -302,10 +393,12 @@ int main(void)
* example, the app is requesting memory of size 256MB that is backed by
* huge pages.
*
- * For ia64 architecture, Linux kernel reserves Region number 4 for huge pages.
- * That means the addresses starting with 0x800000... will need to be
- * specified. Specifying a fixed address is not required on ppc64, i386
- * or x86_64.
+ * For the ia64 architecture, the Linux kernel reserves Region number 4 for
+ * huge pages. That means that if one requires a fixed address, a huge page
+ * aligned address starting with 0x800000... will be required. If a fixed
+ * address is not required, the kernel will select an address in the proper
+ * range.
+ * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
*/
#include <stdlib.h>
#include <stdio.h>
@@ -317,14 +410,8 @@ int main(void)
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
-/* Only ia64 requires this */
-#ifdef __ia64__
-#define ADDR (void *)(0x8000000000000000UL)
-#define FLAGS (MAP_SHARED | MAP_FIXED)
-#else
-#define ADDR (void *)(0x0UL)
+#define ADDR (void *)(0x0UL) /* let kernel choose address */
#define FLAGS (MAP_SHARED)
-#endif
void check_bytes(char *addr)
{
^ permalink raw reply [flat|nested] 44+ messages in thread
* [PATCH 1/3] hugetlb: use only nodes with memory for huge pages
2009-09-09 16:31 ` Lee Schermerhorn
@ 2009-09-09 16:32 ` Lee Schermerhorn
-1 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-09 16:32 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
PATCH 1/3 hugetlb: use only nodes with memory
Against: 2.6.31-rc7-mmotm-090827-1651
Register per node hstate sysfs attributes only for nodes with
memory. Suggested by David Rientjes.
A subsequent patch will handle adding/removing of per node hstate
sysfs attributes when nodes transition to/from memoryless state
via memory hotplug.
NOTE: this patch has not been tested with memoryless nodes.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Documentation/vm/hugetlbpage.txt | 12 ++++++------
mm/hugetlb.c | 38 +++++++++++++++++++++++---------------
2 files changed, 29 insertions(+), 21 deletions(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/mm/hugetlb.c 2009-09-09 11:57:37.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/mm/hugetlb.c 2009-09-09 11:57:38.000000000 -0400
@@ -942,14 +942,14 @@ static void return_unused_surplus_pages(
/*
* We want to release as many surplus pages as possible, spread
- * evenly across all nodes. Iterate across all nodes until we
- * can no longer free unreserved surplus pages. This occurs when
- * the nodes with surplus pages have no free pages.
- * free_pool_huge_page() will balance the the frees across the
- * on-line nodes for us and will handle the hstate accounting.
+ * evenly across all nodes with memory. Iterate across these nodes
+ * until we can no longer free unreserved surplus pages. This occurs
+ * when the nodes with surplus pages have no free pages.
+ * free_pool_huge_page() will balance the freed pages across the
+ * on-line nodes with memory and will handle the hstate accounting.
*/
while (nr_pages--) {
- if (!free_pool_huge_page(h, &node_online_map, 1))
+ if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
break;
}
}
@@ -1053,7 +1053,7 @@ static struct page *alloc_huge_page(stru
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
- int nr_nodes = nodes_weight(node_online_map);
+ int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
while (nr_nodes) {
void *addr;
@@ -1114,7 +1114,8 @@ static void __init hugetlb_hstate_alloc_
if (h->order >= MAX_ORDER) {
if (!alloc_bootmem_huge_page(h))
break;
- } else if (!alloc_fresh_huge_page(h, &node_online_map))
+ } else if (!alloc_fresh_huge_page(h,
+ &node_states[N_HIGH_MEMORY]))
break;
}
h->max_huge_pages = i;
@@ -1165,7 +1166,7 @@ static void try_to_free_low(struct hstat
return;
if (!nodes_allowed)
- nodes_allowed = &node_online_map;
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
for (i = 0; i < MAX_NUMNODES; ++i) {
struct page *page, *next;
@@ -1268,7 +1269,7 @@ static unsigned long set_max_huge_pages(
printk(KERN_WARNING "%s unable to allocate nodes allowed mask "
"for huge page allocation. Falling back to default.\n",
current->comm);
- nodes_allowed = &node_online_map;
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
}
/*
@@ -1331,7 +1332,7 @@ static unsigned long set_max_huge_pages(
out:
ret = persistent_huge_pages(h);
spin_unlock(&hugetlb_lock);
- if (nodes_allowed != &node_online_map)
+ if (nodes_allowed != &node_states[N_HIGH_MEMORY])
kfree(nodes_allowed);
return ret;
}
@@ -1560,7 +1561,7 @@ void hugetlb_unregister_node(struct node
struct node_hstate *nhs = &node_hstates[node->sysdev.id];
if (!nhs->hugepages_kobj)
- return;
+ return; /* no hstate attributes */
for_each_hstate(h)
if (nhs->hstate_kobjs[h - hstates]) {
@@ -1572,6 +1573,10 @@ void hugetlb_unregister_node(struct node
nhs->hugepages_kobj = NULL;
}
+/*
+ * Unregister hstate sysfs attributes from all nodes that have them.
+ * Visit all possible node ids up to maximum node id seen [nr_node_ids].
+ */
static void hugetlb_unregister_all_nodes(void)
{
int nid;
@@ -1610,11 +1615,14 @@ void hugetlb_register_node(struct node *
}
}
+/*
+ * register hstate sysfs attributes for each on-line node with memory
+ */
static void hugetlb_register_all_nodes(void)
{
int nid;
- for (nid = 0; nid < nr_node_ids; nid++) {
+ for_each_node_state(nid, N_HIGH_MEMORY) {
struct node *node = &node_devices[nid];
if (node->sysdev.id == nid)
hugetlb_register_node(node);
@@ -1704,8 +1712,8 @@ void __init hugetlb_add_hstate(unsigned
h->free_huge_pages = 0;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
- h->next_nid_to_alloc = first_node(node_online_map);
- h->next_nid_to_free = first_node(node_online_map);
+ h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
+ h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
Index: linux-2.6.31-rc7-mmotm-090827-1651/Documentation/vm/hugetlbpage.txt
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/Documentation/vm/hugetlbpage.txt 2009-09-09 11:57:37.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/Documentation/vm/hugetlbpage.txt 2009-09-09 11:57:38.000000000 -0400
@@ -90,11 +90,11 @@ huge page pool to 20, allocating or free
On a NUMA platform, the kernel will attempt to distribute the huge page pool
over the set of allowed nodes specified by the NUMA memory policy of the
task that modifies nr_hugepages. The default for the allowed nodes--when the
-task has default memory policy--is all on-line nodes. Allowed nodes with
-insufficient available, contiguous memory for a huge page will be silently
-skipped when allocating persistent huge pages. See the discussion below of
-the interaction of task memory policy, cpusets and per node attributes with
-the allocation and freeing of persistent huge pages.
+task has default memory policy--is all on-line nodes with memory. Allowed
+nodes with insufficient available, contiguous memory for a huge page will be
+silently skipped when allocating persistent huge pages. See the discussion
+below of the interaction of task memory policy, cpusets and per node attributes
+with the allocation and freeing of persistent huge pages.
The success or failure of huge page allocation depends on the amount of
physically contiguous memory that is present in system at the time of the
@@ -222,7 +222,7 @@ used. The effect on persistent huge pag
without first moving to a cpuset that contains all of the desired nodes.
6) Boot-time huge page allocation attempts to distribute the requested number
- of huge pages over all on-line nodes.
+ of huge pages over all on-line nodes with memory.
Per Node Hugepages Attributes
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [PATCH 1/3] hugetlb: use only nodes with memory for huge pages
2009-09-09 16:32 ` Lee Schermerhorn
(?)
@ 2009-09-10 23:33 ` Andrew Morton
2009-09-11 13:54 ` Lee Schermerhorn
-1 siblings, 1 reply; 44+ messages in thread
From: Andrew Morton @ 2009-09-10 23:33 UTC (permalink / raw)
To: Lee Schermerhorn
Cc: linux-mm, linux-numa, mel, randy.dunlap, nacc, rientjes, agl,
apw, eric.whitney
I ducked these three. It's already a bit late for the first six, and
that first six looked a bit half-baked to me.
^ permalink raw reply [flat|nested] 44+ messages in thread
* Re: [PATCH 1/3] hugetlb: use only nodes with memory for huge pages
2009-09-10 23:33 ` Andrew Morton
@ 2009-09-11 13:54 ` Lee Schermerhorn
0 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-11 13:54 UTC (permalink / raw)
To: Andrew Morton
Cc: linux-mm, linux-numa, mel, randy.dunlap, nacc, rientjes, agl,
apw, eric.whitney
On Thu, 2009-09-10 at 16:33 -0700, Andrew Morton wrote:
> I ducked these three. It's already a bit late for the first six, and
> that first six looked a bit half-baked to me.
That's appropriate. These 3 were new in V6, and I have some cleanup,
better comments, ... queued up. But the others: "half-baked" ??? More
like 3/4, methinks. :)
Anyway, I'm glad to see you've added them to mmotm. I've been testing
each version of the series on x86_64 and, occasionally, ia64, with the
libhugetlbfs regression tests and ad hoc testing of the mempolicy based
constraint and the per node attributes. I believe that Mel has been
testing on ppc. But, they definitely can benefit from more exposure.
Lee
^ permalink raw reply [flat|nested] 44+ messages in thread
* [PATCH 2/3] hugetlb: handle memory hot-plug events
2009-09-09 16:31 ` Lee Schermerhorn
@ 2009-09-09 16:32 ` Lee Schermerhorn
-1 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-09 16:32 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
PATCH 2/3 hugetlb: per node attributes -- handle memory hot plug
Against: 2.6.31-rc7-mmotm-090827-1651
Register per node hstate attributes only for nodes with memory.
With Memory Hotplug, memory can be added to a memoryless node and
a node with memory can become memoryless. Therefore, add a memory
on/off-line notifier callback to [un]register a node's attributes
on transition to/from memoryless state.
N.B., Only tested build, boot, libhugetlbfs regression.
i.e., no memory hotplug testing.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Documentation/vm/hugetlbpage.txt | 7 ++--
drivers/base/node.c | 56 ++++++++++++++++++++++++++++++++++-----
2 files changed, 54 insertions(+), 9 deletions(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/drivers/base/node.c 2009-09-09 11:57:37.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c 2009-09-09 11:57:39.000000000 -0400
@@ -180,11 +180,12 @@ static SYSDEV_ATTR(distance, S_IRUGO, no
/*
* hugetlbfs per node attributes registration interface:
* When/if hugetlb[fs] subsystem initializes [sometime after this module],
- * it will register it's per node attributes for all nodes on-line at that
- * point. It will also call register_hugetlbfs_with_node(), below, to
+ * it will register its per node attributes for all on-line nodes with
+ * memory. It will also call register_hugetlbfs_with_node(), below, to
* register it's attribute registration functions with this node driver.
* Once these hooks have been initialized, the node driver will call into
- * the hugetlb module to [un]register attributes for hot-plugged nodes.
+ * the hugetlb module to [un]register attributes for hot-plugged nodes
+ * with memory and on transitions to/from memoryless state.
*/
NODE_REGISTRATION_FUNC __hugetlb_register_node;
NODE_REGISTRATION_FUNC __hugetlb_unregister_node;
@@ -231,7 +232,9 @@ int register_node(struct node *node, int
sysdev_create_file(&node->sysdev, &attr_distance);
scan_unevictable_register_node(node);
- hugetlb_register_node(node);
+
+ if (node_state(node->sysdev.id, N_HIGH_MEMORY))
+ hugetlb_register_node(node);
}
return error;
}
@@ -252,7 +255,7 @@ void unregister_node(struct node *node)
sysdev_remove_file(&node->sysdev, &attr_distance);
scan_unevictable_unregister_node(node);
- hugetlb_unregister_node(node);
+ hugetlb_unregister_node(node); /* no-op, if memoryless node */
sysdev_unregister(&node->sysdev);
}
@@ -382,8 +385,45 @@ static int link_mem_sections(int nid)
}
return err;
}
+
+/*
+ * Handle per node hstate attribute [un]registration on transitions
+ * to/from memoryless state.
+ */
+
+static int node_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_notify *mnb = arg;
+ int nid = mnb->status_change_nid;
+
+ switch (action) {
+ case MEM_ONLINE: /* memory successfully brought online */
+ if (nid != NUMA_NO_NODE)
+ hugetlb_register_node(&node_devices[nid]);
+ break;
+ case MEM_OFFLINE: /* or offline */
+ if (nid != NUMA_NO_NODE)
+ hugetlb_unregister_node(&node_devices[nid]);
+ break;
+ case MEM_GOING_ONLINE:
+ case MEM_GOING_OFFLINE:
+ case MEM_CANCEL_ONLINE:
+ case MEM_CANCEL_OFFLINE:
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
#else
static int link_mem_sections(int nid) { return 0; }
+
+static inline int node_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ return NOTIFY_OK;
+}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
int register_one_node(int nid)
@@ -497,13 +537,17 @@ static int node_states_init(void)
return err;
}
+#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
static int __init register_node_type(void)
{
int ret;
ret = sysdev_class_register(&node_class);
- if (!ret)
+ if (!ret) {
ret = node_states_init();
+ hotplug_memory_notifier(node_memory_callback,
+ NODE_CALLBACK_PRI);
+ }
/*
* Note: we're not going to unregister the node class if we fail
Index: linux-2.6.31-rc7-mmotm-090827-1651/Documentation/vm/hugetlbpage.txt
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/Documentation/vm/hugetlbpage.txt 2009-09-09 11:57:38.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/Documentation/vm/hugetlbpage.txt 2009-09-09 11:57:39.000000000 -0400
@@ -227,7 +227,8 @@ used. The effect on persistent huge pag
Per Node Hugepages Attributes
A subset of the contents of the root huge page control directory in sysfs,
-described above, has been replicated under each "node" system device in:
+described above, will be replicated under the system device of each
+NUMA node with memory in:
/sys/devices/system/node/node[0-9]*/hugepages/
@@ -248,8 +249,8 @@ pages on the parent node will be adjuste
resources exist, regardless of the task's mempolicy or cpuset constraints.
Note that the number of overcommit and reserve pages remain global quantities,
-as we don't know until fault time, when the faulting task's mempolicy is applied,
-from which node the huge page allocation will be attempted.
+as we don't know until fault time, when the faulting task's mempolicy is
+applied, from which node the huge page allocation will be attempted.
Using Huge Pages:
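For illustration (not part of this patch), a minimal userspace sketch
that drives the new per node attribute. The hugepages-2048kB directory
name assumes a 2MB default huge page size, and node1 is just an example:

#include <stdio.h>

int main(void)
{
	/* assumed path: 2MB hstate on node 1; adjust both to your system */
	const char *path = "/sys/devices/system/node/node1/"
			   "hugepages/hugepages-2048kB/nr_hugepages";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "8\n");	/* request 8 persistent huge pages on node 1 */
	return fclose(f) ? 1 : 0;
}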
^ permalink raw reply [flat|nested] 44+ messages in thread
* [PATCH 3/3] hugetlb: offload per node attribute registrations
2009-09-09 16:31 ` Lee Schermerhorn
@ 2009-09-09 16:32 ` Lee Schermerhorn
-1 siblings, 0 replies; 44+ messages in thread
From: Lee Schermerhorn @ 2009-09-09 16:32 UTC (permalink / raw)
To: linux-mm, linux-numa
Cc: akpm, Mel Gorman, Randy Dunlap, Nishanth Aravamudan,
David Rientjes, Adam Litke, Andy Whitcroft, eric.whitney
PATCH 3/3 hugetlb: offload [un]registration of sysfs attr to worker thread
Against: 2.6.31-rc7-mmotm-090827-1651
This patch offloads the registration and unregistration of per node
hstate sysfs attributes to a worker thread rather than attempt the
allocation/attachment or detachment/freeing of the attributes in
the context of the memory hotplug handler.
N.B., only build, boot, and libhugetlbfs regression tested;
i.e., no memory hotplug testing.
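(The shape of the deferral, as a self-contained sketch; example_node
here is a stand-in for struct node, not the real thing. The work item
lives in the per node structure, so the restricted notifier context only
has to kick it, and the allocation/sysfs work happens later in process
context:)

#include <linux/init.h>
#include <linux/kernel.h>	/* container_of() */
#include <linux/workqueue.h>

struct example_node {			/* stand-in for struct node */
	struct work_struct node_work;
	int nid;
};

static struct example_node enode;

static void example_node_work(struct work_struct *work)
{
	struct example_node *n =
		container_of(work, struct example_node, node_work);

	/* process context: safe to allocate and [un]register attributes */
	(void)n->nid;
}

static int __init example_work_init(void)
{
	INIT_WORK(&enode.node_work, example_node_work);	/* once, at init */
	return 0;
}

static void example_notifier_path(void)
{
	/* from the memory hotplug callback: just schedule, don't allocate */
	schedule_work(&enode.node_work);	/* example_node_work() runs later */
}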
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
drivers/base/node.c | 34 ++++++++++++++++++++++++++++------
include/linux/node.h | 5 +++++
2 files changed, 33 insertions(+), 6 deletions(-)
Index: linux-2.6.31-rc7-mmotm-090827-1651/include/linux/node.h
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/include/linux/node.h 2009-09-09 11:57:37.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/include/linux/node.h 2009-09-09 11:57:39.000000000 -0400
@@ -21,9 +21,14 @@
#include <linux/sysdev.h>
#include <linux/cpumask.h>
+#include <linux/workqueue.h>
struct node {
struct sys_device sysdev;
+
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+ struct work_struct node_work;
+#endif
};
struct memory_block;
Index: linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c
===================================================================
--- linux-2.6.31-rc7-mmotm-090827-1651.orig/drivers/base/node.c 2009-09-09 11:57:39.000000000 -0400
+++ linux-2.6.31-rc7-mmotm-090827-1651/drivers/base/node.c 2009-09-09 11:57:39.000000000 -0400
@@ -390,6 +390,20 @@ static int link_mem_sections(int nid)
* Handle per node hstate attribute [un]registration on transitions
* to/from memoryless state.
*/
+static void node_hugetlb_work(struct work_struct *work)
+{
+ struct node *node = container_of(work, struct node, node_work);
+
+ if (node_state(node->sysdev.id, N_HIGH_MEMORY))
+ hugetlb_register_node(node);
+ else
+ hugetlb_unregister_node(node);
+}
+
+static void init_node_hugetlb_work(int nid)
+{
+ INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
+}
static int node_memory_callback(struct notifier_block *self,
unsigned long action, void *arg)
@@ -398,14 +412,16 @@ static int node_memory_callback(struct n
int nid = mnb->status_change_nid;
switch (action) {
- case MEM_ONLINE: /* memory successfully brought online */
+ case MEM_ONLINE:
+ case MEM_OFFLINE:
+ /*
+ * offload per node hstate [un]registration to a worker thread
+ * when transitioning to/from memoryless state.
+ */
if (nid != NUMA_NO_NODE)
- hugetlb_register_node(&node_devices[nid]);
- break;
- case MEM_OFFLINE: /* or offline */
- if (nid != NUMA_NO_NODE)
- hugetlb_unregister_node(&node_devices[nid]);
+ schedule_work(&node_devices[nid].node_work);
break;
+
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_ONLINE:
@@ -424,6 +440,9 @@ static inline int node_memory_callback(s
{
return NOTIFY_OK;
}
+
+static void init_node_hugetlb_work(int nid) { }
+
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
int register_one_node(int nid)
@@ -448,6 +467,9 @@ int register_one_node(int nid)
/* link memory sections under this node */
error = link_mem_sections(nid);
+
+ /* initialize work item for memory hot plug */
+ init_node_hugetlb_work(nid);
}
return error;
^ permalink raw reply [flat|nested] 44+ messages in thread