linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] mm/slub: skip node in case there is no slab to acquire
@ 2018-11-08  1:12 Wei Yang
  2018-11-09 20:48 ` Andrew Morton
                   ` (3 more replies)
  0 siblings, 4 replies; 21+ messages in thread
From: Wei Yang @ 2018-11-08  1:12 UTC (permalink / raw)
  To: cl, penberg; +Cc: akpm, linux-mm, Wei Yang

for_each_zone_zonelist() iterates over the zonelist zone by zone, which
means it may visit several zones belonging to the same node, while
get_partial_node() checks for an available slab on a per-node basis
rather than per zone.

This patch skips a node entirely in case get_partial_node() fails to
acquire a slab on that node.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
---
 mm/slub.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index e3629cd7aff1..97a480b5dfb9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1873,7 +1873,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
-		struct kmem_cache_cpu *c)
+		struct kmem_cache_cpu *c, int except)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
@@ -1882,6 +1882,9 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 	enum zone_type high_zoneidx = gfp_zone(flags);
 	void *object;
 	unsigned int cpuset_mems_cookie;
+	nodemask_t nmask = node_states[N_MEMORY];
+
+	node_clear(except, nmask);
 
 	/*
 	 * The defrag ratio allows a configuration of the tradeoffs between
@@ -1908,7 +1911,8 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 	do {
 		cpuset_mems_cookie = read_mems_allowed_begin();
 		zonelist = node_zonelist(mempolicy_slab_node(), flags);
-		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+		for_each_zone_zonelist_nodemask(zone, z, zonelist,
+						high_zoneidx, &nmask) {
 			struct kmem_cache_node *n;
 
 			n = get_node(s, zone_to_nid(zone));
@@ -1926,6 +1930,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 					 */
 					return object;
 				}
+				node_clear(zone_to_nid(zone), nmask);
 			}
 		}
 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
@@ -1951,7 +1956,7 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 	if (object || node != NUMA_NO_NODE)
 		return object;
 
-	return get_any_partial(s, flags, c);
+	return get_any_partial(s, flags, c, searchnode);
 }
 
 #ifdef CONFIG_PREEMPT
-- 
2.15.1

^ permalink raw reply related	[flat|nested] 21+ messages in thread

end of thread, other threads:[~2018-12-24 22:03 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-11-08  1:12 [PATCH] mm/slub: skip node in case there is no slab to acquire Wei Yang
2018-11-09 20:48 ` Andrew Morton
2018-11-09 23:47   ` Wei Yang
2018-11-13  9:12 ` [PATCH v2] " Wei Yang
2018-11-13 13:17 ` [PATCH] " Michal Hocko
2018-11-13 13:26   ` Wei Yang
2018-11-13 13:34     ` Michal Hocko
2018-11-20  3:31 ` [PATCH v2] mm/slub: improve performance by skipping checked node in get_any_partial() Wei Yang
2018-11-22  3:05   ` Andrew Morton
2018-11-22  9:13     ` Wei Yang
2018-11-22 23:41     ` Wei Yang
2018-11-23 13:39       ` Michal Hocko
2018-11-23 13:49         ` Michal Hocko
2018-11-23 15:27           ` Wei Yang
2018-12-20 22:41   ` Andrew Morton
2018-12-21  0:25     ` Alexander Duyck
2018-12-21  3:29       ` Wei Yang
2018-12-21  1:37     ` Christopher Lameter
2018-12-21  1:37       ` Christopher Lameter
2018-12-21  3:33       ` Wei Yang
2018-12-24 22:03       ` Wei Yang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).