From: "Tobin C. Harding" <tobin@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Tobin C. Harding" <tobin@kernel.org>,
	Christopher Lameter <cl@linux.com>,
	Pekka Enberg <penberg@cs.helsinki.fi>,
	Matthew Wilcox <willy@infradead.org>,
	Tycho Andersen <tycho@tycho.ws>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [RFC 15/15] slub: Enable balancing slab objects across nodes
Date: Fri,  8 Mar 2019 15:14:26 +1100
Message-ID: <20190308041426.16654-16-tobin@kernel.org>
In-Reply-To: <20190308041426.16654-1-tobin@kernel.org>

We have just implemented Slab Movable Objects (SMO).  On NUMA systems
slabs can become unbalanced, i.e. many objects on one node while other
nodes have few objects.  Using SMO we can balance the objects across
all the nodes.

The algorithm used is as follows:

 1. Move all objects to node 0 (this has the effect of defragmenting the
    cache).

 2. Calculate the desired number of slabs for each node (this is done
    using the approximation nr_slabs / nr_nodes).

 3. Loop over the nodes moving the desired number of slabs from node 0
    to the node.
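
For example, on a system with 4 nodes and 10 slabs after step 1,
desired_nr_slabs_per_node is 10 / 4 = 2 (integer division), so two
slabs are moved to each of nodes 1-3 and node 0 keeps the remaining 4.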

The feature is conditionally built in with CONFIG_SMO_NODE because we
need the full list (we enable SLUB_DEBUG to get this).  A future
version may separate full list tracking out of SLUB_DEBUG.

Expose this functionality to userspace via a new sysfs entry:

       /sys/kernel/slab/<cache>/balance

Writing '1' to this file triggers a balance; no other value is
accepted.
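
A balance can then be triggered from the command line with:

       echo 1 > /sys/kernel/slab/<cache>/balance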

This feature relies on SMO being enabled for the cache; this is done,
after the isolate/migrate functions have been defined, with a call to:

	kmem_cache_setup_mobility(s, isolate, migrate)
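
As a minimal sketch of how a cache might opt in (assuming the
isolate()/migrate() signatures introduced earlier in this series; the
cache_isolate/cache_migrate names are illustrative only):

	static void *cache_isolate(struct kmem_cache *s, void **objs,
				   int nr)
	{
		/* Pin each object so it cannot be freed while it is
		 * being migrated; the return value is passed through
		 * to the migrate callback as 'private'.
		 */
		return NULL;
	}

	static void cache_migrate(struct kmem_cache *s, void **objs,
				  int nr, int node, void *private)
	{
		/* Allocate replacement objects on @node, copy the
		 * contents across, update any references, then free
		 * the old objects.
		 */
	}

	...

	kmem_cache_setup_mobility(s, cache_isolate, cache_migrate);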

Signed-off-by: Tobin C. Harding <tobin@kernel.org>
---
 mm/slub.c | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 115 insertions(+)

diff --git a/mm/slub.c b/mm/slub.c
index ac9b8f592e10..65cf305a70c3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4584,6 +4584,104 @@ static unsigned long __move_all_objects_to(struct kmem_cache *s, int node)
 
 	return left;
 }
+
+/*
+ * __move_n_slabs() - Attempt to move 'num' slabs from @node to @target_node.
+ * Return: The number of slabs moved or error code.
+ */
+static long __move_n_slabs(struct kmem_cache *s, int node, int target_node,
+			   long num)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+	LIST_HEAD(move_list);
+	struct page *page, *page2;
+	unsigned long flags;
+	void **scratch;
+	long done = 0;
+
+	if (node == target_node)
+		return -EINVAL;
+
+	scratch = alloc_scratch(s);
+	if (!scratch)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry_safe(page, page2, &n->full, lru) {
+		if (!slab_trylock(page))
+			/* Busy slab. Get out of the way */
+			continue;
+
+		list_move(&page->lru, &move_list);
+		page->frozen = 1;
+		slab_unlock(page);
+
+		if (++done >= num)
+			break;
+	}
+	spin_unlock_irqrestore(&n->list_lock, flags);
+
+	list_for_each_entry(page, &move_list, lru) {
+		if (page->inuse)
+			__move(page, scratch, target_node);
+	}
+	kfree(scratch);
+
+	/* Inspect results and dispose of pages */
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry_safe(page, page2, &move_list, lru) {
+		list_del(&page->lru);
+		slab_lock(page);
+		page->frozen = 0;
+
+		if (page->inuse) {
+			/*
+			 * This is best effort only; if the slab still
+			 * has objects, put it back on the partial list.
+			 */
+			n->nr_partial++;
+			list_add_tail(&page->lru, &n->partial);
+			slab_unlock(page);
+		} else {
+			slab_unlock(page);
+			discard_slab(s, page);
+		}
+	}
+	spin_unlock_irqrestore(&n->list_lock, flags);
+
+	return done;
+}
+
+/*
+ * __balance_nodes_partial() - Balance partial objects.
+ * @s: The cache we are working on.
+ *
+ * Attempt to balance the objects that are in partial slabs evenly
+ * across all nodes.
+ */
+static void __balance_nodes_partial(struct kmem_cache *s)
+{
+	struct kmem_cache_node *n = get_node(s, 0);
+	unsigned long desired_nr_slabs_per_node;
+	unsigned long nr_slabs;
+	int nr_nodes = 0;
+	int nid;
+
+	(void)__move_all_objects_to(s, 0);
+
+	for_each_node_state(nid, N_NORMAL_MEMORY)
+		nr_nodes++;
+
+	nr_slabs = atomic_long_read(&n->nr_slabs);
+	desired_nr_slabs_per_node = nr_slabs / nr_nodes;
+
+	for_each_node_state(nid, N_NORMAL_MEMORY) {
+		if (nid == 0)
+			continue;
+
+		__move_n_slabs(s, 0, nid, desired_nr_slabs_per_node);
+	}
+}
 #endif
 
 /**
@@ -5836,6 +5934,22 @@ static ssize_t move_store(struct kmem_cache *s, const char *buf, size_t length)
 	return length;
 }
 SLAB_ATTR(move);
+
+static ssize_t balance_show(struct kmem_cache *s, char *buf)
+{
+	return 0;
+}
+
+static ssize_t balance_store(struct kmem_cache *s,
+			     const char *buf, size_t length)
+{
+	if (buf[0] == '1')
+		__balance_nodes_partial(s);
+	else
+		return -EINVAL;
+	return length;
+}
+SLAB_ATTR(balance);
 #endif	/* CONFIG_SMO_NODE */
 
 #ifdef CONFIG_NUMA
@@ -5964,6 +6078,7 @@ static struct attribute *slab_attrs[] = {
 	&shrink_attr.attr,
 #ifdef CONFIG_SMO_NODE
 	&move_attr.attr,
+	&balance_attr.attr,
 #endif
 	&slabs_cpu_partial_attr.attr,
 #ifdef CONFIG_SLUB_DEBUG
-- 
2.21.0

