linux-kernel.vger.kernel.org archive mirror
From: Christoph Lameter <cl@linux.com>
To: akpm@linux-foundation.org
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org, penberg@kernel.org,
	iamjoonsoo.kim@lge.com, Jesper Dangaard Brouer <brouer@redhat.com>
Subject: [RFC 2/3] slub: Support for array operations
Date: Fri, 23 Jan 2015 15:37:29 -0600
Message-ID: <20150123213735.707854993@linux.com>
In-Reply-To: <20150123213727.142554068@linux.com>

[-- Attachment #1: array_alloc_slub --]
[-- Type: text/plain, Size: 5154 bytes --]

The major portions are there, but there is no support yet for
directly allocating per-cpu objects. More sophisticated code could
also be added to exploit the batch freeing.

Signed-off-by: Christoph Lameter <cl@linux.com>
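
A rough usage sketch for context (hypothetical caller, not part of this
patch; it assumes the kmem_cache_alloc_array() entry point added in
patch 1/3 returns the number of objects it placed into the array):

#include <linux/errno.h>
#include <linux/slab.h>

#define EXAMPLE_BATCH 16

static int example_fill_and_drain(struct kmem_cache *cache, gfp_t gfp)
{
	void *objs[EXAMPLE_BATCH];
	int nr;

	/* One call instead of EXAMPLE_BATCH individual fastpath trips */
	nr = kmem_cache_alloc_array(cache, gfp, EXAMPLE_BATCH, objs);
	if (!nr)
		return -ENOMEM;

	/* ... work on objs[0] .. objs[nr - 1] ... */

	/* Hand the whole batch back in one call */
	kmem_cache_free_array(cache, nr, objs);
	return 0;
}

The point of the batch interface is to replace a loop of individual
kmem_cache_alloc()/kmem_cache_free() calls with a single walk of the
partial list or a single page allocation per batch.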

Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h
+++ linux/include/linux/slub_def.h
@@ -110,4 +110,5 @@ static inline void sysfs_slab_remove(str
 }
 #endif
 
+#define _HAVE_SLAB_ALLOCATOR_ARRAY_OPERATIONS
 #endif /* _LINUX_SLUB_DEF_H */
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1379,13 +1379,9 @@ static void setup_object(struct kmem_cac
 		s->ctor(object);
 }
 
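+/*
+ * __new_slab() allocates and accounts a slab page but does not build a
+ * freelist for it. This lets the array allocation path hand out every
+ * object of a fresh page directly. new_slab() below keeps the old
+ * behavior on top of it.
+ */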
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+static struct page *__new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	void *start;
-	void *p;
-	int order;
-	int idx;
 
 	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
 		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
@@ -1394,33 +1390,42 @@ static struct page *new_slab(struct kmem
 
 	page = allocate_slab(s,
 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
+	if (page) {
+		inc_slabs_node(s, page_to_nid(page), page->objects);
+		page->slab_cache = s;
+		__SetPageSlab(page);
+		if (page->pfmemalloc)
+			SetPageSlabPfmemalloc(page);
+	}
 
-	order = compound_order(page);
-	inc_slabs_node(s, page_to_nid(page), page->objects);
-	page->slab_cache = s;
-	__SetPageSlab(page);
-	if (page->pfmemalloc)
-		SetPageSlabPfmemalloc(page);
-
-	start = page_address(page);
-
-	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << order);
-
-	for_each_object_idx(p, idx, s, start, page->objects) {
-		setup_object(s, page, p);
-		if (likely(idx < page->objects))
-			set_freepointer(s, p, p + s->size);
-		else
-			set_freepointer(s, p, NULL);
-	}
-
-	page->freelist = start;
-	page->inuse = page->objects;
-	page->frozen = 1;
-out:
+	return page;
+}
+
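+/* Allocate a slab page and thread all of its objects onto a freelist */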
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+	struct page *page = __new_slab(s, flags, node);
+
+	if (page) {
+		void *p;
+		int idx;
+		void *start = page_address(page);
+
+		if (unlikely(s->flags & SLAB_POISON))
+			memset(start, POISON_INUSE,
+				PAGE_SIZE << compound_order(page));
+
+		for_each_object_idx(p, idx, s, start, page->objects) {
+			setup_object(s, page, p);
+			if (likely(idx < page->objects))
+				set_freepointer(s, p, p + s->size);
+			else
+				set_freepointer(s, p, NULL);
+		}
+
+		page->freelist = start;
+		page->inuse = page->objects;
+		page->frozen = 1;
+	}
 	return page;
 }
 
@@ -2516,8 +2521,78 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trac
 #endif
 #endif
 
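+/*
+ * Fill the array with objects drained from slabs on the local node's
+ * partial list. The list is manipulated under a single list_lock
+ * acquisition and the scan stops at the first slab that has more free
+ * objects than are still wanted.
+ */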
+int slab_array_alloc_from_partial(struct kmem_cache *s,
+			size_t nr, void **p)
+{
+	void **end = p + nr;
+	struct kmem_cache_node *n = get_node(s, numa_mem_id());
+	int allocated = 0;
+	unsigned long flags;
+	struct page *page, *page2;
+
+	if (!n->nr_partial)
+		return 0;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry_safe(page, page2, &n->partial, lru) {
+		void *freelist;
+
+		if (page->objects - page->inuse > end - p)
+			/* More objects free in page than we want */
+			break;
+		remove_partial(n, page);
+		slab_lock(page);
+		freelist = page->freelist;
+		page->inuse = page->objects;
+		page->freelist = NULL;
+		slab_unlock(page);
+		/* Grab all available objects */
+		while (freelist) {
+			*p++ = freelist;
+			freelist = get_freepointer(s, freelist);
+			allocated++;
+		}
+	}
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return allocated;
+}
+
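+/*
+ * Satisfy large requests straight from the page allocator: as long as
+ * at least a full slab page worth of objects is still wanted, allocate
+ * a fresh page and hand out every object in it. Any remainder smaller
+ * than a page is left to the other allocation paths.
+ */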
+int slab_array_alloc_from_page_allocator(struct kmem_cache *s,
+		gfp_t flags, size_t nr, void **p)
+{
+	void **end = p + nr;
+	int allocated = 0;
+
+	while (end - p >= oo_objects(s->oo)) {
+		struct page *page = __new_slab(s, flags, NUMA_NO_NODE);
+		void *q;
+		int i;
+
+		if (unlikely(!page))
+			break;
+
+		q = page_address(page);
+
+		/* Use all the objects */
+		for (i = 0; i < page->objects; i++) {
+			setup_object(s, page, q);
+			*p++ = q;
+			q += s->size;
+		}
+
+		page->inuse = page->objects;
+		page->freelist = NULL;
+		allocated += page->objects;
+	}
+	return allocated;
+}
+
+int slab_array_alloc_from_local(struct kmem_cache *s,
+		size_t nr, void **p)
+{
+	/* Go for the per cpu partials list first */
+	/* Use the cpu_slab if objects are still needed */
+	return 0;
+}
+
 /*
- * Slow patch handling. This may still be called frequently since objects
+ * Slow path handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
  *
  * So we still attempt to reduce cache line usage. Just take the slab
@@ -2637,6 +2712,14 @@ slab_empty:
 	discard_slab(s, page);
 }
 
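+/*
+ * Free an array of objects. For now every object takes the slow path
+ * individually; batching frees that target the same slab page is left
+ * as a later optimization.
+ */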
+void kmem_cache_free_array(struct kmem_cache *s, size_t nr, void **p)
+{
+	void **end = p + nr;
+
+	for ( ; p < end; p++)
+		__slab_free(s, virt_to_head_page(*p), *p, 0);
+}
+
 /*
  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
  * can perform fastpath freeing without additional function calls.


Thread overview: 14+ messages
2015-01-23 21:37 [RFC 0/3] Slab allocator array operations Christoph Lameter
2015-01-23 21:37 ` [RFC 1/3] Slab infrastructure for " Christoph Lameter
2015-01-27  8:21   ` Joonsoo Kim
2015-01-27 16:57     ` Christoph Lameter
2015-01-28  1:33       ` Joonsoo Kim
2015-01-28 15:30         ` Christoph Lameter
2015-01-29  7:44           ` Joonsoo Kim
2015-02-03 22:55             ` Jesper Dangaard Brouer
2015-01-23 21:37 ` Christoph Lameter [this message]
2015-01-23 21:37 ` [RFC 3/3] Array alloc test code Christoph Lameter
2015-01-23 22:57 ` [RFC 0/3] Slab allocator array operations Andrew Morton
2015-01-24  0:28   ` Christoph Lameter
2015-02-03 23:19     ` Jesper Dangaard Brouer
2015-02-06 18:39       ` Christoph Lameter
