All of lore.kernel.org
 help / color / mirror / Atom feed
From: madvenka@linux.microsoft.com
To: gregkh@linuxfoundation.org, pbonzini@redhat.com, rppt@kernel.org,
	jgowans@amazon.com, graf@amazon.de, arnd@arndb.de,
	keescook@chromium.org, stanislav.kinsburskii@gmail.com,
	anthony.yznaga@oracle.com, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, madvenka@linux.microsoft.com,
	jamorris@linux.microsoft.com
Subject: [RFC PATCH v1 05/10] mm/prmem: Implement a buffer allocator for persistent memory
Date: Mon, 16 Oct 2023 18:32:10 -0500	[thread overview]
Message-ID: <20231016233215.13090-6-madvenka@linux.microsoft.com> (raw)
In-Reply-To: <20231016233215.13090-1-madvenka@linux.microsoft.com>

From: "Madhavan T. Venkataraman" <madvenka@linux.microsoft.com>

Implement functions that can allocate and free memory smaller than a page
size.

	- prmem_alloc()
	- prmem_free()

These functions look like kmalloc() and kfree(). However, the only GFP flag
that is processed is __GFP_ZERO to zero out the allocated memory.

To make the implementation simpler, create allocation caches for different
object sizes:

	8, 16, 32, 64, ..., PAGE_SIZE

For a given size, allocate from the appropriate cache. This idea is
borrowed from the kmem allocator.

To fill the cache of a specific size, allocate a page, break it up into
equal sized objects and add the objects to the cache. This is just a very
simple allocator. It does not attempt to do sophisticated things like
cache coloring, coalescing objects that belong to the same page so the
page can be freed, etc.

Signed-off-by: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
---
 include/linux/prmem.h          |  12 ++++
 kernel/prmem/prmem_allocator.c | 112 ++++++++++++++++++++++++++++++++-
 2 files changed, 123 insertions(+), 1 deletion(-)

diff --git a/include/linux/prmem.h b/include/linux/prmem.h
index 108683933c82..1cb4660cf35e 100644
--- a/include/linux/prmem.h
+++ b/include/linux/prmem.h
@@ -50,6 +50,8 @@ struct prmem_region {
 	struct gen_pool_chunk	*chunk;
 };
 
+#define PRMEM_MAX_CACHES	14
+
 /*
  * PRMEM metadata.
  *
@@ -60,6 +62,9 @@ struct prmem_region {
  * size		Size of initial memory allocated to prmem.
  *
  * regions	List of memory regions.
+ *
+ * caches	Caches for different object sizes. For allocations smaller than
+ *		PAGE_SIZE, these caches are used.
  */
 struct prmem {
 	unsigned long		checksum;
@@ -68,6 +73,9 @@ struct prmem {
 
 	/* Persistent Regions. */
 	struct list_head	regions;
+
+	/* Allocation caches. */
+	void			*caches[PRMEM_MAX_CACHES];
 };
 
 extern struct prmem		*prmem;
@@ -87,6 +95,8 @@ int  prmem_cmdline_size(void);
 /* Allocator API. */
 struct page *prmem_alloc_pages(unsigned int order, gfp_t gfp);
 void prmem_free_pages(struct page *pages, unsigned int order);
+void *prmem_alloc(size_t size, gfp_t gfp);
+void prmem_free(void *va, size_t size);
 
 /* Internal functions. */
 struct prmem_region *prmem_add_region(unsigned long pa, size_t size);
@@ -95,6 +105,8 @@ void *prmem_alloc_pool(struct prmem_region *region, size_t size, int align);
 void prmem_free_pool(struct prmem_region *region, void *va, size_t size);
 void *prmem_alloc_pages_locked(unsigned int order);
 void prmem_free_pages_locked(void *va, unsigned int order);
+void *prmem_alloc_locked(size_t size);
+void prmem_free_locked(void *va, size_t size);
 unsigned long prmem_checksum(void *start, size_t size);
 bool __init prmem_validate(void);
 void prmem_cmdline(char *cmdline);
diff --git a/kernel/prmem/prmem_allocator.c b/kernel/prmem/prmem_allocator.c
index 07a5a430630c..f12975bc6777 100644
--- a/kernel/prmem/prmem_allocator.c
+++ b/kernel/prmem/prmem_allocator.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Persistent-Across-Kexec memory feature (prmem) - Allocator.
+ * Persistent-Across-Kexec memory (prmem) - Allocator.
  *
  * Copyright (C) 2023 Microsoft Corporation
  * Author: Madhavan T. Venkataraman (madvenka@linux.microsoft.com)
@@ -72,3 +72,113 @@ void prmem_free_pages(struct page *pages, unsigned int order)
 	spin_unlock(&prmem_lock);
 }
 EXPORT_SYMBOL_GPL(prmem_free_pages);
+
+/* Buffer allocation functions. */
+
+/*
+ * Object sizes served by the allocation caches, one cache per size
+ * (PRMEM_MAX_CACHES entries).  The largest entry must be able to cover
+ * a full page, hence the build-time check that PAGE_SIZE <= 65536.
+ */
+#if PAGE_SIZE > 65536
+#error "Page size is too big"
+#endif
+
+static size_t	prmem_cache_sizes[PRMEM_MAX_CACHES] = {
+	8, 16, 32, 64, 128, 256, 512,
+	1024, 2048, 4096, 8192, 16384, 32768, 65536,
+};
+
+/*
+ * Map an allocation size to the index of the smallest cache whose object
+ * size can hold it.  Callers guarantee size <= PAGE_SIZE, and the
+ * build-time check above guarantees the table covers PAGE_SIZE, so a
+ * match always exists; reaching BUG() means a caller bypassed the size
+ * check.
+ */
+static int prmem_cache_index(size_t size)
+{
+	int	i;
+
+	for (i = 0; i < PRMEM_MAX_CACHES; i++) {
+		if (size <= prmem_cache_sizes[i])
+			return i;
+	}
+	BUG();
+}
+
+/*
+ * Replenish an empty cache: allocate one page, carve it into
+ * PAGE_SIZE / size equal objects, and push each object onto the cache's
+ * free list.  The first word of each free object is reused as the "next"
+ * link, so objects must be at least pointer-sized (the smallest cache is
+ * 8 bytes).  On page-allocation failure this returns silently, leaving
+ * the cache empty; the caller then fails the allocation.  Called with
+ * prmem_lock held.
+ */
+static void prmem_refill(void **cache, size_t size)
+{
+	void		*va;
+	int		i, n = PAGE_SIZE / size;
+
+	/* Allocate a page. */
+	va = prmem_alloc_pages_locked(0);
+	if (!va)
+		return;
+
+	/* Break up the page into pieces and put them in the cache. */
+	for (i = 0; i < n; i++, va += size) {
+		*((void **) va) = *cache;
+		*cache = va;
+	}
+}
+
+/*
+ * Allocate one object large enough for @size bytes.  Rounds the request
+ * up to the matching cache object size, refills the cache from the page
+ * allocator if it is empty, and pops the head of the cache's free list.
+ * Returns NULL if the refill failed.  Caller must hold prmem_lock.
+ */
+void *prmem_alloc_locked(size_t size)
+{
+	void		*va;
+	int		index;
+	void		**cache;
+
+	index = prmem_cache_index(size);
+	size = prmem_cache_sizes[index];
+
+	cache = &prmem->caches[index];
+	if (!*cache) {
+		/* Refill the cache. */
+		prmem_refill(cache, size);
+	}
+
+	/* Allocate one from the cache. Still NULL if the refill failed. */
+	va = *cache;
+	if (va)
+		*cache = *((void **) va);
+	return va;
+}
+
+/*
+ * Allocate a persistent-memory buffer of up to PAGE_SIZE bytes.  The
+ * interface resembles kmalloc(), but the only GFP flag honored is
+ * __GFP_ZERO, which zeroes the requested @size bytes (not the
+ * rounded-up cache object size).  Returns NULL if prmem is not
+ * initialized, @size is 0 or larger than PAGE_SIZE, or memory is
+ * exhausted.
+ */
+void *prmem_alloc(size_t size, gfp_t gfp)
+{
+	void		*va;
+	bool		zero = !!(gfp & __GFP_ZERO);
+
+	if (!prmem_inited || !size)
+		return NULL;
+
+	/* This function is only for sizes up to a PAGE_SIZE. */
+	if (size > PAGE_SIZE)
+		return NULL;
+
+	spin_lock(&prmem_lock);
+	va = prmem_alloc_locked(size);
+	spin_unlock(&prmem_lock);
+
+	/* Zeroing is done outside the lock to keep the critical section short. */
+	if (va && zero)
+		memset(va, 0, size);
+	return va;
+}
+EXPORT_SYMBOL_GPL(prmem_alloc);
+
+/*
+ * Return an object to the free list of the cache selected by @size.
+ * @size must map to the same cache as the size used at allocation time
+ * (any value that rounds to the same cache size works); otherwise the
+ * object lands in the wrong cache.  Caller must hold prmem_lock.
+ */
+void prmem_free_locked(void *va, size_t size)
+{
+	int		index;
+	void		**cache;
+
+	/* Free the object into its cache. */
+	index = prmem_cache_index(size);
+	cache = &prmem->caches[index];
+	*((void **) va) = *cache;
+	*cache = va;
+}
+
+/*
+ * Free a buffer previously returned by prmem_alloc().  @size must match
+ * the size passed to prmem_alloc() (the caller tracks it; no per-object
+ * header is kept).  Silently does nothing if prmem is not initialized,
+ * @va is NULL, @size is 0, or @size exceeds PAGE_SIZE.
+ */
+void prmem_free(void *va, size_t size)
+{
+	if (!prmem_inited || !va || !size)
+		return;
+
+	/* This function is only for sizes up to a PAGE_SIZE. */
+	if (size > PAGE_SIZE)
+		return;
+
+	spin_lock(&prmem_lock);
+	prmem_free_locked(va, size);
+	spin_unlock(&prmem_lock);
+}
+EXPORT_SYMBOL_GPL(prmem_free);
-- 
2.25.1


  parent reply	other threads:[~2023-10-16 23:32 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <1b1bc25eb87355b91fcde1de7c2f93f38abb2bf9>
2023-10-16 23:32 ` [RFC PATCH v1 00/10] mm/prmem: Implement the Persistent-Across-Kexec memory feature (prmem) madvenka
2023-10-16 23:32   ` [RFC PATCH v1 01/10] mm/prmem: Allocate memory during boot for storing persistent data madvenka
2023-10-17 18:36     ` kernel test robot
2023-10-16 23:32   ` [RFC PATCH v1 02/10] mm/prmem: Reserve metadata and persistent regions in early boot after kexec madvenka
2023-10-17 19:29     ` kernel test robot
2023-10-16 23:32   ` [RFC PATCH v1 03/10] mm/prmem: Manage persistent memory with the gen pool allocator madvenka
2023-10-16 23:32   ` [RFC PATCH v1 04/10] mm/prmem: Implement a page allocator for persistent memory madvenka
2023-10-16 23:32   ` madvenka [this message]
2023-10-16 23:32   ` [RFC PATCH v1 06/10] mm/prmem: Implement persistent XArray (and Radix Tree) madvenka
2023-10-16 23:32   ` [RFC PATCH v1 07/10] mm/prmem: Implement named Persistent Instances madvenka
2023-10-16 23:32   ` [RFC PATCH v1 08/10] mm/prmem: Implement Persistent Ramdisk instances madvenka
2023-10-17 16:39     ` kernel test robot
2023-10-16 23:32   ` [RFC PATCH v1 09/10] mm/prmem: Implement DAX support for Persistent Ramdisks madvenka
2023-10-16 23:32   ` [RFC PATCH v1 10/10] mm/prmem: Implement dynamic expansion of prmem madvenka
2023-10-17  8:31   ` [RFC PATCH v1 00/10] mm/prmem: Implement the Persistent-Across-Kexec memory feature (prmem) Alexander Graf
2023-10-17 18:08     ` Madhavan T. Venkataraman

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231016233215.13090-6-madvenka@linux.microsoft.com \
    --to=madvenka@linux.microsoft.com \
    --cc=anthony.yznaga@oracle.com \
    --cc=arnd@arndb.de \
    --cc=graf@amazon.de \
    --cc=gregkh@linuxfoundation.org \
    --cc=jamorris@linux.microsoft.com \
    --cc=jgowans@amazon.com \
    --cc=keescook@chromium.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=pbonzini@redhat.com \
    --cc=rppt@kernel.org \
    --cc=stanislav.kinsburskii@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.