From: Claudio Imbrenda <imbrenda@linux.ibm.com>
To: kvm@vger.kernel.org, pbonzini@redhat.com
Cc: frankja@linux.ibm.com, david@redhat.com, thuth@redhat.com,
	cohuck@redhat.com, lvivier@redhat.com
Subject: [kvm-unit-tests PATCH v2 7/7] lib/alloc_page: allow reserving arbitrary memory ranges
Date: Fri,  2 Oct 2020 17:44:20 +0200	[thread overview]
Message-ID: <20201002154420.292134-8-imbrenda@linux.ibm.com> (raw)
In-Reply-To: <20201002154420.292134-1-imbrenda@linux.ibm.com>

Introduce two new functions, alloc_pages_special and free_pages_special,
which allow specific memory ranges to be reserved and freed.

This is useful when a testcase needs memory at very specific addresses,
with the guarantee that the page allocator will not touch those pages.
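
For example, a testcase that needs a buffer at a fixed physical
address could use the new interface like this (a minimal sketch; the
address, page count, and reporting are made up for illustration):

	void *p = alloc_pages_special(0x1000000, 4);

	if (p) {
		/* the page allocator will not hand out these pages */
		memset(p, 0xff, 4 * PAGE_SIZE);
		/* return the whole range to the allocator */
		free_pages_special(0x1000000, 4);
	} else {
		report_skip("could not reserve the required range");
	}

alloc_pages_special returns NULL if any page in the range is already
allocated or reserved, in which case the pages reserved so far are
rolled back before returning.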

Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
---
 lib/alloc_page.h | 15 ++++++++++
 lib/alloc_page.c | 78 ++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 88 insertions(+), 5 deletions(-)

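A note on the mechanism: each entry of page_states encodes the order
of the block the page belongs to in the low bits and the allocation
flags in the top bits; the new SPECIAL_MASK flag marks pages that are
reserved and must not be touched by the allocator. To reserve a page,
_alloc_page_special below repeatedly splits the free block containing
it until the page is a standalone order-0 block, then flags it as
special. A minimal sketch of how a state byte can be read (the helper
names are illustrative and not part of the patch):

	/* illustrative helpers, not part of the patch */
	static inline bool page_is_special(u8 state)
	{
		return state & SPECIAL_MASK;	/* reserved, hands off */
	}

	static inline bool page_is_taken(u8 state)
	{
		/* allocated or reserved; unavailable either way */
		return state & (ALLOC_MASK | SPECIAL_MASK);
	}

	static inline u8 page_order(u8 state)
	{
		return state & ORDER_MASK;	/* low 6 bits: block order */
	}
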
diff --git a/lib/alloc_page.h b/lib/alloc_page.h
index 6c23018..816ff5d 100644
--- a/lib/alloc_page.h
+++ b/lib/alloc_page.h
@@ -75,4 +75,19 @@ static inline void free_pages_by_order(void *mem, unsigned int order)
 	free_pages(mem);
 }
 
+/*
+ * Allocates and reserves the specified memory range if possible.
+ * Returns NULL in case of failure.
+ */
+void *alloc_pages_special(uintptr_t addr, size_t npages);
+
+/*
+ * Frees a memory range that was previously reserved with
+ * alloc_pages_special.
+ * The range does not need to match a previous allocation exactly;
+ * it can also be a subset, in which case only the specified pages
+ * are freed and unreserved.
+ */
+void free_pages_special(uintptr_t addr, size_t npages);
+
 #endif
diff --git a/lib/alloc_page.c b/lib/alloc_page.c
index 3c6c4ee..d9665a4 100644
--- a/lib/alloc_page.c
+++ b/lib/alloc_page.c
@@ -23,13 +23,14 @@
 
 #define ORDER_MASK	0x3f
 #define ALLOC_MASK	0x40
+#define SPECIAL_MASK	0x80
 
 struct mem_area {
 	/* Physical frame number of the first usable frame in the area */
 	uintptr_t base;
 	/* Physical frame number of the first frame outside the area */
 	uintptr_t top;
-	/* Combination ALLOC_MASK and order */
+	/* Combination of SPECIAL_MASK, ALLOC_MASK, and order */
 	u8 *page_states;
 	/* One freelist for each possible block size, up to NLISTS */
 	struct linked_list freelists[NLISTS];
@@ -136,6 +137,16 @@ static void *page_memalign_order(struct mem_area *a, u8 al, u8 sz)
 	return res;
 }
 
+static struct mem_area *get_area(uintptr_t pfn)
+{
+	uintptr_t i;
+
+	for (i = 0; i < MAX_AREAS; i++)
+		if ((areas_mask & BIT(i)) && area_contains(areas + i, pfn))
+			return areas + i;
+	return NULL;
+}
+
 /*
  * Try to merge two blocks into a bigger one.
  * Returns true in case of a successful merge.
@@ -210,10 +221,7 @@ static void _free_pages(void *mem)
 	assert(IS_ALIGNED((uintptr_t)mem, PAGE_SIZE));
 
 	/* find which area this pointer belongs to*/
-	for (i = 0; !a && (i < MAX_AREAS); i++) {
-		if ((areas_mask & BIT(i)) && area_contains(areas + i, pfn))
-			a = areas + i;
-	}
+	a = get_area(pfn);
 	assert_msg(a, "memory does not belong to any area: %p", mem);
 
 	p = pfn - a->base;
@@ -262,6 +270,66 @@ void free_pages(void *mem)
 	spin_unlock(&lock);
 }
 
+static void *_alloc_page_special(uintptr_t addr)
+{
+	struct mem_area *a;
+	uintptr_t mask, i;
+
+	a = get_area(PFN(addr));
+	assert(a);
+	i = PFN(addr) - a->base;
+	if (a->page_states[i] & (ALLOC_MASK | SPECIAL_MASK))
+		return NULL;
+	while (a->page_states[i]) {
+		mask = GENMASK_ULL(63, PAGE_SHIFT + a->page_states[i]);
+		split(a, (void *)(addr & mask));
+	}
+	a->page_states[i] = SPECIAL_MASK;
+	return (void *)addr;
+}
+
+static void _free_page_special(uintptr_t addr)
+{
+	struct mem_area *a;
+	uintptr_t i;
+
+	a = get_area(PFN(addr));
+	assert(a);
+	i = PFN(addr) - a->base;
+	assert(a->page_states[i] == SPECIAL_MASK);
+	a->page_states[i] = ALLOC_MASK;
+	_free_pages((void *)addr);
+}
+
+void *alloc_pages_special(uintptr_t addr, size_t n)
+{
+	uintptr_t i;
+
+	assert(IS_ALIGNED(addr, PAGE_SIZE));
+	spin_lock(&lock);
+	for (i = 0; i < n; i++)
+		if (!_alloc_page_special(addr + i * PAGE_SIZE))
+			break;
+	if (i < n) {
+		for (n = 0; n < i; n++)
+			_free_page_special(addr + n * PAGE_SIZE);
+		addr = 0;
+	}
+	spin_unlock(&lock);
+	return (void *)addr;
+}
+
+void free_pages_special(uintptr_t addr, size_t n)
+{
+	uintptr_t i;
+
+	assert(IS_ALIGNED(addr, PAGE_SIZE));
+	spin_lock(&lock);
+	for (i = 0; i < n; i++)
+		_free_page_special(addr + i * PAGE_SIZE);
+	spin_unlock(&lock);
+}
+
 static void *page_memalign_order_area(unsigned area, u8 ord, u8 al)
 {
 	void *res = NULL;
-- 
2.26.2

