From: Stefano Stabellini <sstabellini@kernel.org>
To: xen-devel@lists.xenproject.org
Cc: sstabellini@kernel.org, julien@xen.org, Wei Liu <wl@xen.org>,
andrew.cooper3@citrix.com,
Ian Jackson <ian.jackson@eu.citrix.com>,
George Dunlap <george.dunlap@citrix.com>,
jbeulich@suse.com,
Stefano Stabellini <stefano.stabellini@xilinx.com>,
Volodymyr_Babchuk@epam.com
Subject: [PATCH 05/12] xen: introduce reserve_heap_pages
Date: Tue, 14 Apr 2020 18:02:48 -0700
Message-ID: <20200415010255.10081-5-sstabellini@kernel.org>
In-Reply-To: <alpine.DEB.2.21.2004141746350.8746@sstabellini-ThinkPad-T480s>
Introduce a function named reserve_heap_pages (similar to
alloc_heap_pages) that allocates a caller-requested memory range, i.e.
2^order pages starting at a given address. Call __alloc_heap_pages for
the common part of the implementation.
Change __alloc_heap_pages so that the page pointer passed in no longer
gets modified: unneeded memory is given back to the free lists from the
top of the buddy downwards rather than from the bottom upwards, so the
allocated range keeps its original start address.
Also introduce a function named reserve_domheap_pages, similar to
alloc_domheap_pages, that checks memflags before calling
reserve_heap_pages. On success it also calls assign_pages to assign the
pages to the domain.
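For illustration, a hypothetical caller (not included in this patch)
could use the new interface along these lines; the start address, order
and error handling are made-up example values:

    struct page_info *pg;

    /* Try to reserve 2^9 pages (2MB) starting at 0x40000000 (example only). */
    pg = reserve_domheap_pages(d, 0x40000000UL, 9, 0);
    if ( pg == NULL )
        printk("Failed to reserve the requested RAM region for the domain\n");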
Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
CC: andrew.cooper3@citrix.com
CC: jbeulich@suse.com
CC: George Dunlap <george.dunlap@citrix.com>
CC: Ian Jackson <ian.jackson@eu.citrix.com>
CC: Wei Liu <wl@xen.org>
---
xen/common/page_alloc.c | 72 ++++++++++++++++++++++++++++++++++++++---
xen/include/xen/mm.h | 2 ++
2 files changed, 69 insertions(+), 5 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 79ae64d4b8..3a9c1a291b 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -911,7 +911,7 @@ static struct page_info *get_free_buddy(unsigned int zone_lo,
}
}
-static void __alloc_heap_pages(struct page_info **pgo,
+static void __alloc_heap_pages(struct page_info *pg,
unsigned int order,
unsigned int memflags,
struct domain *d)
@@ -922,7 +922,7 @@ static void __alloc_heap_pages(struct page_info **pgo,
bool need_tlbflush = false;
uint32_t tlbflush_timestamp = 0;
unsigned int dirty_cnt = 0;
- struct page_info *pg = *pgo;
+ struct page_info *pg_start = pg;
node = phys_to_nid(page_to_maddr(pg));
zone = page_to_zone(pg);
@@ -934,10 +934,10 @@ static void __alloc_heap_pages(struct page_info **pgo,
while ( buddy_order != order )
{
buddy_order--;
+ pg = pg_start + (1U << buddy_order);
page_list_add_scrub(pg, node, zone, buddy_order,
(1U << buddy_order) > first_dirty ?
first_dirty : INVALID_DIRTY_IDX);
- pg += 1U << buddy_order;
if ( first_dirty != INVALID_DIRTY_IDX )
{
@@ -948,7 +948,7 @@ static void __alloc_heap_pages(struct page_info **pgo,
first_dirty = 0; /* We've moved past original first_dirty */
}
}
- *pgo = pg;
+ pg = pg_start;
ASSERT(avail[node][zone] >= request);
avail[node][zone] -= request;
@@ -1073,7 +1073,42 @@ static struct page_info *alloc_heap_pages(
return NULL;
}
- __alloc_heap_pages(&pg, order, memflags, d);
+ __alloc_heap_pages(pg, order, memflags, d);
+ return pg;
+}
+
+static struct page_info *reserve_heap_pages(struct domain *d,
+ paddr_t start,
+ unsigned int order,
+ unsigned int memflags)
+{
+ nodeid_t node;
+ unsigned int zone;
+ struct page_info *pg;
+
+ if ( unlikely(order > MAX_ORDER) )
+ return NULL;
+
+ spin_lock(&heap_lock);
+
+ /*
+ * Claimed memory is considered unavailable unless the request
+ * is made by a domain with sufficient unclaimed pages.
+ */
+ if ( (outstanding_claims + (1UL << order) > total_avail_pages) &&
+ ((memflags & MEMF_no_refcount) ||
+ !d || d->outstanding_pages < (1UL << order)) )
+ {
+ spin_unlock(&heap_lock);
+ return NULL;
+ }
+
+ pg = maddr_to_page(start);
+ node = phys_to_nid(start);
+ zone = page_to_zone(pg);
+ page_list_del(pg, &heap(node, zone, order));
+
+ __alloc_heap_pages(pg, order, memflags, d);
return pg;
}
@@ -2385,6 +2420,33 @@ struct page_info *alloc_domheap_pages(
return pg;
}
+struct page_info *reserve_domheap_pages(
+ struct domain *d, paddr_t start, unsigned int order, unsigned int memflags)
+{
+ struct page_info *pg = NULL;
+
+ ASSERT(!in_irq());
+
+ if ( memflags & MEMF_no_owner )
+ memflags |= MEMF_no_refcount;
+ else if ( (memflags & MEMF_no_refcount) && d )
+ {
+ ASSERT(!(memflags & MEMF_no_refcount));
+ return NULL;
+ }
+
+ pg = reserve_heap_pages(d, start, order, memflags);
+
+ if ( d && !(memflags & MEMF_no_owner) &&
+ assign_pages(d, pg, order, memflags) )
+ {
+ free_heap_pages(pg, order, memflags & MEMF_no_scrub);
+ return NULL;
+ }
+
+ return pg;
+}
+
void free_domheap_pages(struct page_info *pg, unsigned int order)
{
struct domain *d = page_get_owner(pg);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 9b62087be1..35407e1b68 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -199,6 +199,8 @@ void get_outstanding_claims(uint64_t *free_pages, uint64_t *outstanding_pages);
void init_domheap_pages(paddr_t ps, paddr_t pe);
struct page_info *alloc_domheap_pages(
struct domain *d, unsigned int order, unsigned int memflags);
+struct page_info *reserve_domheap_pages(
+ struct domain *d, paddr_t start, unsigned int order, unsigned int memflags);
void free_domheap_pages(struct page_info *pg, unsigned int order);
unsigned long avail_domheap_pages_region(
unsigned int node, unsigned int min_width, unsigned int max_width);
--
2.17.1
Thread overview: 67+ messages
2020-04-15 1:02 [PATCH 0/12] direct-map DomUs Stefano Stabellini
2020-04-15 1:02 ` [PATCH 01/12] xen: introduce xen_dom_flags Stefano Stabellini
2020-04-15 9:12 ` Jan Beulich
2020-04-15 13:26 ` Julien Grall
2020-04-29 23:57 ` Stefano Stabellini
2020-04-15 1:02 ` [PATCH 02/12] xen/arm: introduce arch_xen_dom_flags and direct_map Stefano Stabellini
2020-04-15 10:27 ` Jan Beulich
2020-04-15 11:27 ` Andrew Cooper
2020-04-30 0:34 ` Stefano Stabellini
2020-04-15 1:02 ` [PATCH 03/12] xen/arm: introduce 1:1 mapping for domUs Stefano Stabellini
2020-04-15 13:36 ` Julien Grall
2020-05-01 1:26 ` Stefano Stabellini
2020-05-01 8:30 ` Julien Grall
2020-05-09 0:07 ` Stefano Stabellini
2020-05-09 9:56 ` Julien Grall
2020-04-15 1:02 ` [PATCH 04/12] xen: split alloc_heap_pages in two halves for reusability Stefano Stabellini
2020-04-15 11:22 ` Wei Liu
2020-04-17 10:02 ` Jan Beulich
2020-04-29 23:09 ` Stefano Stabellini
2020-04-15 1:02 ` Stefano Stabellini [this message]
2020-04-15 13:24 ` [PATCH 05/12] xen: introduce reserve_heap_pages Julien Grall
2020-04-17 10:11 ` Jan Beulich
2020-04-29 22:46 ` Stefano Stabellini
2020-04-30 6:29 ` Jan Beulich
2020-04-30 16:21 ` Stefano Stabellini
2020-05-04 9:16 ` Jan Beulich
2020-04-30 14:51 ` Julien Grall
2020-04-30 17:00 ` Stefano Stabellini
2020-04-30 18:27 ` Julien Grall
2020-05-12 1:10 ` Stefano Stabellini
2020-05-12 8:57 ` Julien Grall
2020-04-15 1:02 ` [PATCH 06/12] xen/arm: reserve 1:1 memory for direct_map domUs Stefano Stabellini
2020-04-15 13:38 ` Julien Grall
2020-04-15 1:02 ` [PATCH 07/12] xen/arm: new vgic: rename vgic_cpu/dist_base to c/dbase Stefano Stabellini
2020-04-15 13:41 ` Julien Grall
2020-04-15 1:02 ` [PATCH 08/12] xen/arm: if is_domain_direct_mapped use native addresses for GICv2 Stefano Stabellini
2020-04-15 14:00 ` Julien Grall
2020-05-01 1:26 ` Stefano Stabellini
2020-05-01 8:23 ` Julien Grall
2020-05-09 0:06 ` Stefano Stabellini
2020-04-15 1:02 ` [PATCH 09/12] xen/arm: if is_domain_direct_mapped use native addresses for GICv3 Stefano Stabellini
2020-04-15 14:09 ` Julien Grall
2020-05-01 1:31 ` Stefano Stabellini
2020-05-01 8:40 ` Julien Grall
2020-05-09 0:06 ` Stefano Stabellini
2020-04-15 1:02 ` [PATCH 10/12] xen/arm: if is_domain_direct_mapped use native UART address for vPL011 Stefano Stabellini
2020-04-15 14:11 ` Julien Grall
2020-05-01 1:26 ` Stefano Stabellini
2020-05-01 8:09 ` Julien Grall
2020-05-09 0:07 ` Stefano Stabellini
2020-05-09 10:11 ` Julien Grall
2020-05-11 22:58 ` Stefano Stabellini
2020-04-15 1:02 ` [PATCH 11/12] xen/arm: if xen_force don't try to setup the IOMMU Stefano Stabellini
2020-04-15 14:12 ` Julien Grall
2020-04-29 21:55 ` Stefano Stabellini
2020-04-30 13:51 ` Julien Grall
2020-05-01 1:25 ` Stefano Stabellini
2020-04-15 1:02 ` [PATCH 12/12] xen/arm: call iomem_permit_access for passthrough devices Stefano Stabellini
2020-04-15 14:18 ` Julien Grall
2020-04-29 20:47 ` Stefano Stabellini
2020-04-30 13:01 ` Julien Grall
2020-05-24 14:12 ` Julien Grall
2020-05-26 16:46 ` Stefano Stabellini
2020-05-27 18:09 ` Julien Grall
2020-04-16 8:59 ` [PATCH 0/12] direct-map DomUs Julien Grall
2020-04-29 20:16 ` Stefano Stabellini
2020-04-30 12:54 ` Julien Grall