From: Stefano Stabellini <sstabellini@kernel.org>
To: xen-devel@lists.xenproject.org
Cc: sstabellini@kernel.org, julien@xen.org, Wei Liu <wl@xen.org>,
	andrew.cooper3@citrix.com,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	jbeulich@suse.com,
	Stefano Stabellini <stefano.stabellini@xilinx.com>,
	Volodymyr_Babchuk@epam.com
Subject: [PATCH 04/12] xen: split alloc_heap_pages in two halves for reusability
Date: Tue, 14 Apr 2020 18:02:47 -0700
Message-ID: <20200415010255.10081-4-sstabellini@kernel.org>
In-Reply-To: <alpine.DEB.2.21.2004141746350.8746@sstabellini-ThinkPad-T480s>

This patch splits the implementation of alloc_heap_pages into two halves:
the first half performs the preliminary checks and finds a suitable free
buddy, while the second half (now __alloc_heap_pages) carves the requested
pages out of that buddy and handles the accounting, scrubbing and TLB-flush
logic, so that the second half can be reused by the next patch.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
CC: andrew.cooper3@citrix.com
CC: jbeulich@suse.com
CC: George Dunlap <george.dunlap@citrix.com>
CC: Ian Jackson <ian.jackson@eu.citrix.com>
CC: Wei Liu <wl@xen.org>
---
Comments are welcome. I am not convinced that this is the right way to
split it. Please let me know if you have any suggestions.
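
For illustration only, here is a rough sketch of how the next patch
(which introduces reserve_heap_pages) could reuse the second half. The
signature and body below are guesses made up for this example, not the
actual content of that patch:

/*
 * Hypothetical caller, for illustration only: a path that already knows
 * which pages it wants and therefore only needs the second half of the
 * old alloc_heap_pages() -- buddy splitting, accounting, scrubbing and
 * TLB-flush handling. Checking that the requested range is actually
 * free is omitted here.
 */
static struct page_info *reserve_heap_pages(struct domain *d,
                                            paddr_t start,
                                            unsigned int order,
                                            unsigned int memflags)
{
    struct page_info *pg;

    if ( unlikely(order > MAX_ORDER) )
        return NULL;

    spin_lock(&heap_lock);

    /* Page backing the start of the requested range. */
    pg = maddr_to_page(start);

    /*
     * Reuse the common second half introduced by this patch; as in
     * alloc_heap_pages() in the diff below, rely on it to drop the
     * heap_lock.
     */
    __alloc_heap_pages(&pg, order, memflags, d);

    return pg;
}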
---
 xen/common/page_alloc.c | 94 +++++++++++++++++++++++------------------
 1 file changed, 53 insertions(+), 41 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 10b7aeca48..79ae64d4b8 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -911,54 +911,18 @@ static struct page_info *get_free_buddy(unsigned int zone_lo,
     }
 }
 
-/* Allocate 2^@order contiguous pages. */
-static struct page_info *alloc_heap_pages(
-    unsigned int zone_lo, unsigned int zone_hi,
-    unsigned int order, unsigned int memflags,
-    struct domain *d)
+static void __alloc_heap_pages(struct page_info **pgo,
+                               unsigned int order,
+                               unsigned int memflags,
+                               struct domain *d)
 {
     nodeid_t node;
     unsigned int i, buddy_order, zone, first_dirty;
     unsigned long request = 1UL << order;
-    struct page_info *pg;
     bool need_tlbflush = false;
     uint32_t tlbflush_timestamp = 0;
     unsigned int dirty_cnt = 0;
-
-    /* Make sure there are enough bits in memflags for nodeID. */
-    BUILD_BUG_ON((_MEMF_bits - _MEMF_node) < (8 * sizeof(nodeid_t)));
-
-    ASSERT(zone_lo <= zone_hi);
-    ASSERT(zone_hi < NR_ZONES);
-
-    if ( unlikely(order > MAX_ORDER) )
-        return NULL;
-
-    spin_lock(&heap_lock);
-
-    /*
-     * Claimed memory is considered unavailable unless the request
-     * is made by a domain with sufficient unclaimed pages.
-     */
-    if ( (outstanding_claims + request > total_avail_pages) &&
-          ((memflags & MEMF_no_refcount) ||
-           !d || d->outstanding_pages < request) )
-    {
-        spin_unlock(&heap_lock);
-        return NULL;
-    }
-
-    pg = get_free_buddy(zone_lo, zone_hi, order, memflags, d);
-    /* Try getting a dirty buddy if we couldn't get a clean one. */
-    if ( !pg && !(memflags & MEMF_no_scrub) )
-        pg = get_free_buddy(zone_lo, zone_hi, order,
-                            memflags | MEMF_no_scrub, d);
-    if ( !pg )
-    {
-        /* No suitable memory blocks. Fail the request. */
-        spin_unlock(&heap_lock);
-        return NULL;
-    }
+    struct page_info *pg = *pgo;
 
     node = phys_to_nid(page_to_maddr(pg));
     zone = page_to_zone(pg);
@@ -984,6 +948,7 @@ static struct page_info *alloc_heap_pages(
                 first_dirty = 0; /* We've moved past original first_dirty */
         }
     }
+    *pgo = pg;
 
     ASSERT(avail[node][zone] >= request);
     avail[node][zone] -= request;
@@ -1062,6 +1027,53 @@ static struct page_info *alloc_heap_pages(
     if ( need_tlbflush )
         filtered_flush_tlb_mask(tlbflush_timestamp);
 
+}
+
+/* Allocate 2^@order contiguous pages. */
+static struct page_info *alloc_heap_pages(
+    unsigned int zone_lo, unsigned int zone_hi,
+    unsigned int order, unsigned int memflags,
+    struct domain *d)
+{
+    unsigned long request = 1UL << order;
+    struct page_info *pg;
+
+    /* Make sure there are enough bits in memflags for nodeID. */
+    BUILD_BUG_ON((_MEMF_bits - _MEMF_node) < (8 * sizeof(nodeid_t)));
+
+    ASSERT(zone_lo <= zone_hi);
+    ASSERT(zone_hi < NR_ZONES);
+
+    if ( unlikely(order > MAX_ORDER) )
+        return NULL;
+
+    spin_lock(&heap_lock);
+
+    /*
+     * Claimed memory is considered unavailable unless the request
+     * is made by a domain with sufficient unclaimed pages.
+     */
+    if ( (outstanding_claims + request > total_avail_pages) &&
+          ((memflags & MEMF_no_refcount) ||
+           !d || d->outstanding_pages < request) )
+    {
+        spin_unlock(&heap_lock);
+        return NULL;
+    }
+
+    pg = get_free_buddy(zone_lo, zone_hi, order, memflags, d);
+    /* Try getting a dirty buddy if we couldn't get a clean one. */
+    if ( !pg && !(memflags & MEMF_no_scrub) )
+        pg = get_free_buddy(zone_lo, zone_hi, order,
+                            memflags | MEMF_no_scrub, d);
+    if ( !pg )
+    {
+        /* No suitable memory blocks. Fail the request. */
+        spin_unlock(&heap_lock);
+        return NULL;
+    }
+
+    __alloc_heap_pages(&pg, order, memflags, d);
     return pg;
 }
 
-- 
2.17.1



Thread overview: 67+ messages
2020-04-15  1:02 [PATCH 0/12] direct-map DomUs Stefano Stabellini
2020-04-15  1:02 ` [PATCH 01/12] xen: introduce xen_dom_flags Stefano Stabellini
2020-04-15  9:12   ` Jan Beulich
2020-04-15 13:26     ` Julien Grall
2020-04-29 23:57     ` Stefano Stabellini
2020-04-15  1:02 ` [PATCH 02/12] xen/arm: introduce arch_xen_dom_flags and direct_map Stefano Stabellini
2020-04-15 10:27   ` Jan Beulich
2020-04-15 11:27     ` Andrew Cooper
2020-04-30  0:34     ` Stefano Stabellini
2020-04-15  1:02 ` [PATCH 03/12] xen/arm: introduce 1:1 mapping for domUs Stefano Stabellini
2020-04-15 13:36   ` Julien Grall
2020-05-01  1:26     ` Stefano Stabellini
2020-05-01  8:30       ` Julien Grall
2020-05-09  0:07         ` Stefano Stabellini
2020-05-09  9:56           ` Julien Grall
2020-04-15  1:02 ` Stefano Stabellini [this message]
2020-04-15 11:22   ` [PATCH 04/12] xen: split alloc_heap_pages in two halves for reusability Wei Liu
2020-04-17 10:02   ` Jan Beulich
2020-04-29 23:09     ` Stefano Stabellini
2020-04-15  1:02 ` [PATCH 05/12] xen: introduce reserve_heap_pages Stefano Stabellini
2020-04-15 13:24   ` Julien Grall
2020-04-17 10:11   ` Jan Beulich
2020-04-29 22:46     ` Stefano Stabellini
2020-04-30  6:29       ` Jan Beulich
2020-04-30 16:21         ` Stefano Stabellini
2020-05-04  9:16           ` Jan Beulich
2020-04-30 14:51       ` Julien Grall
2020-04-30 17:00         ` Stefano Stabellini
2020-04-30 18:27           ` Julien Grall
2020-05-12  1:10             ` Stefano Stabellini
2020-05-12  8:57               ` Julien Grall
2020-04-15  1:02 ` [PATCH 06/12] xen/arm: reserve 1:1 memory for direct_map domUs Stefano Stabellini
2020-04-15 13:38   ` Julien Grall
2020-04-15  1:02 ` [PATCH 07/12] xen/arm: new vgic: rename vgic_cpu/dist_base to c/dbase Stefano Stabellini
2020-04-15 13:41   ` Julien Grall
2020-04-15  1:02 ` [PATCH 08/12] xen/arm: if is_domain_direct_mapped use native addresses for GICv2 Stefano Stabellini
2020-04-15 14:00   ` Julien Grall
2020-05-01  1:26     ` Stefano Stabellini
2020-05-01  8:23       ` Julien Grall
2020-05-09  0:06         ` Stefano Stabellini
2020-04-15  1:02 ` [PATCH 09/12] xen/arm: if is_domain_direct_mapped use native addresses for GICv3 Stefano Stabellini
2020-04-15 14:09   ` Julien Grall
2020-05-01  1:31     ` Stefano Stabellini
2020-05-01  8:40       ` Julien Grall
2020-05-09  0:06         ` Stefano Stabellini
2020-04-15  1:02 ` [PATCH 10/12] xen/arm: if is_domain_direct_mapped use native UART address for vPL011 Stefano Stabellini
2020-04-15 14:11   ` Julien Grall
2020-05-01  1:26     ` Stefano Stabellini
2020-05-01  8:09       ` Julien Grall
2020-05-09  0:07         ` Stefano Stabellini
2020-05-09 10:11           ` Julien Grall
2020-05-11 22:58             ` Stefano Stabellini
2020-04-15  1:02 ` [PATCH 11/12] xen/arm: if xen_force don't try to setup the IOMMU Stefano Stabellini
2020-04-15 14:12   ` Julien Grall
2020-04-29 21:55     ` Stefano Stabellini
2020-04-30 13:51       ` Julien Grall
2020-05-01  1:25         ` Stefano Stabellini
2020-04-15  1:02 ` [PATCH 12/12] xen/arm: call iomem_permit_access for passthrough devices Stefano Stabellini
2020-04-15 14:18   ` Julien Grall
2020-04-29 20:47     ` Stefano Stabellini
2020-04-30 13:01       ` Julien Grall
2020-05-24 14:12         ` Julien Grall
2020-05-26 16:46           ` Stefano Stabellini
2020-05-27 18:09             ` Julien Grall
2020-04-16  8:59 ` [PATCH 0/12] direct-map DomUs Julien Grall
2020-04-29 20:16   ` Stefano Stabellini
2020-04-30 12:54     ` Julien Grall
