From: Bob Liu <lliubbo@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: keir@xen.org, ian.campbell@citrix.com,
George.Dunlap@eu.citrix.com, andrew.cooper3@citrix.com,
jbeulich@suse.com
Subject: [PATCH 4/4] xen: use idle vcpus to scrub pages
Date: Tue, 17 Jun 2014 19:49:43 +0800 [thread overview]
Message-ID: <1403005783-30746-4-git-send-email-bob.liu@oracle.com> (raw)
In-Reply-To: <1403005783-30746-1-git-send-email-bob.liu@oracle.com>
In case of heavy lock contention, use a percpu list:
- Delist a batch of pages from the global "scrub" free page list onto a percpu list.
- Scrub the pages on this percpu list.
- Add the now-clean pages back to the normal "head" free page list, merging with
adjacent chunks if needed.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
xen/arch/x86/domain.c | 1 +
xen/common/domain.c | 2 ++
xen/common/page_alloc.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++
xen/include/xen/mm.h | 2 ++
4 files changed, 87 insertions(+)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 6fddd4c..a46a2ba 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -116,6 +116,7 @@ static void idle_loop(void)
{
if ( cpu_is_offline(smp_processor_id()) )
play_dead();
+ scrub_free_pages();
(*pm_idle)();
do_tasklet();
do_softirq();
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 4291e29..bd386e6 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -587,12 +587,14 @@ int domain_kill(struct domain *d)
d->tmem_client = NULL;
/* fallthrough */
case DOMDYING_dying:
+ enable_idle_scrub = 0;
rc = domain_relinquish_resources(d);
if ( rc != 0 )
{
BUG_ON(rc != -EAGAIN);
break;
}
+ enable_idle_scrub = 1;
for_each_vcpu ( d, v )
unmap_vcpu_info(v);
d->is_dying = DOMDYING_dead;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 5698596..2b2fd04 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -79,6 +79,9 @@ PAGE_LIST_HEAD(page_offlined_list);
/* Broken page list, protected by heap_lock. */
PAGE_LIST_HEAD(page_broken_list);
+volatile bool_t enable_idle_scrub;
+DEFINE_PER_CPU(struct page_list_head, scrub_list_cpu);
+
/*************************
* BOOT-TIME ALLOCATOR
*/
@@ -1387,7 +1390,86 @@ void __init scrub_heap_pages(void)
setup_low_mem_virq();
}
+#define SCRUB_BATCH 1024
+void scrub_free_pages(void)
+{
+ struct page_info *pg;
+ unsigned int i, j, node_empty = 0, nr_delisted = 0;
+ int order;
+ unsigned int cpu = smp_processor_id();
+ unsigned int node = cpu_to_node(cpu);
+ struct page_list_head *temp_list = &this_cpu(scrub_list_cpu);
+
+ if ( !enable_idle_scrub )
+ return;
+
+ do
+ {
+ if ( page_list_empty(temp_list) )
+ {
+ /* Delist a batch of pages from global scrub list */
+ spin_lock(&heap_lock);
+ for ( j = 0; j < NR_ZONES; j++ )
+ {
+ for ( order = MAX_ORDER; order >= 0; order-- )
+ {
+ if ( (pg = page_list_remove_head(&scrub(node, j, order))) )
+ {
+ for ( i = 0; i < (1 << order); i++)
+ mark_page_offline(&pg[i], 0);
+
+ page_list_add_tail(pg, temp_list);
+ nr_delisted += (1 << order);
+ if ( nr_delisted > SCRUB_BATCH )
+ {
+ nr_delisted = 0;
+ spin_unlock(&heap_lock);
+ goto start_scrub;
+ }
+ }
+ }
+ }
+
+ node_empty = 1;
+ spin_unlock(&heap_lock);
+ }
+ else
+ {
+start_scrub:
+ /* Scrub percpu list */
+ while ( !page_list_empty(temp_list) )
+ {
+ pg = page_list_remove_head(temp_list);
+ ASSERT(pg);
+ order = PFN_ORDER(pg);
+ for ( i = 0; i < (1 << order); i++ )
+ {
+ ASSERT( test_bit(_PGC_need_scrub, &(pg[i].count_info)) );
+ scrub_one_page(&pg[i]);
+ pg[i].count_info &= ~(PGC_need_scrub);
+ }
+ /* Add pages to free heap list */
+ spin_lock(&heap_lock);
+ for ( i = 0; i < (1 << order); i++ )
+ {
+ ASSERT ( !test_bit(_PGC_need_scrub, &(pg[i].count_info)) );
+ pg[i].count_info |= PGC_state_free;
+ }
+ ASSERT (node == phys_to_nid(page_to_maddr(pg)));
+ merge_free_trunks(pg, order, 0);
+ spin_unlock(&heap_lock);
+
+ if ( softirq_pending(cpu) )
+ return;
+ }
+ }
+
+ /* Scrub list of this node is empty */
+ if ( node_empty )
+ return;
+ } while ( !softirq_pending(cpu) );
+}
/*************************
* XEN-HEAP SUB-ALLOCATOR
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index b183189..c3f481d 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -34,6 +34,7 @@
struct domain;
struct page_info;
+extern volatile bool_t enable_idle_scrub;
/* Boot-time allocator. Turns into generic allocator after bootstrap. */
void init_boot_pages(paddr_t ps, paddr_t pe);
@@ -78,6 +79,7 @@ int query_page_offline(unsigned long mfn, uint32_t *status);
unsigned long total_free_pages(void);
void scrub_heap_pages(void);
+void scrub_free_pages(void);
int assign_pages(
struct domain *d,
--
1.7.10.4
next prev parent reply other threads:[~2014-06-17 11:50 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-06-17 11:49 [PATCH 1/4] xen: asm-x86: introduce PGC_need_scrub page flag Bob Liu
2014-06-17 11:49 ` [PATCH 2/4] xen: introduce an "scrub" free page list Bob Liu
2014-06-17 12:36 ` Jan Beulich
2014-06-18 0:54 ` Bob Liu
2014-06-18 13:18 ` Konrad Rzeszutek Wilk
2014-06-17 11:49 ` [PATCH 3/4] xen: separate a function merge_free_trunks from Bob Liu
2014-06-17 12:39 ` Jan Beulich
2014-06-17 11:49 ` Bob Liu [this message]
2014-06-17 13:01 ` [PATCH 4/4] xen: use idle vcpus to scrub pages Jan Beulich
2014-06-18 1:18 ` Bob Liu
2014-06-18 10:42 ` Jan Beulich
2014-06-17 12:35 ` [PATCH 1/4] xen: asm-x86: introduce PGC_need_scrub page flag Jan Beulich
2014-06-17 12:46 ` Julien Grall
2014-06-18 0:55 ` Bob Liu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1403005783-30746-4-git-send-email-bob.liu@oracle.com \
--to=lliubbo@gmail.com \
--cc=George.Dunlap@eu.citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=ian.campbell@citrix.com \
--cc=jbeulich@suse.com \
--cc=keir@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.