* [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing
@ 2010-05-30 12:36 Xiao Guangrong
2010-05-30 12:37 ` [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page() Xiao Guangrong
` (4 more replies)
0 siblings, 5 replies; 15+ messages in thread
From: Xiao Guangrong @ 2010-05-30 12:36 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Introduce for_each_gfn_sp(), for_each_gfn_indirect_sp() and
for_each_gfn_indirect_valid_sp() to clean up the hlist traversal
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 129 ++++++++++++++++++++++------------------------------
1 files changed, 54 insertions(+), 75 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 56f8c3c..84c705e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1200,6 +1200,22 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+#define for_each_gfn_sp(kvm, sp, gfn, pos, n) \
+ hlist_for_each_entry_safe(sp, pos, n, \
+ &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
+ if (sp->gfn == gfn)
+
+#define for_each_gfn_indirect_sp(kvm, sp, gfn, pos, n) \
+ hlist_for_each_entry_safe(sp, pos, n, \
+ &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
+ if (sp->gfn == gfn && !sp->role.direct)
+
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n) \
+ hlist_for_each_entry_safe(sp, pos, n, \
+ &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
+ if (sp->gfn == gfn && !sp->role.direct && \
+ !sp->role.invalid)
+
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
bool clear_unsync)
{
@@ -1243,16 +1259,12 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
/* @gfn should be write-protected at the call site */
static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
- struct hlist_head *bucket;
struct kvm_mmu_page *s;
struct hlist_node *node, *n;
- unsigned index;
bool flush = false;
- index = kvm_page_table_hashfn(gfn);
- bucket = &vcpu->kvm->arch.mmu_page_hash[index];
- hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
- if (s->gfn != gfn || !s->unsync || s->role.invalid)
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+ if (!s->unsync)
continue;
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -1364,9 +1376,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
u64 *parent_pte)
{
union kvm_mmu_page_role role;
- unsigned index;
unsigned quadrant;
- struct hlist_head *bucket;
struct kvm_mmu_page *sp;
struct hlist_node *node, *tmp;
bool need_sync = false;
@@ -1382,36 +1392,36 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
}
- index = kvm_page_table_hashfn(gfn);
- bucket = &vcpu->kvm->arch.mmu_page_hash[index];
- hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
- if (sp->gfn == gfn) {
- if (!need_sync && sp->unsync)
- need_sync = true;
- if (sp->role.word != role.word)
- continue;
+ for_each_gfn_sp(vcpu->kvm, sp, gfn, node, tmp) {
+ if (!need_sync && sp->unsync)
+ need_sync = true;
- if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
- break;
+ if (sp->role.word != role.word)
+ continue;
- mmu_page_add_parent_pte(vcpu, sp, parent_pte);
- if (sp->unsync_children) {
- set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
- kvm_mmu_mark_parents_unsync(sp);
- } else if (sp->unsync)
- kvm_mmu_mark_parents_unsync(sp);
+ if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
+ break;
- trace_kvm_mmu_get_page(sp, false);
- return sp;
- }
+ mmu_page_add_parent_pte(vcpu, sp, parent_pte);
+ if (sp->unsync_children) {
+ set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+ kvm_mmu_mark_parents_unsync(sp);
+ } else if (sp->unsync)
+ kvm_mmu_mark_parents_unsync(sp);
+
+ trace_kvm_mmu_get_page(sp, false);
+ return sp;
+ }
++vcpu->kvm->stat.mmu_cache_miss;
sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
if (!sp)
return sp;
sp->gfn = gfn;
sp->role = role;
- hlist_add_head(&sp->hash_link, bucket);
+ hlist_add_head(&sp->hash_link,
+ &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
+
if (!direct) {
if (rmap_write_protect(vcpu->kvm, gfn))
kvm_flush_remote_tlbs(vcpu->kvm);
@@ -1616,46 +1626,34 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
- unsigned index;
- struct hlist_head *bucket;
struct kvm_mmu_page *sp;
struct hlist_node *node, *n;
int r;
pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
r = 0;
- index = kvm_page_table_hashfn(gfn);
- bucket = &kvm->arch.mmu_page_hash[index];
restart:
- hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
- if (sp->gfn == gfn && !sp->role.direct) {
- pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
- sp->role.word);
- r = 1;
- if (kvm_mmu_zap_page(kvm, sp))
- goto restart;
- }
+ for_each_gfn_indirect_sp(kvm, sp, gfn, node, n) {
+ pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
+ sp->role.word);
+ r = 1;
+ if (kvm_mmu_zap_page(kvm, sp))
+ goto restart;
+ }
return r;
}
static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
- unsigned index;
- struct hlist_head *bucket;
struct kvm_mmu_page *sp;
struct hlist_node *node, *nn;
- index = kvm_page_table_hashfn(gfn);
- bucket = &kvm->arch.mmu_page_hash[index];
restart:
- hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
- if (sp->gfn == gfn && !sp->role.direct
- && !sp->role.invalid) {
- pgprintk("%s: zap %lx %x\n",
- __func__, gfn, sp->role.word);
- if (kvm_mmu_zap_page(kvm, sp))
- goto restart;
- }
+ for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, nn) {
+ pgprintk("%s: zap %lx %x\n",
+ __func__, gfn, sp->role.word);
+ if (kvm_mmu_zap_page(kvm, sp))
+ goto restart;
}
}
@@ -1798,17 +1796,11 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
- struct hlist_head *bucket;
struct kvm_mmu_page *s;
struct hlist_node *node, *n;
- unsigned index;
-
- index = kvm_page_table_hashfn(gfn);
- bucket = &vcpu->kvm->arch.mmu_page_hash[index];
- hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
- if (s->gfn != gfn || s->role.direct || s->unsync ||
- s->role.invalid)
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+ if (s->unsync)
continue;
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
__kvm_unsync_page(vcpu, s);
@@ -1818,18 +1810,11 @@ static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
bool can_unsync)
{
- unsigned index;
- struct hlist_head *bucket;
struct kvm_mmu_page *s;
struct hlist_node *node, *n;
bool need_unsync = false;
- index = kvm_page_table_hashfn(gfn);
- bucket = &vcpu->kvm->arch.mmu_page_hash[index];
- hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
- if (s->gfn != gfn || s->role.direct || s->role.invalid)
- continue;
-
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
if (s->role.level != PT_PAGE_TABLE_LEVEL)
return 1;
@@ -2681,8 +2666,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
struct hlist_node *node, *n;
- struct hlist_head *bucket;
- unsigned index;
u64 entry, gentry;
u64 *spte;
unsigned offset = offset_in_page(gpa);
@@ -2750,13 +2733,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu->arch.last_pte_updated = NULL;
}
}
- index = kvm_page_table_hashfn(gfn);
- bucket = &vcpu->kvm->arch.mmu_page_hash[index];
restart:
- hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
- if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
- continue;
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node, n) {
pte_size = sp->role.cr4_pae ? 8 : 4;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
misaligned |= bytes < 4;
--
1.6.1.2
* [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page()
2010-05-30 12:36 [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing Xiao Guangrong
@ 2010-05-30 12:37 ` Xiao Guangrong
2010-05-30 13:16 ` Avi Kivity
2010-05-30 12:39 ` [PATCH 3/5] KVM: MMU: gather remote tlb flush which occurs during page zapped Xiao Guangrong
` (3 subsequent siblings)
4 siblings, 1 reply; 15+ messages in thread
From: Xiao Guangrong @ 2010-05-30 12:37 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Use kvm_mmu_prepare_zap_page() and kvm_mmu_commit_zap_page() to
split the kvm_mmu_zap_page() function, so that we can:
- traverse the hlist safely
- easily gather the remote TLB flushes that occur while pages are zapped
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/mmu.c | 42 ++++++++++++++++++++++++++++++++++++++-
arch/x86/kvm/x86.c | 1 +
3 files changed, 43 insertions(+), 1 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0cd0f29..e4df1cf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -388,6 +388,7 @@ struct kvm_arch {
* Hash table of struct kvm_mmu_page.
*/
struct list_head active_mmu_pages;
+ struct list_head invalid_mmu_pages;
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
int iommu_flags;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 84c705e..0c957bf 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -915,6 +915,7 @@ static int is_empty_shadow_page(u64 *spt)
static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
ASSERT(is_empty_shadow_page(sp->spt));
+ hlist_del(&sp->hash_link);
list_del(&sp->link);
__free_page(virt_to_page(sp->spt));
if (!sp->role.direct)
@@ -1560,6 +1561,46 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
return zapped;
}
+static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ int ret;
+
+ trace_kvm_mmu_zap_page(sp);
+ ++kvm->stat.mmu_shadow_zapped;
+ ret = mmu_zap_unsync_children(kvm, sp);
+ kvm_mmu_page_unlink_children(kvm, sp);
+ kvm_mmu_unlink_parents(kvm, sp);
+ if (!sp->role.invalid && !sp->role.direct)
+ unaccount_shadowed(kvm, sp->gfn);
+ if (sp->unsync)
+ kvm_unlink_unsync_page(kvm, sp);
+ if (!sp->root_count)
+ /* Count self */
+ ret++;
+ else
+ kvm_reload_remote_mmus(kvm);
+
+ sp->role.invalid = 1;
+ list_move(&sp->link, &kvm->arch.invalid_mmu_pages);
+ kvm_mmu_reset_last_pte_updated(kvm);
+ return ret;
+}
+
+static void kvm_mmu_commit_zap_page(struct kvm *kvm)
+{
+ struct kvm_mmu_page *sp, *n;
+
+ if (list_empty(&kvm->arch.invalid_mmu_pages))
+ return;
+
+ kvm_flush_remote_tlbs(kvm);
+ list_for_each_entry_safe(sp, n, &kvm->arch.invalid_mmu_pages, link) {
+ WARN_ON(!sp->role.invalid);
+ if (!sp->root_count)
+ kvm_mmu_free_page(kvm, sp);
+ }
+}
+
static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
int ret;
@@ -1577,7 +1618,6 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
if (!sp->root_count) {
/* Count self */
ret++;
- hlist_del(&sp->hash_link);
kvm_mmu_free_page(kvm, sp);
} else {
sp->role.invalid = 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5e5cd8d..225c3c4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5331,6 +5331,7 @@ struct kvm *kvm_arch_create_vm(void)
}
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+ INIT_LIST_HEAD(&kvm->arch.invalid_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
--
1.6.1.2
* [PATCH 3/5] KVM: MMU: gather remote tlb flush which occurs during page zapped
2010-05-30 12:36 [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing Xiao Guangrong
2010-05-30 12:37 ` [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page() Xiao Guangrong
@ 2010-05-30 12:39 ` Xiao Guangrong
2010-05-30 12:40 ` [PATCH 4/5] KVM: MMU: traverse sp hlist safely Xiao Guangrong
` (2 subsequent siblings)
4 siblings, 0 replies; 15+ messages in thread
From: Xiao Guangrong @ 2010-05-30 12:39 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Use kvm_mmu_prepare_zap_page() and kvm_mmu_commit_zap_page() instead of
kvm_mmu_zap_page(); this reduces the number of remote TLB flush IPIs.
Tested with xp/vista64 and fedora12 32/64 guests; nothing is broken.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 75 +++++++++++++++++++----------------------------
arch/x86/kvm/mmutrace.h | 2 +-
2 files changed, 31 insertions(+), 46 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0c957bf..e2b1020 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1199,7 +1199,8 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
--kvm->stat.mmu_unsync;
}
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+static void kvm_mmu_commit_zap_page(struct kvm *kvm);
#define for_each_gfn_sp(kvm, sp, gfn, pos, n) \
hlist_for_each_entry_safe(sp, pos, n, \
@@ -1221,7 +1222,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
bool clear_unsync)
{
if (sp->role.cr4_pae != !!is_pae(vcpu)) {
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp);
return 1;
}
@@ -1232,7 +1233,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
}
if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp);
return 1;
}
@@ -1249,6 +1250,7 @@ static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
ret = __kvm_sync_page(vcpu, sp, false);
if (!ret)
mmu_convert_notrap(sp);
+ kvm_mmu_commit_zap_page(vcpu->kvm);
return ret;
}
@@ -1271,13 +1273,13 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
(vcpu->arch.mmu.sync_page(vcpu, s))) {
- kvm_mmu_zap_page(vcpu->kvm, s);
+ kvm_mmu_prepare_zap_page(vcpu->kvm, s);
continue;
}
kvm_unlink_unsync_page(vcpu->kvm, s);
flush = true;
}
-
+ kvm_mmu_commit_zap_page(vcpu->kvm);
if (flush)
kvm_mmu_flush_tlb(vcpu);
}
@@ -1363,6 +1365,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
kvm_sync_page(vcpu, sp);
mmu_pages_clear_parents(&parents);
}
+ kvm_mmu_commit_zap_page(vcpu->kvm);
cond_resched_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_pages_init(parent, &parents, &pages);
}
@@ -1551,7 +1554,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
struct kvm_mmu_page *sp;
for_each_sp(pages, sp, parents, i) {
- kvm_mmu_zap_page(kvm, sp);
+ kvm_mmu_prepare_zap_page(kvm, sp);
mmu_pages_clear_parents(&parents);
zapped++;
}
@@ -1565,7 +1568,7 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
int ret;
- trace_kvm_mmu_zap_page(sp);
+ trace_kvm_mmu_prepare_zap_page(sp);
++kvm->stat.mmu_shadow_zapped;
ret = mmu_zap_unsync_children(kvm, sp);
kvm_mmu_page_unlink_children(kvm, sp);
@@ -1601,33 +1604,6 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm)
}
}
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
- int ret;
-
- trace_kvm_mmu_zap_page(sp);
- ++kvm->stat.mmu_shadow_zapped;
- ret = mmu_zap_unsync_children(kvm, sp);
- kvm_mmu_page_unlink_children(kvm, sp);
- kvm_mmu_unlink_parents(kvm, sp);
- kvm_flush_remote_tlbs(kvm);
- if (!sp->role.invalid && !sp->role.direct)
- unaccount_shadowed(kvm, sp->gfn);
- if (sp->unsync)
- kvm_unlink_unsync_page(kvm, sp);
- if (!sp->root_count) {
- /* Count self */
- ret++;
- kvm_mmu_free_page(kvm, sp);
- } else {
- sp->role.invalid = 1;
- list_move(&sp->link, &kvm->arch.active_mmu_pages);
- kvm_reload_remote_mmus(kvm);
- }
- kvm_mmu_reset_last_pte_updated(kvm);
- return ret;
-}
-
/*
* Changing the number of mmu pages allocated to the vm
* Note: if kvm_nr_mmu_pages is too small, you will get dead lock
@@ -1652,8 +1628,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- used_pages -= kvm_mmu_zap_page(kvm, page);
+ used_pages -= kvm_mmu_prepare_zap_page(kvm, page);
}
+ kvm_mmu_commit_zap_page(kvm);
kvm_nr_mmu_pages = used_pages;
kvm->arch.n_free_mmu_pages = 0;
}
@@ -1677,9 +1654,10 @@ restart:
pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
sp->role.word);
r = 1;
- if (kvm_mmu_zap_page(kvm, sp))
+ if (kvm_mmu_prepare_zap_page(kvm, sp))
goto restart;
}
+ kvm_mmu_commit_zap_page(kvm);
return r;
}
@@ -1692,9 +1670,10 @@ restart:
for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, nn) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
- if (kvm_mmu_zap_page(kvm, sp))
+ if (kvm_mmu_prepare_zap_page(kvm, sp))
goto restart;
}
+ kvm_mmu_commit_zap_page(kvm);
}
static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
@@ -2121,8 +2100,10 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
sp = page_header(root);
--sp->root_count;
- if (!sp->root_count && sp->role.invalid)
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ if (!sp->root_count && sp->role.invalid) {
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp);
+ kvm_mmu_commit_zap_page(vcpu->kvm);
+ }
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
spin_unlock(&vcpu->kvm->mmu_lock);
return;
@@ -2135,10 +2116,11 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
sp = page_header(root);
--sp->root_count;
if (!sp->root_count && sp->role.invalid)
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp);
}
vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
}
+ kvm_mmu_commit_zap_page(vcpu->kvm);
spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}
@@ -2792,7 +2774,7 @@ restart:
*/
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, sp->role.word);
- if (kvm_mmu_zap_page(vcpu->kvm, sp))
+ if (kvm_mmu_prepare_zap_page(vcpu->kvm, sp))
goto restart;
++vcpu->kvm->stat.mmu_flooded;
continue;
@@ -2827,6 +2809,7 @@ restart:
++spte;
}
}
+ kvm_mmu_commit_zap_page(vcpu->kvm);
kvm_mmu_audit(vcpu, "post pte write");
spin_unlock(&vcpu->kvm->mmu_lock);
if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
@@ -2860,9 +2843,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp);
++vcpu->kvm->stat.mmu_recycled;
}
+ kvm_mmu_commit_zap_page(vcpu->kvm);
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
@@ -3001,9 +2985,10 @@ void kvm_mmu_zap_all(struct kvm *kvm)
spin_lock(&kvm->mmu_lock);
restart:
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
- if (kvm_mmu_zap_page(kvm, sp))
+ if (kvm_mmu_prepare_zap_page(kvm, sp))
goto restart;
+ kvm_mmu_commit_zap_page(kvm);
spin_unlock(&kvm->mmu_lock);
kvm_flush_remote_tlbs(kvm);
@@ -3015,7 +3000,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- return kvm_mmu_zap_page(kvm, page);
+ return kvm_mmu_prepare_zap_page(kvm, page);
}
static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
@@ -3040,7 +3025,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
kvm_freed = kvm;
}
nr_to_scan--;
-
+ kvm_mmu_commit_zap_page(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 42f07b1..3aab0f0 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -190,7 +190,7 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
TP_ARGS(sp)
);
-DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_zap_page,
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
TP_PROTO(struct kvm_mmu_page *sp),
TP_ARGS(sp)
--
1.6.1.2
* [PATCH 4/5] KVM: MMU: traverse sp hlist safely
2010-05-30 12:36 [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing Xiao Guangrong
2010-05-30 12:37 ` [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page() Xiao Guangrong
2010-05-30 12:39 ` [PATCH 3/5] KVM: MMU: gather remote tlb flush which occurs during page zapped Xiao Guangrong
@ 2010-05-30 12:40 ` Xiao Guangrong
2010-05-30 12:42 ` [PATCH 5/5] KVM: MMU: reduce remote tlb flush in kvm_mmu_pte_write() Xiao Guangrong
2010-05-30 13:12 ` [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing Avi Kivity
4 siblings, 0 replies; 15+ messages in thread
From: Xiao Guangrong @ 2010-05-30 12:40 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Now we can safely traverse the sp hlist.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 53 +++++++++++++++++++++++----------------------------
1 files changed, 24 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e2b1020..be75cba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1202,18 +1202,18 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
static void kvm_mmu_commit_zap_page(struct kvm *kvm);
-#define for_each_gfn_sp(kvm, sp, gfn, pos, n) \
- hlist_for_each_entry_safe(sp, pos, n, \
+#define for_each_gfn_sp(kvm, sp, gfn, pos) \
+ hlist_for_each_entry(sp, pos, \
&kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
if (sp->gfn == gfn)
-#define for_each_gfn_indirect_sp(kvm, sp, gfn, pos, n) \
- hlist_for_each_entry_safe(sp, pos, n, \
+#define for_each_gfn_indirect_sp(kvm, sp, gfn, pos) \
+ hlist_for_each_entry(sp, pos, \
&kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
if (sp->gfn == gfn && !sp->role.direct)
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n) \
- hlist_for_each_entry_safe(sp, pos, n, \
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \
+ hlist_for_each_entry(sp, pos, \
&kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
if (sp->gfn == gfn && !sp->role.direct && \
!sp->role.invalid)
@@ -1263,10 +1263,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_mmu_page *s;
- struct hlist_node *node, *n;
+ struct hlist_node *node;
bool flush = false;
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
if (!s->unsync)
continue;
@@ -1382,7 +1382,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
union kvm_mmu_page_role role;
unsigned quadrant;
struct kvm_mmu_page *sp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *node;
bool need_sync = false;
role = vcpu->arch.mmu.base_role;
@@ -1397,7 +1397,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
role.quadrant = quadrant;
}
- for_each_gfn_sp(vcpu->kvm, sp, gfn, node, tmp) {
+ for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
if (!need_sync && sp->unsync)
need_sync = true;
@@ -1644,18 +1644,17 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
struct kvm_mmu_page *sp;
- struct hlist_node *node, *n;
+ struct hlist_node *node;
int r;
pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
r = 0;
-restart:
- for_each_gfn_indirect_sp(kvm, sp, gfn, node, n) {
+
+ for_each_gfn_indirect_sp(kvm, sp, gfn, node) {
pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
sp->role.word);
r = 1;
- if (kvm_mmu_prepare_zap_page(kvm, sp))
- goto restart;
+ kvm_mmu_prepare_zap_page(kvm, sp);
}
kvm_mmu_commit_zap_page(kvm);
return r;
@@ -1664,14 +1663,12 @@ restart:
static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
struct kvm_mmu_page *sp;
- struct hlist_node *node, *nn;
+ struct hlist_node *node;
-restart:
- for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, nn) {
+ for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
- if (kvm_mmu_prepare_zap_page(kvm, sp))
- goto restart;
+ kvm_mmu_prepare_zap_page(kvm, sp);
}
kvm_mmu_commit_zap_page(kvm);
}
@@ -1816,9 +1813,9 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_mmu_page *s;
- struct hlist_node *node, *n;
+ struct hlist_node *node;
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
if (s->unsync)
continue;
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -1830,10 +1827,10 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
bool can_unsync)
{
struct kvm_mmu_page *s;
- struct hlist_node *node, *n;
+ struct hlist_node *node;
bool need_unsync = false;
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
if (s->role.level != PT_PAGE_TABLE_LEVEL)
return 1;
@@ -2687,7 +2684,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
- struct hlist_node *node, *n;
+ struct hlist_node *node;
u64 entry, gentry;
u64 *spte;
unsigned offset = offset_in_page(gpa);
@@ -2756,8 +2753,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
}
}
-restart:
- for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node, n) {
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
pte_size = sp->role.cr4_pae ? 8 : 4;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
misaligned |= bytes < 4;
@@ -2774,8 +2770,7 @@ restart:
*/
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, sp->role.word);
- if (kvm_mmu_prepare_zap_page(vcpu->kvm, sp))
- goto restart;
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp);
++vcpu->kvm->stat.mmu_flooded;
continue;
}
--
1.6.1.2
* [PATCH 5/5] KVM: MMU: reduce remote tlb flush in kvm_mmu_pte_write()
2010-05-30 12:36 [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing Xiao Guangrong
` (2 preceding siblings ...)
2010-05-30 12:40 ` [PATCH 4/5] KVM: MMU: traverse sp hlist safely Xiao Guangrong
@ 2010-05-30 12:42 ` Xiao Guangrong
2010-05-30 13:12 ` [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing Avi Kivity
4 siblings, 0 replies; 15+ messages in thread
From: Xiao Guangrong @ 2010-05-30 12:42 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Collect remote TLB flushes in the kvm_mmu_pte_write() path.
Tested with xp/vista64 and fedora12 32/64 guests; nothing is broken.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 20 ++++++++++++++++----
1 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index be75cba..cbd8d9c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2629,11 +2629,15 @@ static bool need_remote_flush(u64 old, u64 new)
return (old & ~new & PT64_PERM_MASK) != 0;
}
-static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
+static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
+ bool remote_flush, bool local_flush)
{
- if (need_remote_flush(old, new))
+ if (zap_page)
+ return;
+
+ if (remote_flush)
kvm_flush_remote_tlbs(vcpu->kvm);
- else
+ else if (local_flush)
kvm_mmu_flush_tlb(vcpu);
}
@@ -2697,6 +2701,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int npte;
int r;
int invlpg_counter;
+ bool remote_flush, local_flush, zap_page;
+
+ zap_page = remote_flush = local_flush = false;
pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
@@ -2770,6 +2777,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
*/
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, sp->role.word);
+ zap_page = true;
kvm_mmu_prepare_zap_page(vcpu->kvm, sp);
++vcpu->kvm->stat.mmu_flooded;
continue;
@@ -2794,16 +2802,20 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
if (quadrant != sp->role.quadrant)
continue;
}
+
+ local_flush = true;
spte = &sp->spt[page_offset / sizeof(*spte)];
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
if (gentry)
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
- mmu_pte_write_flush_tlb(vcpu, entry, *spte);
+ if (!remote_flush && need_remote_flush(entry, *spte))
+ remote_flush = true;
++spte;
}
}
+ mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
kvm_mmu_commit_zap_page(vcpu->kvm);
kvm_mmu_audit(vcpu, "post pte write");
spin_unlock(&vcpu->kvm->mmu_lock);
--
1.6.1.2
* Re: [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing
2010-05-30 12:36 [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing Xiao Guangrong
` (3 preceding siblings ...)
2010-05-30 12:42 ` [PATCH 5/5] KVM: MMU: reduce remote tlb flush in kvm_mmu_pte_write() Xiao Guangrong
@ 2010-05-30 13:12 ` Avi Kivity
2010-05-31 2:00 ` Xiao Guangrong
4 siblings, 1 reply; 15+ messages in thread
From: Avi Kivity @ 2010-05-30 13:12 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/30/2010 03:36 PM, Xiao Guangrong wrote:
> Introduce for_each_gfn_sp(), for_each_gfn_indirect_sp() and
> for_each_gfn_indirect_valid_sp() to cleanup hlist traverseing
>
> Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
> ---
> arch/x86/kvm/mmu.c | 129 ++++++++++++++++++++++------------------------------
> 1 files changed, 54 insertions(+), 75 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 56f8c3c..84c705e 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1200,6 +1200,22 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
>
> static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
>
> +#define for_each_gfn_sp(kvm, sp, gfn, pos, n) \
> + hlist_for_each_entry_safe(sp, pos, n, \
> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
> + if (sp->gfn == gfn)
>
if (...)
for_each_gfn_sp(...)
blah();
else
BUG();
will break. Can do 'if ((sp)->gfn != (gfn)) ; else'.
Or call functions from the for (;;) parameters to advance the cursor.
(also use parentheses to protect macro arguments)
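For illustration, a dangling-else-safe variant of the first macro might look
roughly like this (an untested sketch, only meant to show the shape of the fix):

#define for_each_gfn_sp(kvm, sp, gfn, pos, n)				\
	hlist_for_each_entry_safe(sp, pos, n,				\
		&(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)],\
		hash_link)						\
		if ((sp)->gfn != (gfn)) ; else

With this form the macro's internal 'if' already owns an 'else', so a trailing
'else' written by the caller binds to the caller's own 'if' again.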
> +
> +#define for_each_gfn_indirect_sp(kvm, sp, gfn, pos, n) \
> + hlist_for_each_entry_safe(sp, pos, n, \
> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
> + if (sp->gfn == gfn && !sp->role.direct)
> +
> +#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n) \
> + hlist_for_each_entry_safe(sp, pos, n, \
> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
> + if (sp->gfn == gfn && !sp->role.direct && \
> + !sp->role.invalid)
>
Shouldn't we always skip invalid gfns? What about providing both gfn
and role to the macro?
--
error compiling committee.c: too many arguments to function
* Re: [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page()
2010-05-30 12:37 ` [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page() Xiao Guangrong
@ 2010-05-30 13:16 ` Avi Kivity
2010-05-31 2:13 ` Xiao Guangrong
0 siblings, 1 reply; 15+ messages in thread
From: Avi Kivity @ 2010-05-30 13:16 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/30/2010 03:37 PM, Xiao Guangrong wrote:
> Using kvm_mmu_prepare_zap_page() and kvm_mmu_commit_zap_page() to
> split kvm_mmu_zap_page() function, then we can:
>
> - traverse hlist safely
> - easily to gather remote tlb flush which occurs during page zapped
>
>
> +static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
> +{
> + int ret;
> +
> + trace_kvm_mmu_zap_page(sp);
> + ++kvm->stat.mmu_shadow_zapped;
> + ret = mmu_zap_unsync_children(kvm, sp);
> + kvm_mmu_page_unlink_children(kvm, sp);
> + kvm_mmu_unlink_parents(kvm, sp);
> + if (!sp->role.invalid && !sp->role.direct)
> + unaccount_shadowed(kvm, sp->gfn);
> + if (sp->unsync)
> + kvm_unlink_unsync_page(kvm, sp);
> + if (!sp->root_count)
> + /* Count self */
> + ret++;
> + else
> + kvm_reload_remote_mmus(kvm);
> +
> + sp->role.invalid = 1;
> + list_move(&sp->link, &kvm->arch.invalid_mmu_pages);
> + kvm_mmu_reset_last_pte_updated(kvm);
> + return ret;
> +}
> +
> +static void kvm_mmu_commit_zap_page(struct kvm *kvm)
> +{
> + struct kvm_mmu_page *sp, *n;
> +
> + if (list_empty(&kvm->arch.invalid_mmu_pages))
> + return;
> +
> + kvm_flush_remote_tlbs(kvm);
> + list_for_each_entry_safe(sp, n, &kvm->arch.invalid_mmu_pages, link) {
> + WARN_ON(!sp->role.invalid);
> + if (!sp->root_count)
> + kvm_mmu_free_page(kvm, sp);
> + }
> +}
> +
>
You're adding two new functions but not using them here? Possibly in
the old kvm_mmu_zap_page()?
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 5e5cd8d..225c3c4 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5331,6 +5331,7 @@ struct kvm *kvm_arch_create_vm(void)
> }
>
> INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
> + INIT_LIST_HEAD(&kvm->arch.invalid_mmu_pages);
> INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
>
This is a good idea, but belongs in a separate patch? We can use it to
reclaim invalid pages before allocating new ones.
--
error compiling committee.c: too many arguments to function
* Re: [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing
2010-05-30 13:12 ` [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing Avi Kivity
@ 2010-05-31 2:00 ` Xiao Guangrong
2010-05-31 11:14 ` Avi Kivity
0 siblings, 1 reply; 15+ messages in thread
From: Xiao Guangrong @ 2010-05-31 2:00 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Avi Kivity wrote:
> On 05/30/2010 03:36 PM, Xiao Guangrong wrote:
>> Introduce for_each_gfn_sp(), for_each_gfn_indirect_sp() and
>> for_each_gfn_indirect_valid_sp() to cleanup hlist traverseing
>>
>> Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
>> ---
>> arch/x86/kvm/mmu.c | 129
>> ++++++++++++++++++++++------------------------------
>> 1 files changed, 54 insertions(+), 75 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index 56f8c3c..84c705e 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -1200,6 +1200,22 @@ static void kvm_unlink_unsync_page(struct kvm
>> *kvm, struct kvm_mmu_page *sp)
>>
>> static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
>>
>> +#define for_each_gfn_sp(kvm, sp, gfn, pos, n) \
>> + hlist_for_each_entry_safe(sp, pos, n, \
>> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
>> + if (sp->gfn == gfn)
>>
Avi,
Thanks for your review.
>
>
> if (...)
> for_each_gfn_sp(...)
> blah();
> else
> BUG();
>
> will break. Can do 'if ((sp)->gfn != (gfn)) ; else'.
>
> Or call functions from the for (;;) parameters to advance the cursor.
>
> (also use parentheses to protect macro arguments)
>
Yeah, it's my mistake; I'll fix it in the next version.
>
>
>> +
>> +#define for_each_gfn_indirect_sp(kvm, sp, gfn, pos, n) \
>> + hlist_for_each_entry_safe(sp, pos, n, \
>> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
>> + if (sp->gfn == gfn && !sp->role.direct)
>> +
>> +#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n) \
>> + hlist_for_each_entry_safe(sp, pos, n, \
>> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
>> + if (sp->gfn == gfn && !sp->role.direct && \
>> + !sp->role.invalid)
>>
>
> Shouldn't we always skip invalid gfns?
Actually, the kvm_mmu_unprotect_page() function needs to find the
invalid shadow pages too:
| hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
| if (sp->gfn == gfn && !sp->role.direct) {
| pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
| sp->role.word);
| r = 1;
| if (kvm_mmu_zap_page(kvm, sp))
| goto restart;
| }
I'm not sure whether we can skip invalid sp here, since it can change this
function's return value. :-(
> What about providing both gfn and role to the macro?
>
In the current code, nothing looks up an sp simply by role and gfn:
in kvm_mmu_get_page() we need to do extra work for sps where
'sp->gfn == gfn && sp->role != role', and the other functions only compare
some members of role, not all of them.
Xiao
* Re: [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page()
2010-05-30 13:16 ` Avi Kivity
@ 2010-05-31 2:13 ` Xiao Guangrong
2010-05-31 11:05 ` Avi Kivity
0 siblings, 1 reply; 15+ messages in thread
From: Xiao Guangrong @ 2010-05-31 2:13 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Avi Kivity wrote:
> On 05/30/2010 03:37 PM, Xiao Guangrong wrote:
>> Using kvm_mmu_prepare_zap_page() and kvm_mmu_commit_zap_page() to
>> split kvm_mmu_zap_page() function, then we can:
>>
>> - traverse hlist safely
>> - easily to gather remote tlb flush which occurs during page zapped
>>
>>
>> +static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct
>> kvm_mmu_page *sp)
>> +{
>> + int ret;
>> +
>> + trace_kvm_mmu_zap_page(sp);
>> + ++kvm->stat.mmu_shadow_zapped;
>> + ret = mmu_zap_unsync_children(kvm, sp);
>> + kvm_mmu_page_unlink_children(kvm, sp);
>> + kvm_mmu_unlink_parents(kvm, sp);
>> + if (!sp->role.invalid && !sp->role.direct)
>> + unaccount_shadowed(kvm, sp->gfn);
>> + if (sp->unsync)
>> + kvm_unlink_unsync_page(kvm, sp);
>> + if (!sp->root_count)
>> + /* Count self */
>> + ret++;
>> + else
>> + kvm_reload_remote_mmus(kvm);
>> +
>> + sp->role.invalid = 1;
>> + list_move(&sp->link, &kvm->arch.invalid_mmu_pages);
>> + kvm_mmu_reset_last_pte_updated(kvm);
>> + return ret;
>> +}
>> +
>> +static void kvm_mmu_commit_zap_page(struct kvm *kvm)
>> +{
>> + struct kvm_mmu_page *sp, *n;
>> +
>> + if (list_empty(&kvm->arch.invalid_mmu_pages))
>> + return;
>> +
>> + kvm_flush_remote_tlbs(kvm);
>> + list_for_each_entry_safe(sp, n, &kvm->arch.invalid_mmu_pages, link) {
>> + WARN_ON(!sp->role.invalid);
>> + if (!sp->root_count)
>> + kvm_mmu_free_page(kvm, sp);
>> + }
>> +}
>> +
>>
>
> You're adding two new functions but not using them here? Possibly in
> the old kvm_mmu_zap_page()?
They are used in the next patch; the pattern is not inside kvm_mmu_zap_page(), it is used like this:
hold mmu spin lock
kvm_mmu_prepare_zap_page page A
kvm_mmu_prepare_zap_page page B
kvm_mmu_prepare_zap_page page C
......
kvm_mmu_commit_zap_page
release mmu spin lock
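i.e. roughly (just a sketch of the call pattern; page_a/b/c are placeholders
for whatever pages the caller walks over):

	spin_lock(&kvm->mmu_lock);

	kvm_mmu_prepare_zap_page(kvm, page_a);
	kvm_mmu_prepare_zap_page(kvm, page_b);
	kvm_mmu_prepare_zap_page(kvm, page_c);
	/* ... */

	/* one remote TLB flush and one free pass for everything above */
	kvm_mmu_commit_zap_page(kvm);

	spin_unlock(&kvm->mmu_lock);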
>
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 5e5cd8d..225c3c4 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -5331,6 +5331,7 @@ struct kvm *kvm_arch_create_vm(void)
>> }
>>
>> INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
>> + INIT_LIST_HEAD(&kvm->arch.invalid_mmu_pages);
>> INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
>>
>
> This is a good idea, but belongs in a separate patch? We can use it to
> reclaim invalid pages before allocating new ones.
>
This patch is very simple, and the kvm_mmu_commit_zap_page() function depends on
kvm->arch.invalid_mmu_pages, so I don't think we need to separate this out. What's your opinion? :-)
Thanks,
Xiao
* Re: [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page()
2010-05-31 2:13 ` Xiao Guangrong
@ 2010-05-31 11:05 ` Avi Kivity
2010-06-01 2:29 ` Xiao Guangrong
0 siblings, 1 reply; 15+ messages in thread
From: Avi Kivity @ 2010-05-31 11:05 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/31/2010 05:13 AM, Xiao Guangrong wrote:
>
> Avi Kivity wrote:
>
>> On 05/30/2010 03:37 PM, Xiao Guangrong wrote:
>>
>>> Using kvm_mmu_prepare_zap_page() and kvm_mmu_commit_zap_page() to
>>> split kvm_mmu_zap_page() function, then we can:
>>>
>>> - traverse hlist safely
>>> - easily to gather remote tlb flush which occurs during page zapped
>>>
>>>
>>> +static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct
>>> kvm_mmu_page *sp)
>>> +{
>>> + int ret;
>>> +
>>> + trace_kvm_mmu_zap_page(sp);
>>> + ++kvm->stat.mmu_shadow_zapped;
>>> + ret = mmu_zap_unsync_children(kvm, sp);
>>> + kvm_mmu_page_unlink_children(kvm, sp);
>>> + kvm_mmu_unlink_parents(kvm, sp);
>>> + if (!sp->role.invalid && !sp->role.direct)
>>> + unaccount_shadowed(kvm, sp->gfn);
>>> + if (sp->unsync)
>>> + kvm_unlink_unsync_page(kvm, sp);
>>> + if (!sp->root_count)
>>> + /* Count self */
>>> + ret++;
>>> + else
>>> + kvm_reload_remote_mmus(kvm);
>>> +
>>> + sp->role.invalid = 1;
>>> + list_move(&sp->link, &kvm->arch.invalid_mmu_pages);
>>> + kvm_mmu_reset_last_pte_updated(kvm);
>>> + return ret;
>>> +}
>>> +
>>> +static void kvm_mmu_commit_zap_page(struct kvm *kvm)
>>> +{
>>> + struct kvm_mmu_page *sp, *n;
>>> +
>>> + if (list_empty(&kvm->arch.invalid_mmu_pages))
>>> + return;
>>> +
>>> + kvm_flush_remote_tlbs(kvm);
>>> + list_for_each_entry_safe(sp, n, &kvm->arch.invalid_mmu_pages, link) {
>>> + WARN_ON(!sp->role.invalid);
>>> + if (!sp->root_count)
>>> + kvm_mmu_free_page(kvm, sp);
>>> + }
>>> +}
>>> +
>>>
>>>
>> You're adding two new functions but not using them here? Possibly in
>> the old kvm_mmu_zap_page()?
>>
> I use those in the next patch, it's not in kvm_mmu_zap_page(), it's used like:
>
> hold mmu spin lock
>
> kvm_mmu_prepare_zap_page page A
> kvm_mmu_prepare_zap_page page B
> kvm_mmu_prepare_zap_page page C
> ......
> kvm_mmu_commit_zap_page
>
It would be better to rewrite kvm_mmu_zap_page() in terms of
prepare/commit in the patch so we don't have two copies of the same
thing (also easier to review).
>> This is a good idea, but belongs in a separate patch? We can use it to
>> reclaim invalid pages before allocating new ones.
>>
>>
> This patch is very simple and kvm_mmu_commit_zap_page() function should depend on
> kvm->arch.invalid_mmu_pages, so i think we on need separate this patch, your opinion? :-)
>
>
How about passing the list as a parameter to prepare() and commit()? If
the lifetime of the list is just prepare/commit, it shouldn't be a global.
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.
* Re: [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing
2010-05-31 2:00 ` Xiao Guangrong
@ 2010-05-31 11:14 ` Avi Kivity
2010-06-01 2:38 ` Xiao Guangrong
0 siblings, 1 reply; 15+ messages in thread
From: Avi Kivity @ 2010-05-31 11:14 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 05/31/2010 05:00 AM, Xiao Guangrong wrote:
>
>>
>>> +
>>> +#define for_each_gfn_indirect_sp(kvm, sp, gfn, pos, n) \
>>> + hlist_for_each_entry_safe(sp, pos, n, \
>>> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
>>> + if (sp->gfn == gfn && !sp->role.direct)
>>> +
>>> +#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n) \
>>> + hlist_for_each_entry_safe(sp, pos, n, \
>>> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
>>> + if (sp->gfn == gfn && !sp->role.direct && \
>>> + !sp->role.invalid)
>>>
>>>
>> Shouldn't we always skip invalid gfns?
>>
> Actually, in kvm_mmu_unprotect_page() function, it need find out
> invalid shadow pages:
>
> | hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
> | if (sp->gfn == gfn && !sp->role.direct) {
> | pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
> | sp->role.word);
> | r = 1;
> | if (kvm_mmu_zap_page(kvm, sp))
> | goto restart;
> | }
>
> I'm not sure whether we can skip invalid sp here, since it can change this
> function's return value. :-(
>
Hm. Invalid pages don't need to be write protected. So I think you can
patch unprotect_page() to ignore invalid pages, and then you can convert
it to the new macros which ignore invalid pages as well.
The invariant is: if an sp exists with !role.invalid and !unsync, then
the page must be write protected.
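For example, with the macros as they look after patch 4, unprotect_page()
could become something like this (untested sketch, just to show the idea):

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	int r = 0;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);

	/* invalid pages are never write protected, so skip them here */
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
		pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp);
	}
	kvm_mmu_commit_zap_page(kvm);

	return r;
}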
>> What about providing both gfn and role to the macro?
>>
>>
> In current code, no code simply use role and gfn to find sp,
> in kvm_mmu_get_page(), we need do other work for
> 'sp->gfn == gfn && sp->role != role' sp, and other functions only need compare
> some members in role, but not all members.
>
How about just gfn? I think everything compares against that!
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.
* Re: [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page()
2010-05-31 11:05 ` Avi Kivity
@ 2010-06-01 2:29 ` Xiao Guangrong
2010-06-01 8:08 ` Avi Kivity
0 siblings, 1 reply; 15+ messages in thread
From: Xiao Guangrong @ 2010-06-01 2:29 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Avi Kivity wrote:
>
> It would be better to rewrite kvm_mmu_zap_page() in terms of
> prepare/commit in the patch so we don't have two copies of the same
> thing (also easier to review).
OK, I'll do it in the next version.
>
>
>
>
>>> This is a good idea, but belongs in a separate patch? We can use it to
>>> reclaim invalid pages before allocating new ones.
>>>
>>>
>> This patch is very simple and kvm_mmu_commit_zap_page() function
>> should depend on
>> kvm->arch.invalid_mmu_pages, so i think we on need separate this
>> patch, your opinion? :-)
>>
>>
>
> How about passing the list as a parameter to prepare() and commit()? If
> the lifetime of the list is just prepare/commit, it shouldn't be a global.
>
Does the example code below show your meaning correctly?
+ struct list_head free_list = LIST_HEAD_INIT(free_list);
hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
if (sp->gfn == gfn && !sp->role.direct
&& !sp->role.invalid) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
+ kvm_mmu_prepare_zap_page(kvm, sp, &free_list);
}
}
+ kvm_mmu_commit_zap_page(kvm, &free_list);
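The prototypes would then presumably become something like:

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *free_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *free_list);

with prepare() doing list_move(&sp->link, free_list) instead of moving the
page onto a global kvm->arch.invalid_mmu_pages list.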
Thanks,
Xiao
* Re: [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing
2010-05-31 11:14 ` Avi Kivity
@ 2010-06-01 2:38 ` Xiao Guangrong
2010-06-01 8:09 ` Avi Kivity
0 siblings, 1 reply; 15+ messages in thread
From: Xiao Guangrong @ 2010-06-01 2:38 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM list
Avi Kivity wrote:
> On 05/31/2010 05:00 AM, Xiao Guangrong wrote:
>>
>>>
>>>> +
>>>> +#define for_each_gfn_indirect_sp(kvm, sp, gfn, pos, n) \
>>>> + hlist_for_each_entry_safe(sp, pos, n, \
>>>> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
>>>> + if (sp->gfn == gfn && !sp->role.direct)
>>>> +
>>>> +#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n) \
>>>> + hlist_for_each_entry_safe(sp, pos, n, \
>>>> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
>>>> + if (sp->gfn == gfn && !sp->role.direct && \
>>>> + !sp->role.invalid)
>>>>
>>>>
>>> Shouldn't we always skip invalid gfns?
>>>
>> Actually, in kvm_mmu_unprotect_page() function, it need find out
>> invalid shadow pages:
>>
>> | hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
>> | if (sp->gfn == gfn && !sp->role.direct) {
>> | pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
>> | sp->role.word);
>> | r = 1;
>> | if (kvm_mmu_zap_page(kvm, sp))
>> | goto restart;
>> | }
>>
>> I'm not sure whether we can skip invalid sp here, since it can change
>> this
>> function's return value. :-(
>>
>
> Hm. Invalid pages don't need to be write protected. So I think you can
> patch unprotect_page() to ignore invalid pages, and then you can convert
> it to the new macros which ignore invalid pages as well.
>
> The invariant is: if an sp exists with !role.invalid and !unsync, then
> the page must be write protected.
OK, will fix it in the next version.
>
>
>>> What about providing both gfn and role to the macro?
>>>
>>>
>> In current code, no code simply use role and gfn to find sp,
>> in kvm_mmu_get_page(), we need do other work for
>> 'sp->gfn == gfn && sp->role != role' sp, and other functions only need
>> compare
>> some members in role, but not all members.
>>
>
> How about just gfn? I think everything compares against that!
>
This patch already introduces a macro that compares only 'gfn', that is:
+#define for_each_gfn_sp(kvm, sp, gfn, pos, n) \
+ hlist_for_each_entry_safe(sp, pos, n, \
+ &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
+ if (sp->gfn == gfn)
Sorry if I misunderstood your meaning.
Thanks,
Xiao
* Re: [PATCH 2/5] KVM: MMU: split the operations of kvm_mmu_zap_page()
2010-06-01 2:29 ` Xiao Guangrong
@ 2010-06-01 8:08 ` Avi Kivity
0 siblings, 0 replies; 15+ messages in thread
From: Avi Kivity @ 2010-06-01 8:08 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 06/01/2010 05:29 AM, Xiao Guangrong wrote:
>
>> How about passing the list as a parameter to prepare() and commit()? If
>> the lifetime of the list is just prepare/commit, it shouldn't be a global.
>>
>>
> Does below example code show your meaning correctly?
>
> + struct list_head free_list = LIST_HEAD_INIT(free_list);
>
> hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
> if (sp->gfn == gfn && !sp->role.direct
> && !sp->role.invalid) {
> pgprintk("%s: zap %lx %x\n",
> __func__, gfn, sp->role.word);
> + kvm_mmu_prepare_zap_page(kvm, sp, &free_list);
> }
> }
> + kvm_mmu_commit_zap_page(kvm, &free_list);
>
>
Yes.
--
error compiling committee.c: too many arguments to function
* Re: [PATCH 1/5] KVM: MMU: introduce some macros to cleanup hlist traversing
2010-06-01 2:38 ` Xiao Guangrong
@ 2010-06-01 8:09 ` Avi Kivity
0 siblings, 0 replies; 15+ messages in thread
From: Avi Kivity @ 2010-06-01 8:09 UTC (permalink / raw)
To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM list
On 06/01/2010 05:38 AM, Xiao Guangrong wrote:
>
> Avi Kivity wrote:
>
>> On 05/31/2010 05:00 AM, Xiao Guangrong wrote:
>>
>>>
>>>>
>>>>
>>>>> +
>>>>> +#define for_each_gfn_indirect_sp(kvm, sp, gfn, pos, n) \
>>>>> + hlist_for_each_entry_safe(sp, pos, n, \
>>>>> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
>>>>> + if (sp->gfn == gfn && !sp->role.direct)
>>>>> +
>>>>> +#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n) \
>>>>> + hlist_for_each_entry_safe(sp, pos, n, \
>>>>> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
>>>>> + if (sp->gfn == gfn && !sp->role.direct && \
>>>>> + !sp->role.invalid)
>>>>>
>>>>>
>>>>>
>>>> Shouldn't we always skip invalid gfns?
>>>>
>>>>
>>> Actually, in kvm_mmu_unprotect_page() function, it need find out
>>> invalid shadow pages:
>>>
>>> | hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
>>> | if (sp->gfn == gfn && !sp->role.direct) {
>>> | pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
>>> | sp->role.word);
>>> | r = 1;
>>> | if (kvm_mmu_zap_page(kvm, sp))
>>> | goto restart;
>>> | }
>>>
>>> I'm not sure whether we can skip invalid sp here, since it can change
>>> this
>>> function's return value. :-(
>>>
>>>
>> Hm. Invalid pages don't need to be write protected. So I think you can
>> patch unprotect_page() to ignore invalid pages, and then you can convert
>> it to the new macros which ignore invalid pages as well.
>>
>> The invariant is: if an sp exists with !role.invalid and !unsync, then
>> the page must be write protected.
>>
> OK, will fix it in the next version.
>
>
>>
>>
>>>> What about providing both gfn and role to the macro?
>>>>
>>>>
>>>>
>>> In current code, no code simply use role and gfn to find sp,
>>> in kvm_mmu_get_page(), we need do other work for
>>> 'sp->gfn == gfn && sp->role != role' sp, and other functions only need
>>> compare
>>> some members in role, but not all members.
>>>
>>>
>> How about just gfn? I think everything compares against that!
>>
>>
> In this patch, it already introduced a macro to only compares 'gfn', that is:
>
> +#define for_each_gfn_sp(kvm, sp, gfn, pos, n) \
> + hlist_for_each_entry_safe(sp, pos, n, \
> + &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)\
> + if (sp->gfn == gfn)
>
> Sorry if i misunderstand your meaning.
>
No, I got confused.
--
error compiling committee.c: too many arguments to function