All of lore.kernel.org
 help / color / mirror / Atom feed
From: Avi Kivity <avi@redhat.com>
To: linux-kernel@vger.kernel.org
Cc: kvm@vger.kernel.org
Subject: [PATCH 21/46] KVM: MMU: remove global page optimization logic
Date: Wed, 20 May 2009 14:18:18 +0300	[thread overview]
Message-ID: <1242818323-10413-22-git-send-email-avi@redhat.com> (raw)
In-Reply-To: <1242818323-10413-1-git-send-email-avi@redhat.com>

From: Marcelo Tosatti <mtosatti@redhat.com>

The complexity required to fix it is not worthwhile given the gains, as
discussed in http://article.gmane.org/gmane.comp.emulators.kvm.devel/28649.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |    4 ---
 arch/x86/kvm/mmu.c              |   50 ++++----------------------------------
 arch/x86/kvm/paging_tmpl.h      |    6 +---
 arch/x86/kvm/x86.c              |    4 ---
 4 files changed, 8 insertions(+), 56 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3fc4623..0e3a7c6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -213,7 +213,6 @@ struct kvm_mmu_page {
 	int multimapped;         /* More than one parent_pte? */
 	int root_count;          /* Currently serving as active root */
 	bool unsync;
-	bool global;
 	unsigned int unsync_children;
 	union {
 		u64 *parent_pte;               /* !multimapped */
@@ -395,7 +394,6 @@ struct kvm_arch{
 	 */
 	struct list_head active_mmu_pages;
 	struct list_head assigned_dev_head;
-	struct list_head oos_global_pages;
 	struct iommu_domain *iommu_domain;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
@@ -425,7 +423,6 @@ struct kvm_vm_stat {
 	u32 mmu_recycled;
 	u32 mmu_cache_miss;
 	u32 mmu_unsync;
-	u32 mmu_unsync_global;
 	u32 remote_tlb_flush;
 	u32 lpages;
 };
@@ -640,7 +637,6 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 409d08e..5b79afa 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1075,18 +1075,10 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	return NULL;
 }
 
-static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	list_del(&sp->oos_link);
-	--kvm->stat.mmu_unsync_global;
-}
-
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	WARN_ON(!sp->unsync);
 	sp->unsync = 0;
-	if (sp->global)
-		kvm_unlink_unsync_global(kvm, sp);
 	--kvm->stat.mmu_unsync;
 }
 
@@ -1249,7 +1241,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
-	sp->global = 0;
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
@@ -1647,11 +1638,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	++vcpu->kvm->stat.mmu_unsync;
 	sp->unsync = 1;
 
-	if (sp->global) {
-		list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
-		++vcpu->kvm->stat.mmu_unsync_global;
-	} else
-		kvm_mmu_mark_parents_unsync(vcpu, sp);
+	kvm_mmu_mark_parents_unsync(vcpu, sp);
 
 	mmu_convert_notrap(sp);
 	return 0;
@@ -1678,21 +1665,12 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int largepage,
-		    int global, gfn_t gfn, pfn_t pfn, bool speculative,
+		    gfn_t gfn, pfn_t pfn, bool speculative,
 		    bool can_unsync)
 {
 	u64 spte;
 	int ret = 0;
 	u64 mt_mask = shadow_mt_mask;
-	struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
-
-	if (!global && sp->global) {
-		sp->global = 0;
-		if (sp->unsync) {
-			kvm_unlink_unsync_global(vcpu->kvm, sp);
-			kvm_mmu_mark_parents_unsync(vcpu, sp);
-		}
-	}
 
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
@@ -1766,8 +1744,8 @@ set_pte:
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
-			 int *ptwrite, int largepage, int global,
-			 gfn_t gfn, pfn_t pfn, bool speculative)
+			 int *ptwrite, int largepage, gfn_t gfn,
+			 pfn_t pfn, bool speculative)
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
@@ -1796,7 +1774,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			was_rmapped = 1;
 	}
 	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-		      dirty, largepage, global, gfn, pfn, speculative, true)) {
+		      dirty, largepage, gfn, pfn, speculative, true)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
@@ -1844,7 +1822,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 		    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
 			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
 				     0, write, 1, &pt_write,
-				     largepage, 0, gfn, pfn, false);
+				     largepage, gfn, pfn, false);
 			++vcpu->stat.pf_fixed;
 			break;
 		}
@@ -2015,15 +1993,6 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = vcpu->kvm;
-	struct kvm_mmu_page *sp, *n;
-
-	list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
-		kvm_sync_page(vcpu, sp);
-}
-
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -2031,13 +2000,6 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-	spin_lock(&vcpu->kvm->mmu_lock);
-	mmu_sync_global(vcpu);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-}
-
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
 	return vaddr;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 09782a9..258e459 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -268,8 +268,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	kvm_get_pfn(pfn);
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
 		     gpte & PT_DIRTY_MASK, NULL, largepage,
-		     gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte),
-		     pfn, true);
+		     gpte_to_gfn(gpte), pfn, true);
 }
 
 /*
@@ -303,7 +302,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 				     user_fault, write_fault,
 				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
 				     ptwrite, largepage,
-				     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
 				     gw->gfn, pfn, false);
 			break;
 		}
@@ -592,7 +590,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		nr_present++;
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
-			 is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn,
+			 is_dirty_pte(gpte), 0, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false);
 	}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 95f1369..9b89d9b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -108,7 +108,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "mmu_recycled", VM_STAT(mmu_recycled) },
 	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
-	{ "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
 	{ "largepages", VM_STAT(lpages) },
 	{ NULL }
@@ -322,7 +321,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	kvm_x86_ops->set_cr0(vcpu, cr0);
 	vcpu->arch.cr0 = cr0;
 
-	kvm_mmu_sync_global(vcpu);
 	kvm_mmu_reset_context(vcpu);
 	return;
 }
@@ -367,7 +365,6 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	kvm_x86_ops->set_cr4(vcpu, cr4);
 	vcpu->arch.cr4 = cr4;
 	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
-	kvm_mmu_sync_global(vcpu);
 	kvm_mmu_reset_context(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
@@ -4360,7 +4357,6 @@ struct  kvm *kvm_arch_create_vm(void)
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-	INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
-- 
1.6.0.6


  parent reply	other threads:[~2009-05-20 11:27 UTC|newest]

Thread overview: 47+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-05-20 11:17 [PATCH 00/46] KVM updates for the 2.6.31 merge window (batch 2/3) Avi Kivity
2009-05-20 11:17 ` [PATCH 01/46] KVM: VMX: Make flexpriority module parameter reflect hardware capability Avi Kivity
2009-05-20 11:17 ` [PATCH 02/46] KVM: VMX: Correct wrong vmcs field sizes Avi Kivity
2009-05-20 11:18 ` [PATCH 03/46] KVM: MMU: Fix comment in page_fault() Avi Kivity
2009-05-20 11:18 ` [PATCH 04/46] KVM: ia64: enable external interrupt in vmm Avi Kivity
2009-05-20 11:18 ` [PATCH 05/46] KVM: MMU: Emulate #PF error code of reserved bits violation Avi Kivity
2009-05-20 11:18 ` [PATCH 06/46] KVM: MMU: Use different shadows when EFER.NXE changes Avi Kivity
2009-05-20 11:18 ` [PATCH 07/46] KVM: remove pointless conditional before kfree() in lapic initialization Avi Kivity
2009-05-20 11:18 ` [PATCH 08/46] KVM: VMX: Clean up Flex Priority related Avi Kivity
2009-05-20 11:18 ` [PATCH 09/46] KVM: VMX: Fix feature testing Avi Kivity
2009-05-20 11:18 ` [PATCH 10/46] KVM: Use rsvd_bits_mask in load_pdptrs() Avi Kivity
2009-05-20 11:18 ` [PATCH 11/46] KVM: VMX: Fix handling of a fault during NMI unblocked due to IRET Avi Kivity
2009-05-20 11:18 ` [PATCH 12/46] KVM: VMX: Rewrite vmx_complete_interrupt()'s twisted maze of if() statements Avi Kivity
2009-05-20 11:18 ` [PATCH 13/46] KVM: VMX: Do not zero idt_vectoring_info in vmx_complete_interrupts() Avi Kivity
2009-05-20 11:18 ` [PATCH 14/46] KVM: Fix task switch back link handling Avi Kivity
2009-05-20 11:18 ` [PATCH 15/46] KVM: Fix unneeded instruction skipping during task switching Avi Kivity
2009-05-20 11:18 ` [PATCH 16/46] KVM: MMU: Discard reserved bits checking on PDE bit 7-8 Avi Kivity
2009-05-20 11:18 ` [PATCH 17/46] KVM: x86 emulator: fix call near emulation Avi Kivity
2009-05-20 11:18 ` [PATCH 18/46] KVM: ia64: make kvm depend on CONFIG_MODULES Avi Kivity
2009-05-20 11:18 ` [PATCH 19/46] KVM: PIT: fix count read and mode 0 handling Avi Kivity
2009-05-20 11:18 ` [PATCH 20/46] KVM: Make kvm header C++ friendly Avi Kivity
2009-05-20 11:18 ` Avi Kivity [this message]
2009-05-20 11:18 ` [PATCH 22/46] KVM: x86 emulator: Add decoding of 16bit second immediate argument Avi Kivity
2009-05-20 11:18 ` [PATCH 23/46] KVM: x86 emulator: Add lcall decoding Avi Kivity
2009-05-20 11:18 ` [PATCH 24/46] KVM: x86 emulator: Complete ljmp decoding at decode stage Avi Kivity
2009-05-20 11:18 ` [PATCH 25/46] KVM: x86 emulator: Complete short/near jcc decoding in " Avi Kivity
2009-05-20 11:18 ` [PATCH 26/46] KVM: x86 emulator: Complete decoding of call near " Avi Kivity
2009-05-20 11:18 ` [PATCH 27/46] KVM: x86 emulator: Add unsigned byte immediate decode Avi Kivity
2009-05-20 11:18 ` [PATCH 28/46] KVM: x86 emulator: Completely decode in/out at decoding stage Avi Kivity
2009-05-20 11:18 ` [PATCH 29/46] KVM: x86 emulator: Decode soft interrupt instructions Avi Kivity
2009-05-20 11:18 ` [PATCH 30/46] KVM: x86 emulator: Add new mode of instruction emulation: skip Avi Kivity
2009-05-20 11:18 ` [PATCH 31/46] KVM: SVM: Skip instruction on a task switch only when appropriate Avi Kivity
2009-05-20 11:18 ` [PATCH 32/46] KVM: Replace kvmclock open-coded get_cpu_var() with the real thing Avi Kivity
2009-05-20 11:18 ` [PATCH 33/46] KVM: ia64: Don't hold slots_lock in guest mode Avi Kivity
2009-05-20 11:18 ` [PATCH 34/46] KVM: x86: check for cr3 validity in ioctl_set_sregs Avi Kivity
2009-05-20 11:18 ` [PATCH 35/46] KVM: ia64: Flush all TLBs once guest's memory mapping changes Avi Kivity
2009-05-20 11:18 ` [PATCH 36/46] KVM: ia64: remove empty function vti_vcpu_load() Avi Kivity
2009-05-20 11:18 ` [PATCH 37/46] KVM: ia64: restore irq state before calling kvm_vcpu_init Avi Kivity
2009-05-20 11:18 ` [PATCH 38/46] KVM: ia64: preserve int status through call to kvm_insert_vmm_mapping Avi Kivity
2009-05-20 11:18 ` [PATCH 39/46] KVM: ia64: ia64 vcpu_reset() do not call kmalloc() with irqs disabled Avi Kivity
2009-05-20 11:18 ` [PATCH 40/46] KVM: MMU: Fix auditing code Avi Kivity
2009-05-20 11:18 ` [PATCH 41/46] KVM: Make kvm_cpu_(has|get)_interrupt() work for userspace irqchip too Avi Kivity
2009-05-20 11:18 ` [PATCH 42/46] KVM: VMX: Consolidate userspace and kernel interrupt injection for VMX Avi Kivity
2009-05-20 11:18 ` [PATCH 43/46] KVM: VMX: Cleanup vmx_intr_assist() Avi Kivity
2009-05-20 11:18 ` [PATCH 44/46] KVM: Use kvm_arch_interrupt_allowed() instead of checking interrupt_window_open directly Avi Kivity
2009-05-20 11:18 ` [PATCH 45/46] KVM: SVM: Coalesce userspace/kernel irqchip interrupt injection logic Avi Kivity
2009-05-20 11:18 ` [PATCH 46/46] KVM: Remove exception_injected() callback Avi Kivity

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1242818323-10413-22-git-send-email-avi@redhat.com \
    --to=avi@redhat.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.