From: Marcelo Tosatti
Subject: [patch 03/10] KVM: switch dirty_log to mmu_lock protection
Date: Mon, 21 Sep 2009 20:37:14 -0300
Message-ID: <20090921234124.205345161@amt.cnet>
References: <20090921233711.213665413@amt.cnet>
Cc: avi@redhat.com, Marcelo Tosatti
To: kvm@vger.kernel.org
Content-Disposition: inline; filename=get-dirty-log

get_dirty_log and mark_page_dirty need to be mutually exclusive.
Switch to mmu_lock protection.

Signed-off-by: Marcelo Tosatti

Index: kvm-slotslock/arch/x86/kvm/paging_tmpl.h
===================================================================
--- kvm-slotslock.orig/arch/x86/kvm/paging_tmpl.h
+++ kvm-slotslock/arch/x86/kvm/paging_tmpl.h
@@ -175,7 +175,9 @@ walk:
 		if (!(pte & PT_ACCESSED_MASK)) {
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
+			spin_lock(&vcpu->kvm->mmu_lock);
 			mark_page_dirty(vcpu->kvm, table_gfn);
+			spin_unlock(&vcpu->kvm->mmu_lock);
 			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
 			    index, pte, pte|PT_ACCESSED_MASK))
 				goto walk;
@@ -215,7 +217,9 @@ walk:
 		bool ret;
 
 		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
+		spin_lock(&vcpu->kvm->mmu_lock);
 		mark_page_dirty(vcpu->kvm, table_gfn);
+		spin_unlock(&vcpu->kvm->mmu_lock);
 		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
 			    pte|PT_DIRTY_MASK);
 		if (ret)
Index: kvm-slotslock/arch/x86/kvm/x86.c
===================================================================
--- kvm-slotslock.orig/arch/x86/kvm/x86.c
+++ kvm-slotslock/arch/x86/kvm/x86.c
@@ -692,7 +692,9 @@ static void kvm_write_guest_time(struct
 
 	kunmap_atomic(shared_kaddr, KM_USER0);
 
+	spin_lock(&v->kvm->mmu_lock);
 	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+	spin_unlock(&v->kvm->mmu_lock);
 }
 
 static int kvm_request_guest_time_update(struct kvm_vcpu *v)
@@ -2147,27 +2149,45 @@ static int kvm_vm_ioctl_reinject(struct
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				      struct kvm_dirty_log *log)
 {
-	int r;
-	int n;
+	int r, n, i;
 	struct kvm_memory_slot *memslot;
-	int is_dirty = 0;
+	unsigned long is_dirty = 0;
+	unsigned long *dirty_bitmap;
 
 	down_write(&kvm->slots_lock);
 
-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
-	if (r)
+	r = -EINVAL;
+	if (log->slot >= KVM_MEMORY_SLOTS)
+		goto out;
+
+	memslot = &kvm->memslots->memslots[log->slot];
+	r = -ENOENT;
+	if (!memslot->dirty_bitmap)
+		goto out;
+
+	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	r = -ENOMEM;
+	dirty_bitmap = vmalloc(n);
+	if (!dirty_bitmap)
 		goto out;
+	memset(dirty_bitmap, 0, n);
+
+	spin_lock(&kvm->mmu_lock);
+	for (i = 0; !is_dirty && i < n/sizeof(long); ++i)
+		is_dirty = memslot->dirty_bitmap[i];
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
-		spin_lock(&kvm->mmu_lock);
+		memcpy(dirty_bitmap, memslot->dirty_bitmap, n);
+		memset(memslot->dirty_bitmap, 0, n);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-		memslot = &kvm->memslots->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
-		memset(memslot->dirty_bitmap, 0, n);
 	}
+	spin_unlock(&kvm->mmu_lock);
+	r = 0;
+	if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
+		r = -EFAULT;
 out:
 	up_write(&kvm->slots_lock);
 	return r;
@@ -3491,7 +3511,9 @@ static void vapic_exit(struct kvm_vcpu *
 
 	down_read(&vcpu->kvm->slots_lock);
 	kvm_release_page_dirty(apic->vapic_page);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+	spin_unlock(&vcpu->kvm->mmu_lock);
 	up_read(&vcpu->kvm->slots_lock);
 }
 
Index: kvm-slotslock/virt/kvm/kvm_main.c
===================================================================
--- kvm-slotslock.orig/virt/kvm/kvm_main.c
+++ kvm-slotslock/virt/kvm/kvm_main.c
@@ -1007,7 +1007,9 @@ int kvm_write_guest_page(struct kvm *kvm
 	r = copy_to_user((void __user *)addr + offset, data, len);
 	if (r)
 		return -EFAULT;
+	spin_lock(&kvm->mmu_lock);
 	mark_page_dirty(kvm, gfn);
+	spin_unlock(&kvm->mmu_lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
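
---

The race being closed, as a minimal standalone sketch (illustrative
only, not kernel code: a pthread mutex stands in for kvm->mmu_lock, and
set_dirty()/dirty_log_snapshot() are hypothetical stand-ins for
mark_page_dirty() and the snapshot-and-clear in
kvm_vm_ioctl_get_dirty_log()):

#include <pthread.h>
#include <string.h>

#define BITMAP_LONGS 16

static unsigned long dirty_bitmap[BITMAP_LONGS];
static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer side, modelling mark_page_dirty(): set one bit under the lock. */
static void set_dirty(unsigned long pfn)
{
	pthread_mutex_lock(&mmu_lock);
	dirty_bitmap[pfn / (8 * sizeof(long))] |=
		1UL << (pfn % (8 * sizeof(long)));
	pthread_mutex_unlock(&mmu_lock);
}

/* Reader side, modelling the get_dirty_log path: copy the bitmap out
 * and clear it in one critical section. Without the shared lock, a bit
 * set between the memcpy() and the memset() would be cleared without
 * ever being reported, i.e. a guest write would vanish from the log.
 */
static void dirty_log_snapshot(unsigned long *snapshot)
{
	pthread_mutex_lock(&mmu_lock);
	memcpy(snapshot, dirty_bitmap, sizeof(dirty_bitmap));
	memset(dirty_bitmap, 0, sizeof(dirty_bitmap));
	pthread_mutex_unlock(&mmu_lock);
}

Taking mmu_lock around every mark_page_dirty() caller gives the same
guarantee in the kernel: the memcpy()/memset() pair above can never
interleave with a bitmap update, so no dirty bit is lost.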