From mboxrd@z Thu Jan  1 00:00:00 1970
From: mtosatti@redhat.com
Subject: [patch 1/4] KVM: MMU: protect kvm_mmu_change_mmu_pages with mmu_lock
Date: Thu, 07 May 2009 18:03:32 -0300
Message-ID: <20090507210533.908595461@amt.cnet>
References: <20090507210331.370806285@amt.cnet>
Cc: avi@redhat.com, Andrea Arcangeli, Marcelo Tosatti
To: kvm@vger.kernel.org
Return-path:
Received: from mx2.redhat.com ([66.187.237.31]:45202 "EHLO mx2.redhat.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751154AbZEGVJ4
	(ORCPT ); Thu, 7 May 2009 17:09:56 -0400
Received: from int-mx2.corp.redhat.com (int-mx2.corp.redhat.com [172.16.27.26])
	by mx2.redhat.com (8.13.8/8.13.8) with ESMTP id n47L9vf7016567
	for ; Thu, 7 May 2009 17:09:57 -0400
Content-Disposition: inline; filename=set-mem-lock
Sender: kvm-owner@vger.kernel.org
List-ID:

kvm_handle_hva, called by MMU notifiers, manipulates mmu data only with
the protection of mmu_lock.

Update kvm_mmu_change_mmu_pages callers to take mmu_lock, thus protecting
against kvm_handle_hva.

CC: Andrea Arcangeli
Signed-off-by: Marcelo Tosatti

Index: kvm-pending/arch/x86/kvm/mmu.c
===================================================================
--- kvm-pending.orig/arch/x86/kvm/mmu.c
+++ kvm-pending/arch/x86/kvm/mmu.c
@@ -2723,7 +2723,6 @@ void kvm_mmu_slot_remove_write_access(st
 {
 	struct kvm_mmu_page *sp;
 
-	spin_lock(&kvm->mmu_lock);
 	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
 		int i;
 		u64 *pt;
@@ -2738,7 +2737,6 @@ void kvm_mmu_slot_remove_write_access(st
 				pt[i] &= ~PT_WRITABLE_MASK;
 	}
 	kvm_flush_remote_tlbs(kvm);
-	spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
Index: kvm-pending/arch/x86/kvm/x86.c
===================================================================
--- kvm-pending.orig/arch/x86/kvm/x86.c
+++ kvm-pending/arch/x86/kvm/x86.c
@@ -1607,10 +1607,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
 		return -EINVAL;
 
 	down_write(&kvm->slots_lock);
+	spin_lock(&kvm->mmu_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+	spin_unlock(&kvm->mmu_lock);
 	up_write(&kvm->slots_lock);
 	return 0;
 }
@@ -1786,7 +1788,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
+		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots[log->slot];
 		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4530,12 +4534,14 @@ int kvm_arch_set_memory_region(struct kv
 		}
 	}
 
+	spin_lock(&kvm->mmu_lock);
 	if (!kvm->arch.n_requested_mmu_pages) {
 		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	}
 
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 
 	return 0;
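
To make the locking rationale concrete, below is a minimal userspace sketch.
It is purely illustrative and not part of the patch: pthread locks stand in
for kvm->slots_lock and kvm->mmu_lock, and a plain counter stands in for the
shadow-page accounting. The point it demonstrates is the one the changelog
makes: the notifier-like path only ever takes mmu_lock, so holding slots_lock
alone in the ioctl path does not serialize against it; both paths have to
take the mmu_lock analogue.

    /*
     * Illustrative analogue only (assumed names, not KVM code).
     * notifier_path() mimics kvm_handle_hva(): it runs under mmu_lock only.
     * ioctl_path() mimics kvm_vm_ioctl_set_nr_mmu_pages() after the patch:
     * outer slots_lock, then mmu_lock around the state change.
     */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER; /* ~ kvm->slots_lock */
    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;     /* ~ kvm->mmu_lock   */
    static long n_mmu_pages;                                         /* ~ shadow page count */

    static void *notifier_path(void *arg)
    {
            for (int i = 0; i < 100000; i++) {
                    pthread_mutex_lock(&mmu_lock);
                    n_mmu_pages--;                  /* e.g. zapping a shadow page */
                    pthread_mutex_unlock(&mmu_lock);
            }
            return NULL;
    }

    static void *ioctl_path(void *arg)
    {
            for (int i = 0; i < 100000; i++) {
                    pthread_rwlock_wrlock(&slots_lock);
                    pthread_mutex_lock(&mmu_lock);  /* the lock this patch adds */
                    n_mmu_pages++;                  /* ~ kvm_mmu_change_mmu_pages() */
                    pthread_mutex_unlock(&mmu_lock);
                    pthread_rwlock_unlock(&slots_lock);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, notifier_path, NULL);
            pthread_create(&b, NULL, ioctl_path, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);

            /* With both paths under mmu_lock the counter ends at exactly 0. */
            printf("n_mmu_pages = %ld\n", n_mmu_pages);
            return 0;
    }

Built with "cc -pthread", the final count is deterministically zero because
both writers serialize on the inner lock; drop the mmu_lock calls from
ioctl_path() and updates can be lost, which mirrors the race between
kvm_mmu_change_mmu_pages callers and kvm_handle_hva that this patch closes.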