Have a pointer to an allocated region inside struct kvm: move nmemslots and the memslots array out of struct kvm into a new struct kvm_memslots, which struct kvm references through a pointer.

Signed-off-by: Marcelo Tosatti

Index: kvm-slotslock/arch/x86/kvm/mmu.c
===================================================================
--- kvm-slotslock.orig/arch/x86/kvm/mmu.c
+++ kvm-slotslock/arch/x86/kvm/mmu.c
@@ -768,13 +768,14 @@ static int kvm_handle_hva(struct kvm *kv
 {
 	int i, j;
 	int retval = 0;
+	struct kvm_memslots *slots = kvm->memslots;
 
 	/*
 	 * If mmap_sem isn't taken, we can look the memslots with only
 	 * the mmu_lock by skipping over the slots with userspace_addr == 0.
 	 */
-	for (i = 0; i < kvm->nmemslots; i++) {
-		struct kvm_memory_slot *memslot = &kvm->memslots[i];
+	for (i = 0; i < slots->nmemslots; i++) {
+		struct kvm_memory_slot *memslot = &slots->memslots[i];
 		unsigned long start = memslot->userspace_addr;
 		unsigned long end;
 
@@ -2969,8 +2970,8 @@ unsigned int kvm_mmu_calculate_mmu_pages
 	unsigned int nr_mmu_pages;
 	unsigned int nr_pages = 0;
 
-	for (i = 0; i < kvm->nmemslots; i++)
-		nr_pages += kvm->memslots[i].npages;
+	for (i = 0; i < kvm->memslots->nmemslots; i++)
+		nr_pages += kvm->memslots->memslots[i].npages;
 
 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
 	nr_mmu_pages = max(nr_mmu_pages,
@@ -3243,7 +3244,7 @@ static int count_rmaps(struct kvm_vcpu *
 	int i, j, k;
 
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
+		struct kvm_memory_slot *m = &vcpu->kvm->memslots->memslots[i];
 		struct kvm_rmap_desc *d;
 
 		for (j = 0; j < m->npages; ++j) {
Index: kvm-slotslock/arch/x86/kvm/vmx.c
===================================================================
--- kvm-slotslock.orig/arch/x86/kvm/vmx.c
+++ kvm-slotslock/arch/x86/kvm/vmx.c
@@ -1465,8 +1465,8 @@ static void enter_pmode(struct kvm_vcpu
 static gva_t rmode_tss_base(struct kvm *kvm)
 {
 	if (!kvm->arch.tss_addr) {
-		gfn_t base_gfn = kvm->memslots[0].base_gfn +
-				 kvm->memslots[0].npages - 3;
+		gfn_t base_gfn = kvm->memslots->memslots[0].base_gfn +
+				 kvm->memslots->memslots[0].npages - 3;
 		return base_gfn << PAGE_SHIFT;
 	}
 	return kvm->arch.tss_addr;
Index: kvm-slotslock/arch/x86/kvm/x86.c
===================================================================
--- kvm-slotslock.orig/arch/x86/kvm/x86.c
+++ kvm-slotslock/arch/x86/kvm/x86.c
@@ -2159,7 +2159,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		spin_unlock(&kvm->mmu_lock);
-		memslot = &kvm->memslots[log->slot];
+		memslot = &kvm->memslots->memslots[log->slot];
 		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
 		memset(memslot->dirty_bitmap, 0, n);
 	}
@@ -4845,7 +4845,7 @@ int kvm_arch_set_memory_region(struct kv
 				int user_alloc)
 {
 	int npages = mem->memory_size >> PAGE_SHIFT;
-	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+	struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
 
 	/*To keep backward compatibility with older userspace,
 	 *x86 needs to hanlde !user_alloc case.
Index: kvm-slotslock/include/linux/kvm_host.h
===================================================================
--- kvm-slotslock.orig/include/linux/kvm_host.h
+++ kvm-slotslock/include/linux/kvm_host.h
@@ -151,14 +151,18 @@ struct kvm_irq_routing_table {};
 
 #endif
 
+struct kvm_memslots {
+	int nmemslots;
+	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
+					KVM_PRIVATE_MEM_SLOTS];
+};
+
 struct kvm {
 	spinlock_t mmu_lock;
 	spinlock_t requests_lock;
 	struct rw_semaphore slots_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
-	int nmemslots;
-	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
-					KVM_PRIVATE_MEM_SLOTS];
+	struct kvm_memslots *memslots;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
 	u32 bsp_vcpu_id;
 	struct kvm_vcpu *bsp_vcpu;
@@ -482,7 +486,7 @@ static inline void kvm_guest_exit(void)
 
 static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-	return slot - kvm->memslots;
+	return slot - kvm->memslots->memslots;
 }
 
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
Index: kvm-slotslock/virt/kvm/iommu.c
===================================================================
--- kvm-slotslock.orig/virt/kvm/iommu.c
+++ kvm-slotslock/virt/kvm/iommu.c
@@ -76,10 +76,13 @@ unmap_pages:
 static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
 	int i, r = 0;
+	struct kvm_memslots *slots;
 
-	for (i = 0; i < kvm->nmemslots; i++) {
-		r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
-					kvm->memslots[i].npages);
+	slots = kvm->memslots;
+
+	for (i = 0; i < slots->nmemslots; i++) {
+		r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn,
+					slots->memslots[i].npages);
 		if (r)
 			break;
 	}
@@ -210,10 +213,13 @@ static void kvm_iommu_put_pages(struct k
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
 	int i;
+	struct kvm_memslots *slots;
+
+	slots = kvm->memslots;
 
-	for (i = 0; i < kvm->nmemslots; i++) {
-		kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
-				    kvm->memslots[i].npages);
+	for (i = 0; i < slots->nmemslots; i++) {
+		kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
+				    slots->memslots[i].npages);
 	}
 	return 0;
Index: kvm-slotslock/virt/kvm/kvm_main.c
===================================================================
--- kvm-slotslock.orig/virt/kvm/kvm_main.c
+++ kvm-slotslock/virt/kvm/kvm_main.c
@@ -348,12 +348,16 @@ static struct kvm *kvm_create_vm(void)
 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
 
+	r = -ENOMEM;
+	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+	if (!kvm->memslots)
+		goto out_err;
+
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!page) {
-		r = -ENOMEM;
+	if (!page)
 		goto out_err;
-	}
+
 	kvm->coalesced_mmio_ring =
 			(struct kvm_coalesced_mmio_ring *)page_address(page);
 #endif
@@ -394,6 +398,7 @@ out:
 out_err:
 	hardware_disable_all();
 out_err_nodisable:
+	kfree(kvm->memslots);
 	kfree(kvm);
 	return ERR_PTR(r);
 }
@@ -428,9 +433,12 @@ static void kvm_free_physmem_slot(struct
 void kvm_free_physmem(struct kvm *kvm)
 {
 	int i;
+	struct kvm_memslots *slots = kvm->memslots;
+
+	for (i = 0; i < slots->nmemslots; ++i)
+		kvm_free_physmem_slot(&slots->memslots[i], NULL);
 
-	for (i = 0; i < kvm->nmemslots; ++i)
-		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
+	kfree(kvm->memslots);
 }
 
 static void kvm_destroy_vm(struct kvm *kvm)
@@ -514,7 +522,7 @@ int __kvm_set_memory_region(struct kvm *
 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 		goto out;
 
-	memslot = &kvm->memslots[mem->slot];
+	memslot = &kvm->memslots->memslots[mem->slot];
 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 	npages = mem->memory_size >> PAGE_SHIFT;
@@ -535,7 +543,7 @@ int __kvm_set_memory_region(struct kvm *
 	/* Check for overlaps */
 	r = -EEXIST;
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-		struct kvm_memory_slot *s = &kvm->memslots[i];
+		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
 
 		if (s == memslot || !s->npages)
 			continue;
@@ -637,8 +645,8 @@ skip_lpage:
 		kvm_arch_flush_shadow(kvm);
 
 	spin_lock(&kvm->mmu_lock);
-	if (mem->slot >= kvm->nmemslots)
-		kvm->nmemslots = mem->slot + 1;
+	if (mem->slot >= kvm->memslots->nmemslots)
+		kvm->memslots->nmemslots = mem->slot + 1;
 
 	*memslot = new;
 	spin_unlock(&kvm->mmu_lock);
@@ -708,7 +716,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_MEMORY_SLOTS)
 		goto out;
 
-	memslot = &kvm->memslots[log->slot];
+	memslot = &kvm->memslots->memslots[log->slot];
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
@@ -762,9 +770,10 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
+	struct kvm_memslots *slots = kvm->memslots;
 
-	for (i = 0; i < kvm->nmemslots; ++i) {
-		struct kvm_memory_slot *memslot = &kvm->memslots[i];
+	for (i = 0; i < slots->nmemslots; ++i) {
+		struct kvm_memory_slot *memslot = &slots->memslots[i];
 
 		if (gfn >= memslot->base_gfn
 		    && gfn < memslot->base_gfn + memslot->npages)
@@ -783,10 +792,11 @@ struct kvm_memory_slot *gfn_to_memslot(s
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
+	struct kvm_memslots *slots = kvm->memslots;
 
 	gfn = unalias_gfn(kvm, gfn);
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-		struct kvm_memory_slot *memslot = &kvm->memslots[i];
+		struct kvm_memory_slot *memslot = &slots->memslots[i];
 
 		if (gfn >= memslot->base_gfn
 		    && gfn < memslot->base_gfn + memslot->npages)
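
A note for reviewers on why the indirection is worth the extra dereference: an array embedded in struct kvm can only be updated in place, under synchronization honored by every reader, whereas a pointer to a separately allocated region can be repointed at a new copy in a single store. The sketch below is a minimal userspace illustration of that copy-then-publish pattern under the new layout. It is a toy, not kernel code: the demo_* names are invented for the example, C11 atomics stand in for whatever synchronization a later patch might use, and the superseded copy is leaked where the kernel would have to defer the kfree until readers are done with it.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_MEM_SLOTS 8

struct demo_memory_slot {
	unsigned long base_gfn;
	unsigned long npages;
};

/* Mirrors the new layout: slot count and array behind one pointer. */
struct demo_memslots {
	int nmemslots;
	struct demo_memory_slot memslots[DEMO_MEM_SLOTS];
};

struct demo_vm {
	_Atomic(struct demo_memslots *) memslots;
};

/* Readers snapshot the pointer once and see one consistent array. */
static unsigned long demo_count_pages(struct demo_vm *vm)
{
	struct demo_memslots *slots = atomic_load(&vm->memslots);
	unsigned long total = 0;
	int i;

	for (i = 0; i < slots->nmemslots; i++)
		total += slots->memslots[i].npages;
	return total;
}

/* Writers copy the current array, modify the copy, and publish it with
 * one pointer store; nmemslots is maintained the same way
 * __kvm_set_memory_region does it.  The old copy is leaked for brevity. */
static int demo_set_slot(struct demo_vm *vm, int id,
			 unsigned long base_gfn, unsigned long npages)
{
	struct demo_memslots *old = atomic_load(&vm->memslots);
	struct demo_memslots *new = malloc(sizeof(*new));

	if (!new)
		return -1;
	memcpy(new, old, sizeof(*new));
	new->memslots[id].base_gfn = base_gfn;
	new->memslots[id].npages = npages;
	if (id >= new->nmemslots)
		new->nmemslots = id + 1;
	atomic_store(&vm->memslots, new);
	return 0;
}

int main(void)
{
	struct demo_vm vm;
	struct demo_memslots *init = calloc(1, sizeof(*init));

	if (!init)
		return 1;
	atomic_store(&vm.memslots, init);
	demo_set_slot(&vm, 0, 0x0, 256);
	demo_set_slot(&vm, 1, 0x100, 512);
	printf("total pages: %lu\n", demo_count_pages(&vm));
	return 0;
}

With this patch alone nothing is swapped yet: __kvm_set_memory_region still updates the slot in place under mmu_lock, and slots_lock is unchanged. The allocation only creates the seam where such a swap could later happen.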