All of lore.kernel.org
 help / color / mirror / Atom feed
From: Jintack Lim <jintack@cs.columbia.edu>
To: christoffer.dall@linaro.org, marc.zyngier@arm.com,
	pbonzini@redhat.com, rkrcmar@redhat.com, linux@armlinux.org.uk,
	catalin.marinas@arm.com, will.deacon@arm.com,
	vladimir.murzin@arm.com, suzuki.poulose@arm.com,
	mark.rutland@arm.com, james.morse@arm.com,
	lorenzo.pieralisi@arm.com, kevin.brodsky@arm.com,
	wcohen@redhat.com, shankerd@codeaurora.org, geoff@infradead.org,
	andre.przywara@arm.com, eric.auger@redhat.com,
	anna-maria@linutronix.de, shihwei@cs.columbia.edu,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org
Cc: jintack@cs.columbia.edu
Subject: [RFC 38/55] KVM: arm/arm64: Make mmu functions non-static
Date: Mon,  9 Jan 2017 01:24:34 -0500	[thread overview]
Message-ID: <1483943091-1364-39-git-send-email-jintack@cs.columbia.edu> (raw)
In-Reply-To: <1483943091-1364-1-git-send-email-jintack@cs.columbia.edu>

From: Christoffer Dall <christoffer.dall@linaro.org>

Make the mmu functions non-static so that we can reuse those functions
to support the mmu for nested VMs.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Jintack Lim <jintack@cs.columbia.edu>
---
 arch/arm/kvm/mmu.c               | 90 +++++++++++++++++++++++-----------------
 arch/arm64/include/asm/kvm_mmu.h |  9 ++++
 2 files changed, 61 insertions(+), 38 deletions(-)

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 56358fa..98b42e8 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -301,7 +301,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
 }
 
 /**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * kvm_unmap_stage2_range -- Clear stage2 page table entries to unmap a range
  * @kvm:   The VM pointer
  * @start: The intermediate physical base address of the range to unmap
  * @size:  The size of the area to unmap
@@ -311,8 +311,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu,
-			       phys_addr_t start, u64 size)
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
 {
 	pgd_t *pgd;
 	phys_addr_t addr = start, end = start + size;
@@ -374,11 +373,10 @@ static void stage2_flush_puds(pgd_t *pgd,
 	} while (pud++, addr = next, addr != end);
 }
 
-static void stage2_flush_memslot(struct kvm_s2_mmu *mmu,
-				 struct kvm_memory_slot *memslot)
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+			    phys_addr_t start, phys_addr_t end)
 {
-	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
-	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+	phys_addr_t addr = start;
 	phys_addr_t next;
 	pgd_t *pgd;
 
@@ -389,6 +387,15 @@ static void stage2_flush_memslot(struct kvm_s2_mmu *mmu,
 	} while (pgd++, addr = next, addr != end);
 }
 
+static void stage2_flush_memslot(struct kvm_s2_mmu *mmu,
+				 struct kvm_memory_slot *memslot)
+{
+	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t end = start + PAGE_SIZE * memslot->npages;
+
+	kvm_stage2_flush_range(mmu, start, end);
+}
+
 /**
  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
  * @kvm: The struct kvm pointer
@@ -745,21 +752,9 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
-/**
- * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
- * @kvm:	The KVM struct pointer for the VM.
- *
- * Allocates only the stage-2 HW PGD level table(s) (can support either full
- * 40-bit input addresses or limited to 32-bit input addresses). Clears the
- * allocated pages.
- *
- * Note we don't need locking here as this is only called when the VM is
- * created, which can only be done once.
- */
-int kvm_alloc_stage2_pgd(struct kvm *kvm)
+int __kvm_alloc_stage2_pgd(struct kvm_s2_mmu *mmu)
 {
 	pgd_t *pgd;
-	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
 
 	if (mmu->pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
@@ -776,6 +771,22 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	return 0;
 }
 
+/**
+ * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+ * @kvm:	The KVM struct pointer for the VM.
+ *
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full
+ * 40-bit input addresses or limited to 32-bit input addresses). Clears the
+ * allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+	return __kvm_alloc_stage2_pgd(&kvm->arch.mmu);
+}
+
 static void stage2_unmap_memslot(struct kvm_s2_mmu *mmu,
 				 struct kvm_memory_slot *memslot)
 {
@@ -811,7 +822,7 @@ static void stage2_unmap_memslot(struct kvm_s2_mmu *mmu,
 
 		if (!(vma->vm_flags & VM_PFNMAP)) {
 			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
-			unmap_stage2_range(mmu, gpa, vm_end - vm_start);
+			kvm_unmap_stage2_range(mmu, gpa, vm_end - vm_start);
 		}
 		hva = vm_end;
 	} while (hva < reg_end);
@@ -841,6 +852,17 @@ void stage2_unmap_vm(struct kvm *kvm)
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
+void __kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
+{
+	if (mmu->pgd == NULL)
+		return;
+
+	kvm_unmap_stage2_range(mmu, 0, KVM_PHYS_SIZE);
+	/* Free the HW pgd, one page at a time */
+	free_pages_exact(mmu->pgd, S2_PGD_SIZE);
+	mmu->pgd = NULL;
+}
+
 /**
  * kvm_free_stage2_pgd - free all stage-2 tables
  * @kvm:	The KVM struct pointer for the VM.
@@ -854,15 +876,7 @@ void stage2_unmap_vm(struct kvm *kvm)
  */
 void kvm_free_stage2_pgd(struct kvm *kvm)
 {
-	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
-
-	if (mmu->pgd == NULL)
-		return;
-
-	unmap_stage2_range(mmu, 0, KVM_PHYS_SIZE);
-	/* Free the HW pgd, one page at a time */
-	free_pages_exact(mmu->pgd, S2_PGD_SIZE);
-	mmu->pgd = NULL;
+	__kvm_free_stage2_pgd(&kvm->arch.mmu);
 }
 
 static pud_t *stage2_get_pud(struct kvm_s2_mmu *mmu,
@@ -1175,13 +1189,13 @@ static void  stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
 }
 
 /**
- * stage2_wp_range() - write protect stage2 memory region range
+ * kvm_stage2_wp_range() - write protect stage2 memory region range
  * @kvm:	The KVM pointer
  * @addr:	Start address of range
  * @end:	End address of range
  */
-static void stage2_wp_range(struct kvm *kvm, struct kvm_s2_mmu *mmu,
-			    phys_addr_t addr, phys_addr_t end)
+void kvm_stage2_wp_range(struct kvm *kvm, struct kvm_s2_mmu *mmu,
+			 phys_addr_t addr, phys_addr_t end)
 {
 	pgd_t *pgd;
 	phys_addr_t next;
@@ -1225,7 +1239,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 
 	spin_lock(&kvm->mmu_lock);
-	stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
+	kvm_stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
 	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -1249,7 +1263,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
 	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
 
-	stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
+	kvm_stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
 }
 
 /*
@@ -1589,7 +1603,7 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
-	unmap_stage2_range(&kvm->arch.mmu, gpa, PAGE_SIZE);
+	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, PAGE_SIZE);
 	return 0;
 }
 
@@ -1900,7 +1914,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
-		unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr,
+		kvm_unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr,
 				   mem->memory_size);
 	else
 		stage2_flush_memslot(&kvm->arch.mmu, memslot);
@@ -1944,7 +1958,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	phys_addr_t size = slot->npages << PAGE_SHIFT;
 
 	spin_lock(&kvm->mmu_lock);
-	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e3455c4..a504162 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -145,9 +145,18 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 
 void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
+int __kvm_alloc_stage2_pgd(struct kvm_s2_mmu *mmu);
 void kvm_free_stage2_pgd(struct kvm *kvm);
+void __kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
+			    u64 size);
+void kvm_stage2_wp_range(struct kvm *kvm, struct kvm_s2_mmu *mmu,
+			 phys_addr_t addr, phys_addr_t end);
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+			    phys_addr_t start, phys_addr_t end);
+
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
-- 
1.9.1

WARNING: multiple messages have this Message-ID (diff)
From: Jintack Lim <jintack@cs.columbia.edu>
To: christoffer.dall@linaro.org, marc.zyngier@arm.com,
	pbonzini@redhat.com, rkrcmar@redhat.com, linux@armlinux.org.uk,
	catalin.marinas@arm.com, will.deacon@arm.com,
	vladimir.murzin@arm.com, suzuki.poulose@arm.com,
	mark.rutland@arm.com, james.morse@arm.com,
	lorenzo.pieralisi@arm.com, kevin.brodsky@arm.com,
	wcohen@redhat.com, shankerd@codeaurora.org, geoff@infradead.org,
	andre.przywara@arm.com, eric.auger@redhat.com,
	anna-maria@linutronix.de, shihwei@cs.columbia.edu,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org
Subject: [RFC 38/55] KVM: arm/arm64: Make mmu functions non-static
Date: Mon,  9 Jan 2017 01:24:34 -0500	[thread overview]
Message-ID: <1483943091-1364-39-git-send-email-jintack@cs.columbia.edu> (raw)
In-Reply-To: <1483943091-1364-1-git-send-email-jintack@cs.columbia.edu>

From: Christoffer Dall <christoffer.dall@linaro.org>

Make the mmu functions non-static so that we can reuse those functions
to support the mmu for nested VMs.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Jintack Lim <jintack@cs.columbia.edu>
---
 arch/arm/kvm/mmu.c               | 90 +++++++++++++++++++++++-----------------
 arch/arm64/include/asm/kvm_mmu.h |  9 ++++
 2 files changed, 61 insertions(+), 38 deletions(-)

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 56358fa..98b42e8 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -301,7 +301,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
 }
 
 /**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * kvm_unmap_stage2_range -- Clear stage2 page table entries to unmap a range
  * @kvm:   The VM pointer
  * @start: The intermediate physical base address of the range to unmap
  * @size:  The size of the area to unmap
@@ -311,8 +311,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu,
-			       phys_addr_t start, u64 size)
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
 {
 	pgd_t *pgd;
 	phys_addr_t addr = start, end = start + size;
@@ -374,11 +373,10 @@ static void stage2_flush_puds(pgd_t *pgd,
 	} while (pud++, addr = next, addr != end);
 }
 
-static void stage2_flush_memslot(struct kvm_s2_mmu *mmu,
-				 struct kvm_memory_slot *memslot)
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+			    phys_addr_t start, phys_addr_t end)
 {
-	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
-	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+	phys_addr_t addr = start;
 	phys_addr_t next;
 	pgd_t *pgd;
 
@@ -389,6 +387,15 @@ static void stage2_flush_memslot(struct kvm_s2_mmu *mmu,
 	} while (pgd++, addr = next, addr != end);
 }
 
+static void stage2_flush_memslot(struct kvm_s2_mmu *mmu,
+				 struct kvm_memory_slot *memslot)
+{
+	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t end = start + PAGE_SIZE * memslot->npages;
+
+	kvm_stage2_flush_range(mmu, start, end);
+}
+
 /**
  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
  * @kvm: The struct kvm pointer
@@ -745,21 +752,9 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
-/**
- * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
- * @kvm:	The KVM struct pointer for the VM.
- *
- * Allocates only the stage-2 HW PGD level table(s) (can support either full
- * 40-bit input addresses or limited to 32-bit input addresses). Clears the
- * allocated pages.
- *
- * Note we don't need locking here as this is only called when the VM is
- * created, which can only be done once.
- */
-int kvm_alloc_stage2_pgd(struct kvm *kvm)
+int __kvm_alloc_stage2_pgd(struct kvm_s2_mmu *mmu)
 {
 	pgd_t *pgd;
-	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
 
 	if (mmu->pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
@@ -776,6 +771,22 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	return 0;
 }
 
+/**
+ * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+ * @kvm:	The KVM struct pointer for the VM.
+ *
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full
+ * 40-bit input addresses or limited to 32-bit input addresses). Clears the
+ * allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+	return __kvm_alloc_stage2_pgd(&kvm->arch.mmu);
+}
+
 static void stage2_unmap_memslot(struct kvm_s2_mmu *mmu,
 				 struct kvm_memory_slot *memslot)
 {
@@ -811,7 +822,7 @@ static void stage2_unmap_memslot(struct kvm_s2_mmu *mmu,
 
 		if (!(vma->vm_flags & VM_PFNMAP)) {
 			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
-			unmap_stage2_range(mmu, gpa, vm_end - vm_start);
+			kvm_unmap_stage2_range(mmu, gpa, vm_end - vm_start);
 		}
 		hva = vm_end;
 	} while (hva < reg_end);
@@ -841,6 +852,17 @@ void stage2_unmap_vm(struct kvm *kvm)
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
+void __kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
+{
+	if (mmu->pgd == NULL)
+		return;
+
+	kvm_unmap_stage2_range(mmu, 0, KVM_PHYS_SIZE);
+	/* Free the HW pgd, one page at a time */
+	free_pages_exact(mmu->pgd, S2_PGD_SIZE);
+	mmu->pgd = NULL;
+}
+
 /**
  * kvm_free_stage2_pgd - free all stage-2 tables
  * @kvm:	The KVM struct pointer for the VM.
@@ -854,15 +876,7 @@ void stage2_unmap_vm(struct kvm *kvm)
  */
 void kvm_free_stage2_pgd(struct kvm *kvm)
 {
-	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
-
-	if (mmu->pgd == NULL)
-		return;
-
-	unmap_stage2_range(mmu, 0, KVM_PHYS_SIZE);
-	/* Free the HW pgd, one page at a time */
-	free_pages_exact(mmu->pgd, S2_PGD_SIZE);
-	mmu->pgd = NULL;
+	__kvm_free_stage2_pgd(&kvm->arch.mmu);
 }
 
 static pud_t *stage2_get_pud(struct kvm_s2_mmu *mmu,
@@ -1175,13 +1189,13 @@ static void  stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
 }
 
 /**
- * stage2_wp_range() - write protect stage2 memory region range
+ * kvm_stage2_wp_range() - write protect stage2 memory region range
  * @kvm:	The KVM pointer
  * @addr:	Start address of range
  * @end:	End address of range
  */
-static void stage2_wp_range(struct kvm *kvm, struct kvm_s2_mmu *mmu,
-			    phys_addr_t addr, phys_addr_t end)
+void kvm_stage2_wp_range(struct kvm *kvm, struct kvm_s2_mmu *mmu,
+			 phys_addr_t addr, phys_addr_t end)
 {
 	pgd_t *pgd;
 	phys_addr_t next;
@@ -1225,7 +1239,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 
 	spin_lock(&kvm->mmu_lock);
-	stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
+	kvm_stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
 	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -1249,7 +1263,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
 	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
 
-	stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
+	kvm_stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
 }
 
 /*
@@ -1589,7 +1603,7 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
-	unmap_stage2_range(&kvm->arch.mmu, gpa, PAGE_SIZE);
+	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, PAGE_SIZE);
 	return 0;
 }
 
@@ -1900,7 +1914,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
-		unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr,
+		kvm_unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr,
 				   mem->memory_size);
 	else
 		stage2_flush_memslot(&kvm->arch.mmu, memslot);
@@ -1944,7 +1958,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	phys_addr_t size = slot->npages << PAGE_SHIFT;
 
 	spin_lock(&kvm->mmu_lock);
-	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e3455c4..a504162 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -145,9 +145,18 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 
 void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
+int __kvm_alloc_stage2_pgd(struct kvm_s2_mmu *mmu);
 void kvm_free_stage2_pgd(struct kvm *kvm);
+void __kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
+			    u64 size);
+void kvm_stage2_wp_range(struct kvm *kvm, struct kvm_s2_mmu *mmu,
+			 phys_addr_t addr, phys_addr_t end);
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+			    phys_addr_t start, phys_addr_t end);
+
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
-- 
1.9.1

WARNING: multiple messages have this Message-ID (diff)
From: jintack@cs.columbia.edu (Jintack Lim)
To: linux-arm-kernel@lists.infradead.org
Subject: [RFC 38/55] KVM: arm/arm64: Make mmu functions non-static
Date: Mon,  9 Jan 2017 01:24:34 -0500	[thread overview]
Message-ID: <1483943091-1364-39-git-send-email-jintack@cs.columbia.edu> (raw)
In-Reply-To: <1483943091-1364-1-git-send-email-jintack@cs.columbia.edu>

From: Christoffer Dall <christoffer.dall@linaro.org>

Make the mmu functions non-static so that we can reuse those functions
to support the mmu for nested VMs.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Jintack Lim <jintack@cs.columbia.edu>
---
 arch/arm/kvm/mmu.c               | 90 +++++++++++++++++++++++-----------------
 arch/arm64/include/asm/kvm_mmu.h |  9 ++++
 2 files changed, 61 insertions(+), 38 deletions(-)

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 56358fa..98b42e8 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -301,7 +301,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
 }
 
 /**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * kvm_unmap_stage2_range -- Clear stage2 page table entries to unmap a range
  * @kvm:   The VM pointer
  * @start: The intermediate physical base address of the range to unmap
  * @size:  The size of the area to unmap
@@ -311,8 +311,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu,
-			       phys_addr_t start, u64 size)
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
 {
 	pgd_t *pgd;
 	phys_addr_t addr = start, end = start + size;
@@ -374,11 +373,10 @@ static void stage2_flush_puds(pgd_t *pgd,
 	} while (pud++, addr = next, addr != end);
 }
 
-static void stage2_flush_memslot(struct kvm_s2_mmu *mmu,
-				 struct kvm_memory_slot *memslot)
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+			    phys_addr_t start, phys_addr_t end)
 {
-	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
-	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+	phys_addr_t addr = start;
 	phys_addr_t next;
 	pgd_t *pgd;
 
@@ -389,6 +387,15 @@ static void stage2_flush_memslot(struct kvm_s2_mmu *mmu,
 	} while (pgd++, addr = next, addr != end);
 }
 
+static void stage2_flush_memslot(struct kvm_s2_mmu *mmu,
+				 struct kvm_memory_slot *memslot)
+{
+	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t end = start + PAGE_SIZE * memslot->npages;
+
+	kvm_stage2_flush_range(mmu, start, end);
+}
+
 /**
  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
  * @kvm: The struct kvm pointer
@@ -745,21 +752,9 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
-/**
- * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
- * @kvm:	The KVM struct pointer for the VM.
- *
- * Allocates only the stage-2 HW PGD level table(s) (can support either full
- * 40-bit input addresses or limited to 32-bit input addresses). Clears the
- * allocated pages.
- *
- * Note we don't need locking here as this is only called when the VM is
- * created, which can only be done once.
- */
-int kvm_alloc_stage2_pgd(struct kvm *kvm)
+int __kvm_alloc_stage2_pgd(struct kvm_s2_mmu *mmu)
 {
 	pgd_t *pgd;
-	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
 
 	if (mmu->pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
@@ -776,6 +771,22 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	return 0;
 }
 
+/**
+ * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+ * @kvm:	The KVM struct pointer for the VM.
+ *
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full
+ * 40-bit input addresses or limited to 32-bit input addresses). Clears the
+ * allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+	return __kvm_alloc_stage2_pgd(&kvm->arch.mmu);
+}
+
 static void stage2_unmap_memslot(struct kvm_s2_mmu *mmu,
 				 struct kvm_memory_slot *memslot)
 {
@@ -811,7 +822,7 @@ static void stage2_unmap_memslot(struct kvm_s2_mmu *mmu,
 
 		if (!(vma->vm_flags & VM_PFNMAP)) {
 			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
-			unmap_stage2_range(mmu, gpa, vm_end - vm_start);
+			kvm_unmap_stage2_range(mmu, gpa, vm_end - vm_start);
 		}
 		hva = vm_end;
 	} while (hva < reg_end);
@@ -841,6 +852,17 @@ void stage2_unmap_vm(struct kvm *kvm)
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
+void __kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
+{
+	if (mmu->pgd == NULL)
+		return;
+
+	kvm_unmap_stage2_range(mmu, 0, KVM_PHYS_SIZE);
+	/* Free the HW pgd, one page at a time */
+	free_pages_exact(mmu->pgd, S2_PGD_SIZE);
+	mmu->pgd = NULL;
+}
+
 /**
  * kvm_free_stage2_pgd - free all stage-2 tables
  * @kvm:	The KVM struct pointer for the VM.
@@ -854,15 +876,7 @@ void stage2_unmap_vm(struct kvm *kvm)
  */
 void kvm_free_stage2_pgd(struct kvm *kvm)
 {
-	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
-
-	if (mmu->pgd == NULL)
-		return;
-
-	unmap_stage2_range(mmu, 0, KVM_PHYS_SIZE);
-	/* Free the HW pgd, one page at a time */
-	free_pages_exact(mmu->pgd, S2_PGD_SIZE);
-	mmu->pgd = NULL;
+	__kvm_free_stage2_pgd(&kvm->arch.mmu);
 }
 
 static pud_t *stage2_get_pud(struct kvm_s2_mmu *mmu,
@@ -1175,13 +1189,13 @@ static void  stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
 }
 
 /**
- * stage2_wp_range() - write protect stage2 memory region range
+ * kvm_stage2_wp_range() - write protect stage2 memory region range
  * @kvm:	The KVM pointer
  * @addr:	Start address of range
  * @end:	End address of range
  */
-static void stage2_wp_range(struct kvm *kvm, struct kvm_s2_mmu *mmu,
-			    phys_addr_t addr, phys_addr_t end)
+void kvm_stage2_wp_range(struct kvm *kvm, struct kvm_s2_mmu *mmu,
+			 phys_addr_t addr, phys_addr_t end)
 {
 	pgd_t *pgd;
 	phys_addr_t next;
@@ -1225,7 +1239,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 
 	spin_lock(&kvm->mmu_lock);
-	stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
+	kvm_stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
 	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -1249,7 +1263,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
 	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
 
-	stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
+	kvm_stage2_wp_range(kvm, &kvm->arch.mmu, start, end);
 }
 
 /*
@@ -1589,7 +1603,7 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
-	unmap_stage2_range(&kvm->arch.mmu, gpa, PAGE_SIZE);
+	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, PAGE_SIZE);
 	return 0;
 }
 
@@ -1900,7 +1914,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
-		unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr,
+		kvm_unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr,
 				   mem->memory_size);
 	else
 		stage2_flush_memslot(&kvm->arch.mmu, memslot);
@@ -1944,7 +1958,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	phys_addr_t size = slot->npages << PAGE_SHIFT;
 
 	spin_lock(&kvm->mmu_lock);
-	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e3455c4..a504162 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -145,9 +145,18 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 
 void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
+int __kvm_alloc_stage2_pgd(struct kvm_s2_mmu *mmu);
 void kvm_free_stage2_pgd(struct kvm *kvm);
+void __kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
+			    u64 size);
+void kvm_stage2_wp_range(struct kvm *kvm, struct kvm_s2_mmu *mmu,
+			 phys_addr_t addr, phys_addr_t end);
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+			    phys_addr_t start, phys_addr_t end);
+
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
-- 
1.9.1

  parent reply	other threads:[~2017-01-09  6:31 UTC|newest]

Thread overview: 322+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-01-09  6:23 [RFC 00/55] Nested Virtualization on KVM/ARM Jintack Lim
2017-01-09  6:23 ` Jintack Lim
2017-01-09  6:23 ` Jintack Lim
2017-01-09  6:23 ` [RFC 01/55] arm64: Add missing TCR hw defines Jintack Lim
2017-01-09  6:23   ` Jintack Lim
2017-01-09  6:23   ` Jintack Lim
2017-01-09  6:23 ` [RFC 02/55] KVM: arm64: Add nesting config option Jintack Lim
2017-01-09  6:23   ` Jintack Lim
2017-01-09  6:23   ` Jintack Lim
2017-01-09  6:23 ` [RFC 03/55] KVM: arm64: Add KVM nesting feature Jintack Lim
2017-01-09  6:23   ` Jintack Lim
2017-01-09  6:23   ` Jintack Lim
2017-01-09  6:24 ` [RFC 04/55] KVM: arm64: Allow userspace to set PSR_MODE_EL2x Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 05/55] KVM: arm64: Add vcpu_mode_el2 primitive to support nesting Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 06/55] KVM: arm64: Add EL2 execution context for nesting Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:10   ` Christoffer Dall
2017-02-22 11:10     ` Christoffer Dall
2017-02-22 11:10     ` Christoffer Dall
2017-06-26 14:33     ` Jintack Lim
2017-06-26 14:33       ` Jintack Lim
2017-06-26 14:33       ` Jintack Lim
2017-07-03  9:03       ` Christoffer Dall
2017-07-03  9:03         ` Christoffer Dall
2017-07-03  9:03         ` Christoffer Dall
2017-07-03  9:32         ` Marc Zyngier
2017-07-03  9:32           ` Marc Zyngier
2017-07-03  9:32           ` Marc Zyngier
2017-07-03  9:54           ` Christoffer Dall
2017-07-03  9:54             ` Christoffer Dall
2017-07-03  9:54             ` Christoffer Dall
2017-07-03 14:44             ` Jintack Lim
2017-07-03 14:44               ` Jintack Lim
2017-07-03 14:44               ` Jintack Lim
2017-07-03 15:30               ` Christoffer Dall
2017-07-03 15:30                 ` Christoffer Dall
2017-07-03 15:30                 ` Christoffer Dall
2017-01-09  6:24 ` [RFC 07/55] KVM: arm/arm64: Add virtual EL2 state emulation framework Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:12   ` Christoffer Dall
2017-02-22 11:12     ` Christoffer Dall
2017-02-22 11:12     ` Christoffer Dall
2017-06-01 20:05   ` Bandan Das
2017-06-01 20:05     ` Bandan Das
2017-06-02 11:51     ` Christoffer Dall
2017-06-02 11:51       ` Christoffer Dall
2017-06-02 11:51       ` Christoffer Dall
2017-06-02 17:36       ` Bandan Das
2017-06-02 17:36         ` Bandan Das
2017-06-02 17:36         ` Bandan Das
2017-06-02 19:06         ` Christoffer Dall
2017-06-02 19:06           ` Christoffer Dall
2017-06-02 19:06           ` Christoffer Dall
2017-06-02 19:25           ` Bandan Das
2017-06-02 19:25             ` Bandan Das
2017-06-02 19:25             ` Bandan Das
     [not found]             ` <20170602194353.GG397@cbox>
2017-06-02 20:18               ` Bandan Das
2017-06-02 21:15                 ` Christoffer Dall
2017-06-02 23:49                   ` Bandan Das
2017-01-09  6:24 ` [RFC 08/55] KVM: arm64: Set virtual EL2 context depending on the guest exception level Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:14   ` Christoffer Dall
2017-02-22 11:14     ` Christoffer Dall
2017-02-22 11:14     ` Christoffer Dall
2017-06-01 20:22   ` Bandan Das
2017-06-01 20:22     ` Bandan Das
2017-06-02  8:48     ` Marc Zyngier
2017-06-02  8:48       ` Marc Zyngier
2017-06-02  8:48       ` Marc Zyngier
2017-01-09  6:24 ` [RFC 09/55] KVM: arm64: Set shadow EL1 registers for virtual EL2 execution Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:19   ` Christoffer Dall
2017-02-22 11:19     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 10/55] KVM: arm64: Synchronize EL1 system registers on virtual EL2 entry and exit Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-06-06 20:16   ` Bandan Das
2017-06-06 20:16     ` Bandan Das
2017-06-06 20:16     ` Bandan Das
2017-06-07  4:26     ` Jintack Lim
2017-06-07  4:26       ` Jintack Lim
2017-01-09  6:24 ` [RFC 11/55] KVM: arm64: Emulate taking an exception to the guest hypervisor Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:28   ` Christoffer Dall
2017-02-22 11:28     ` Christoffer Dall
2017-02-22 11:28     ` Christoffer Dall
2017-06-06 20:21   ` Bandan Das
2017-06-06 20:21     ` Bandan Das
2017-06-06 20:21     ` Bandan Das
2017-06-06 20:38     ` Jintack Lim
2017-06-06 20:38       ` Jintack Lim
2017-06-06 22:07       ` Bandan Das
2017-06-06 22:07         ` Bandan Das
2017-06-06 23:16         ` Jintack Lim
2017-06-06 23:16           ` Jintack Lim
2017-06-06 23:16           ` Jintack Lim
2017-06-07 17:21           ` Bandan Das
2017-06-07 17:21             ` Bandan Das
2017-01-09  6:24 ` [RFC 12/55] KVM: arm64: Handle EL2 register access traps Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:30   ` Christoffer Dall
2017-02-22 11:30     ` Christoffer Dall
2017-02-22 11:31   ` Christoffer Dall
2017-02-22 11:31     ` Christoffer Dall
2017-02-22 11:31     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 13/55] KVM: arm64: Handle eret instruction traps Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 14/55] KVM: arm64: Take account of system " Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:34   ` Christoffer Dall
2017-02-22 11:34     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 15/55] KVM: arm64: Trap EL1 VM register accesses in virtual EL2 Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 16/55] KVM: arm64: Forward VM reg traps to the guest hypervisor Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:39   ` Christoffer Dall
2017-02-22 11:39     ` Christoffer Dall
2017-02-22 11:39     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 17/55] KVM: arm64: Trap SPSR_EL1, ELR_EL1 and VBAR_EL1 in virtual EL2 Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:40   ` Christoffer Dall
2017-02-22 11:40     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 18/55] KVM: arm64: Forward traps due to HCR_EL2.NV1 bit to the guest hypervisor Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:41   ` Christoffer Dall
2017-02-22 11:41     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 19/55] KVM: arm64: Trap CPACR_EL1 access in virtual EL2 Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 20/55] KVM: arm64: Forward CPACR_EL1 traps to the guest hypervisor Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 21/55] KVM: arm64: Forward HVC instruction " Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 11:47   ` Christoffer Dall
2017-02-22 11:47     ` Christoffer Dall
2017-02-22 11:47     ` Christoffer Dall
2017-06-26 15:21     ` Jintack Lim
2017-06-26 15:21       ` Jintack Lim
2017-06-26 15:21       ` Jintack Lim
2017-07-03  9:08       ` Christoffer Dall
2017-07-03  9:08         ` Christoffer Dall
2017-07-03  9:08         ` Christoffer Dall
2017-07-03  9:31         ` Andrew Jones
2017-07-03  9:31           ` Andrew Jones
2017-07-03  9:31           ` Andrew Jones
2017-07-03  9:51           ` Christoffer Dall
2017-07-03  9:51             ` Christoffer Dall
2017-07-03  9:51             ` Christoffer Dall
2017-07-03 12:03             ` Will Deacon
2017-07-03 12:03               ` Will Deacon
2017-07-03 12:03               ` Will Deacon
2017-07-03 12:35               ` Marc Zyngier
2017-07-03 12:35                 ` Marc Zyngier
2017-07-03 12:35                 ` Marc Zyngier
2017-07-03 13:29         ` Jintack Lim
2017-07-03 13:29           ` Jintack Lim
2017-07-03 13:29           ` Jintack Lim
2017-01-09  6:24 ` [RFC 22/55] KVM: arm64: Handle PSCI call from the guest Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 23/55] KVM: arm64: Forward WFX to the guest hypervisor Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 24/55] KVM: arm64: Forward FP exceptions " Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 25/55] KVM: arm/arm64: Let vcpu thread modify its own active state Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 12:27   ` Christoffer Dall
2017-02-22 12:27     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 26/55] KVM: arm/arm64: Add VGIC data structures for the nesting Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 27/55] KVM: arm/arm64: Emulate GICH interface on GICv2 Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 13:06   ` Christoffer Dall
2017-02-22 13:06     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 28/55] KVM: arm/arm64: Prepare vgic state for the nested VM Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 13:12   ` Christoffer Dall
2017-02-22 13:12     ` Christoffer Dall
2017-02-22 13:12     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 29/55] KVM: arm/arm64: Set up the prepared vgic state Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 30/55] KVM: arm/arm64: Inject irqs to the guest hypervisor Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 13:16   ` Christoffer Dall
2017-02-22 13:16     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 31/55] KVM: arm/arm64: Inject maintenance interrupts " Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 13:19   ` Christoffer Dall
2017-02-22 13:19     ` Christoffer Dall
2017-02-22 13:19     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 32/55] KVM: arm/arm64: register GICH iodev for " Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 13:21   ` Christoffer Dall
2017-02-22 13:21     ` Christoffer Dall
2017-02-22 13:21     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 33/55] KVM: arm/arm64: Remove unused params in mmu functions Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 34/55] KVM: arm/arm64: Abstract stage-2 MMU state into a separate structure Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 35/55] KVM: arm/arm64: Support mmu for the virtual EL2 execution Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 13:38   ` Christoffer Dall
2017-02-22 13:38     ` Christoffer Dall
2017-02-22 13:38     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 36/55] KVM: arm64: Invalidate virtual EL2 TLB entries when needed Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 37/55] KVM: arm64: Setup vttbr_el2 on each VM entry Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` Jintack Lim [this message]
2017-01-09  6:24   ` [RFC 38/55] KVM: arm/arm64: Make mmu functions non-static Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 39/55] KVM: arm/arm64: Add mmu context for the nesting Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 13:34   ` Christoffer Dall
2017-02-22 13:34     ` Christoffer Dall
2017-02-22 13:34     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 40/55] KVM: arm/arm64: Handle vttbr_el2 write operation from the guest hypervisor Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 17:59   ` Christoffer Dall
2017-02-22 17:59     ` Christoffer Dall
2017-02-22 17:59     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 41/55] KVM: arm/arm64: Unmap/flush shadow stage 2 page tables Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 18:09   ` Christoffer Dall
2017-02-22 18:09     ` Christoffer Dall
2017-02-22 18:09     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 42/55] KVM: arm64: Implement nested Stage-2 page table walk logic Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 43/55] KVM: arm/arm64: Handle shadow stage 2 page faults Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 44/55] KVM: arm/arm64: Move kvm_is_write_fault to header file Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 45/55] KVM: arm64: KVM: Inject stage-2 page faults Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 46/55] KVM: arm64: Add more info to the S2 translation result Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 47/55] KVM: arm/arm64: Forward the guest hypervisor's stage 2 permission faults Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 18:15   ` Christoffer Dall
2017-02-22 18:15     ` Christoffer Dall
2017-02-22 18:15     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 48/55] KVM: arm64: Emulate TLBI instruction Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 49/55] KVM: arm64: Fixes to toggle_cache for nesting Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 50/55] KVM: arm/arm64: Abstract kvm_phys_addr_ioremap() function Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 51/55] KVM: arm64: Expose physical address of vcpu interface Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 52/55] KVM: arm/arm64: Create a vcpu mapping for the nested VM Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 53/55] KVM: arm64: Reflect shadow VMPIDR_EL2 value to MPIDR_EL1 Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24 ` [RFC 54/55] KVM: arm/arm64: Adjust virtual offset considering nesting Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-02-22 19:28   ` Christoffer Dall
2017-02-22 19:28     ` Christoffer Dall
2017-02-22 19:28     ` Christoffer Dall
2017-01-09  6:24 ` [RFC 55/55] KVM: arm64: Enable nested virtualization Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09  6:24   ` Jintack Lim
2017-01-09 15:05 ` [RFC 00/55] Nested Virtualization on KVM/ARM David Hildenbrand
2017-01-09 15:05   ` David Hildenbrand
2017-01-10 16:18   ` Jintack Lim
2017-01-10 16:18     ` Jintack Lim
2017-01-10 16:18     ` Jintack Lim
2017-02-22 18:23 ` Christoffer Dall
2017-02-22 18:23   ` Christoffer Dall
2017-02-22 18:23   ` Christoffer Dall
2017-02-24 10:08   ` Jintack Lim
2017-02-24 10:28   ` Jintack Lim
2017-02-24 10:28     ` Jintack Lim
2017-02-24 10:28     ` Jintack Lim

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1483943091-1364-39-git-send-email-jintack@cs.columbia.edu \
    --to=jintack@cs.columbia.edu \
    --cc=andre.przywara@arm.com \
    --cc=anna-maria@linutronix.de \
    --cc=catalin.marinas@arm.com \
    --cc=christoffer.dall@linaro.org \
    --cc=eric.auger@redhat.com \
    --cc=geoff@infradead.org \
    --cc=james.morse@arm.com \
    --cc=kevin.brodsky@arm.com \
    --cc=kvm@vger.kernel.org \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux@armlinux.org.uk \
    --cc=lorenzo.pieralisi@arm.com \
    --cc=marc.zyngier@arm.com \
    --cc=mark.rutland@arm.com \
    --cc=pbonzini@redhat.com \
    --cc=rkrcmar@redhat.com \
    --cc=shankerd@codeaurora.org \
    --cc=shihwei@cs.columbia.edu \
    --cc=suzuki.poulose@arm.com \
    --cc=vladimir.murzin@arm.com \
    --cc=wcohen@redhat.com \
    --cc=will.deacon@arm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.