From: Zenghui Yu <yuzenghui@huawei.com>
To: <linux-arm-kernel@lists.infradead.org>,
	<kvmarm@lists.cs.columbia.edu>, <linux-kernel@vger.kernel.org>,
	<kvm@vger.kernel.org>
Cc: <marc.zyngier@arm.com>, <christoffer.dall@arm.com>,
	<linux@armlinux.org.uk>, <catalin.marinas@arm.com>,
	<will.deacon@arm.com>, <james.morse@arm.com>,
	<julien.thierry@arm.com>, <suzuki.poulose@arm.com>,
	<steve.capper@arm.com>, <wanghaibin.wang@huawei.com>,
	Zenghui Yu <yuzenghui@huawei.com>
Subject: [PATCH 2/5] KVM: arm/arm64: Re-factor building the stage2 page table entries
Date: Wed, 1 May 2019 09:44:24 +0000	[thread overview]
Message-ID: <1556703867-22396-3-git-send-email-yuzenghui@huawei.com> (raw)
In-Reply-To: <1556703867-22396-1-git-send-email-yuzenghui@huawei.com>

As we're going to support creating CONT_{PTE,PMD}_SIZE huge mappings in
user_mem_abort(), the logic that checks vma_pagesize and builds the page
table entries will grow longer, with branches that look almost the same
but differ in subtle ways. Refactor this part into per-level helpers to
make it a bit cleaner.
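
The three builders share one shape but differ per level: only the PUD
and PMD variants apply the huge-page transform, and only the PMD and
PTE variants can carry the contiguous hint. As a sketch (pXX standing
in for the per-level type, not actual code):

	entry = kvm_pfn_pXX(pfn, mem_type);
	entry = kvm_pXX_mkhuge(entry);		/* PUD and PMD only */
	if (writable)
		entry = kvm_s2pXX_mkwrite(entry);
	if (needs_exec)
		entry = kvm_s2pXX_mkexec(entry);
	if (contiguous)
		entry = kvm_s2pXX_mkcont(entry);	/* PMD and PTE only */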

Add a 'contiguous' parameter to stage2_build_{pmd,pte}() to indicate
whether we're creating contiguous huge mappings.
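
For reference, the contiguous hint is the architecture's contiguous-bit
attribute (PTE_CONT on arm64), which lets a naturally aligned block of
entries mapping physically contiguous memory share a single TLB entry.
A minimal sketch of the kvm_s2pte_mkcont() helper assumed from patch 1:

	/*
	 * Sketch only -- the real helper is introduced by patch 1 of
	 * this series; PTE_CONT is arm64's contiguous-hint attribute.
	 */
	static inline pte_t kvm_s2pte_mkcont(pte_t pte)
	{
		pte_val(pte) |= PTE_CONT;
		return pte;
	}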

Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
---
 virt/kvm/arm/mmu.c | 81 ++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 57 insertions(+), 24 deletions(-)

diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 27c9583..cf8b035 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1616,6 +1616,56 @@ static void kvm_send_hwpoison_signal(unsigned long address,
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
+static pud_t stage2_build_pud(kvm_pfn_t pfn, pgprot_t mem_type, bool writable,
+			      bool needs_exec)
+{
+	pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
+
+	new_pud = kvm_pud_mkhuge(new_pud);
+	if (writable)
+		new_pud = kvm_s2pud_mkwrite(new_pud);
+
+	if (needs_exec)
+		new_pud = kvm_s2pud_mkexec(new_pud);
+
+	return new_pud;
+}
+
+static pmd_t stage2_build_pmd(kvm_pfn_t pfn, pgprot_t mem_type, bool writable,
+			      bool needs_exec, bool contiguous)
+{
+	pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
+
+	new_pmd = kvm_pmd_mkhuge(new_pmd);
+	if (writable)
+		new_pmd = kvm_s2pmd_mkwrite(new_pmd);
+
+	if (needs_exec)
+		new_pmd = kvm_s2pmd_mkexec(new_pmd);
+
+	if (contiguous)
+		new_pmd = kvm_s2pmd_mkcont(new_pmd);
+
+	return new_pmd;
+}
+
+static pte_t stage2_build_pte(kvm_pfn_t pfn, pgprot_t mem_type, bool writable,
+			      bool needs_exec, bool contiguous)
+{
+	pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
+
+	if (writable)
+		new_pte = kvm_s2pte_mkwrite(new_pte);
+
+	if (needs_exec)
+		new_pte = kvm_s2pte_mkexec(new_pte);
+
+	if (contiguous)
+		new_pte = kvm_s2pte_mkcont(new_pte);
+
+	return new_pte;
+}
+
 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
 					       unsigned long hva,
 					       unsigned long map_size)
@@ -1807,38 +1857,21 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
 
 	if (vma_pagesize == PUD_SIZE) {
-		pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
-
-		new_pud = kvm_pud_mkhuge(new_pud);
-		if (writable)
-			new_pud = kvm_s2pud_mkwrite(new_pud);
-
-		if (needs_exec)
-			new_pud = kvm_s2pud_mkexec(new_pud);
+		pud_t new_pud = stage2_build_pud(pfn, mem_type, writable,
+						 needs_exec);
 
 		ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
 	} else if (vma_pagesize == PMD_SIZE) {
-		pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
-
-		new_pmd = kvm_pmd_mkhuge(new_pmd);
-
-		if (writable)
-			new_pmd = kvm_s2pmd_mkwrite(new_pmd);
-
-		if (needs_exec)
-			new_pmd = kvm_s2pmd_mkexec(new_pmd);
+		pmd_t new_pmd = stage2_build_pmd(pfn, mem_type, writable,
+						 needs_exec, false);
 
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
-		pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
+		pte_t new_pte = stage2_build_pte(pfn, mem_type, writable,
+						 needs_exec, false);
 
-		if (writable) {
-			new_pte = kvm_s2pte_mkwrite(new_pte);
+		if (writable)
 			mark_page_dirty(kvm, gfn);
-		}
-
-		if (needs_exec)
-			new_pte = kvm_s2pte_mkexec(new_pte);
 
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
 	}
-- 
1.8.3.1



Thread overview: 24+ messages

2019-05-01  9:44 [RFC PATCH 0/5] KVM: arm64: Add support for contiguous PTE/PMD hugepages at stage2 Zenghui Yu
2019-05-01  9:44 ` [PATCH 1/5] KVM: arm/arm64: Introduce helpers for page table entries with contiguous bit Zenghui Yu
2019-05-01  9:44 ` [PATCH 2/5] KVM: arm/arm64: Re-factor building the stage2 page table entries Zenghui Yu [this message]
2019-05-01  9:44 ` [PATCH 3/5] KVM: arm/arm64: Support dirty page tracking for contiguous hugepages Zenghui Yu
2019-05-01  9:44 ` [PATCH 4/5] KVM: arm/arm64: Add support for creating PTE contiguous hugepages at stage2 Zenghui Yu
2019-05-01  9:44 ` [PATCH 5/5] KVM: arm/arm64: Add support for creating PMD contiguous hugepages at stage2 Zenghui Yu
