linux-mm.kvack.org archive mirror
* [RFC PATCH v2 0/3] arm64: tlb: add support for TTL field
@ 2020-03-12  4:10 Zhenyu Ye
  2020-03-12  4:10 ` [RFC PATCH v2 1/3] arm64: tlb: use __tlbi_level replace __tlbi in Stage-1 Zhenyu Ye
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Zhenyu Ye @ 2020-03-12  4:10 UTC (permalink / raw)
  To: mark.rutland, catalin.marinas, will, aneesh.kumar, maz,
	steven.price, broonie, guohanjun
  Cc: yezhenyu2, linux-arm-kernel, linux-kernel, linux-arch, linux-mm,
	arm, xiexiangyou, prime.zeng, zhangshaokun

ARMv8.4-TTL provides the TTL field in the TLBI instruction to indicate
the level of the translation table walk holding the leaf entry for the
address being invalidated. Hardware can use this information to
determine whether there is a risk of splintering.
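
A minimal sketch of how such a hint ends up in a TLBI operand is shown
below.  This is an illustration only: the helper name is hypothetical,
and it assumes the ARMv8.4-TTL layout of TTL in bits [47:44] of the VA
operand, with the granule in TTL[3:2] (0b01 = 4KB) and the level in
TTL[1:0].

/*
 * Illustration only, not part of this series.  TTL == 0 means "no
 * information provided", so level 0 is treated as "unknown".
 */
#define TLBI_TTL_SHIFT	44
#define TLBI_TTL_MASK	(0xfULL << TLBI_TTL_SHIFT)

static inline unsigned long tlbi_encode_ttl(unsigned long arg,
					    unsigned int level)
{
	unsigned long ttl = 0;

	if (level)				/* level 0: leave TTL clear */
		ttl = (1UL << 2) | (level & 3);	/* 4KB granule, walk level */

	arg &= ~TLBI_TTL_MASK;
	arg |= ttl << TLBI_TTL_SHIFT;
	return arg;
}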

This v2 series is based on Marc's NV series [1].

[1] git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git kvm-arm64/nv-5.6-rc1


Zhenyu Ye (3):
  arm64: tlb: use __tlbi_level replace __tlbi in Stage-1
  arm64: tlb: use mm_struct.context.flags to indicate TTL value
  arm64: tlb: add support for TTL in some functions

 arch/arm64/include/asm/mmu.h      | 11 +++++++++++
 arch/arm64/include/asm/tlb.h      |  3 +++
 arch/arm64/include/asm/tlbflush.h | 19 ++++++-------------
 arch/arm64/kernel/process.c       |  2 +-
 arch/arm64/mm/hugetlbpage.c       |  2 ++
 5 files changed, 23 insertions(+), 14 deletions(-)

-- 
2.19.1





* [RFC PATCH v2 1/3] arm64: tlb: use __tlbi_level replace __tlbi in Stage-1
  2020-03-12  4:10 [RFC PATCH v2 0/3] arm64: tlb: add support for TTL field Zhenyu Ye
@ 2020-03-12  4:10 ` Zhenyu Ye
  2020-03-12  4:10 ` [RFC PATCH v2 2/3] arm64: tlb: use mm_struct.context.flags to indicate TTL value Zhenyu Ye
  2020-03-12  4:10 ` [RFC PATCH v2 3/3] arm64: tlb: add support for TTL in some functions Zhenyu Ye
  2 siblings, 0 replies; 4+ messages in thread
From: Zhenyu Ye @ 2020-03-12  4:10 UTC (permalink / raw)
  To: mark.rutland, catalin.marinas, will, aneesh.kumar, maz,
	steven.price, broonie, guohanjun
  Cc: yezhenyu2, linux-arm-kernel, linux-kernel, linux-arch, linux-mm,
	arm, xiexiangyou, prime.zeng, zhangshaokun

ARMv8.4-TTL provides the TTL field in the TLBI instruction to indicate
the level of the translation table walk holding the leaf entry for the
address being invalidated.

This patch uses __tlbi_level to replace __tlbi and __tlbi_user in
Stage-1, and sets the default value of level to 0 (level not yet known).
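
For reference, a rough sketch of the shape __tlbi_level() takes after
this patch is shown below.  The macro itself comes from the NV series
this work is based on, so the TTL-encoding step here (bits [47:44],
4KB granule assumed) is illustrative and may differ from that series;
the point is that the former __tlbi_user() case now lives inside the
level-aware macro.

#define __tlbi_level(op, addr, level)					\
	do {								\
		u64 arg = addr;						\
									\
		if (level) {						\
			/* 4KB granule in TTL[3:2], level in TTL[1:0] */\
			u64 ttl = (1ULL << 2) | ((u64)(level) & 3);	\
									\
			arg &= ~(0xfULL << 44);	/* clear TTL[47:44] */	\
			arg |= ttl << 44;				\
		}							\
									\
		__tlbi(op, arg);					\
		/* added by this patch: the old __tlbi_user() case */	\
		if (arm64_kernel_unmapped_at_el0())			\
			__tlbi(op, (arg) | USER_ASID_FLAG);		\
	} while (0)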

Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
---
 arch/arm64/include/asm/tlbflush.h | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a3f70778a325..dda693f32099 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -46,11 +46,6 @@
 
 #define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
 
-#define __tlbi_user(op, arg) do {						\
-	if (arm64_kernel_unmapped_at_el0())					\
-		__tlbi(op, (arg) | USER_ASID_FLAG);				\
-} while (0)
-
 /* This macro creates a properly formatted VA operand for the TLBI */
 #define __TLBI_VADDR(addr, asid)				\
 	({							\
@@ -87,6 +82,8 @@
 		}							\
 									\
 		__tlbi(op,  arg);					\
+		if (arm64_kernel_unmapped_at_el0())			\
+			__tlbi(op, (arg) | USER_ASID_FLAG);		\
 	} while(0)
 
 /*
@@ -179,8 +176,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	unsigned long asid = __TLBI_VADDR(0, ASID(mm));
 
 	dsb(ishst);
-	__tlbi(aside1is, asid);
-	__tlbi_user(aside1is, asid);
+	__tlbi_level(aside1is, asid, 0);
 	dsb(ish);
 }
 
@@ -190,8 +186,7 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 	dsb(ishst);
-	__tlbi(vale1is, addr);
-	__tlbi_user(vale1is, addr);
+	__tlbi_level(vale1is, addr, 0);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -231,11 +226,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	for (addr = start; addr < end; addr += stride) {
 		if (last_level) {
-			__tlbi(vale1is, addr);
-			__tlbi_user(vale1is, addr);
+			__tlbi_level(vale1is, addr, 0);
 		} else {
-			__tlbi(vae1is, addr);
-			__tlbi_user(vae1is, addr);
+			__tlbi_level(vae1is, addr, 0);
 		}
 	}
 	dsb(ish);
-- 
2.19.1





* [RFC PATCH v2 2/3] arm64: tlb: use mm_struct.context.flags to indicate TTL value
  2020-03-12  4:10 [RFC PATCH v2 0/3] arm64: tlb: add support for TTL field Zhenyu Ye
  2020-03-12  4:10 ` [RFC PATCH v2 1/3] arm64: tlb: use __tlbi_level replace __tlbi in Stage-1 Zhenyu Ye
@ 2020-03-12  4:10 ` Zhenyu Ye
  2020-03-12  4:10 ` [RFC PATCH v2 3/3] arm64: tlb: add support for TTL in some functions Zhenyu Ye
  2 siblings, 0 replies; 4+ messages in thread
From: Zhenyu Ye @ 2020-03-12  4:10 UTC (permalink / raw)
  To: mark.rutland, catalin.marinas, will, aneesh.kumar, maz,
	steven.price, broonie, guohanjun
  Cc: yezhenyu2, linux-arm-kernel, linux-kernel, linux-arch, linux-mm,
	arm, xiexiangyou, prime.zeng, zhangshaokun

Use the architecture-specific mm context to indicate the level of the
page table walk.  This avoids lots of changes to the common interface.
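
As an illustration of the intended use (set_ttl_level() below is
hypothetical and not part of this patch): a caller stashes the walk
level in flags[7:4], and TLBI_LEVEL() recovers it when the flush is
issued.

/* Illustration only: flags[3:0] keep MMCF_AARCH32, flags[7:4] the level. */
static inline void set_ttl_level(struct mm_struct *mm, unsigned long level_flag)
{
	mm->context.flags &= ~(0xfUL << 4);	/* drop any stale level */
	mm->context.flags |= level_flag;	/* e.g. S1_PMD_LEVEL */
}

For example, set_ttl_level(mm, S1_PMD_LEVEL) makes TLBI_LEVEL(mm) return
2, so a later flush_tlb_range() on that mm issues its TLBIs with a
level-2 TTL hint.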

Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
---
 arch/arm64/include/asm/mmu.h      | 11 +++++++++++
 arch/arm64/include/asm/tlbflush.h |  6 +++---
 arch/arm64/kernel/process.c       |  2 +-
 3 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index e4d862420bb4..f86a38ab3632 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -8,6 +8,10 @@
 #include <asm/cputype.h>
 
 #define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
+#define S1_PUD_LEVEL	0x10	/* mm context flag for the level of ptw */
+#define S1_PMD_LEVEL	0x20
+#define S1_PTE_LEVEL	0x30
+
 #define USER_ASID_BIT	48
 #define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
 #define TTBR_ASID_MASK	(UL(0xffff) << 48)
@@ -19,6 +23,10 @@
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;
+	/*
+	 * flags[3:0]: AArch32 executables
+	 * flags[7:4]: the level of page table walk
+	 */
 	unsigned long	flags;
 } mm_context_t;
 
@@ -29,6 +37,9 @@ typedef struct {
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
+/* This macro is only used by TLBI TTL */
+#define TLBI_LEVEL(mm)	((mm)->context.flags >> 4 & 0xf)
+
 extern bool arm64_use_ng_mappings;
 
 static inline bool arm64_kernel_unmapped_at_el0(void)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index dda693f32099..312b9edb281b 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -186,7 +186,7 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 	dsb(ishst);
-	__tlbi_level(vale1is, addr, 0);
+	__tlbi_level(vale1is, addr, TLBI_LEVEL(vma->vm_mm));
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -226,9 +226,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	for (addr = start; addr < end; addr += stride) {
 		if (last_level) {
-			__tlbi_level(vale1is, addr, 0);
+			__tlbi_level(vale1is, addr, TLBI_LEVEL(vma->vm_mm));
 		} else {
-			__tlbi_level(vae1is, addr, 0);
+			__tlbi_level(vae1is, addr, TLBI_LEVEL(vma->vm_mm));
 		}
 	}
 	dsb(ish);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index bbb0f0c145f6..bf835755d9ed 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -562,7 +562,7 @@ unsigned long arch_align_stack(unsigned long sp)
  */
 void arch_setup_new_exec(void)
 {
-	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+	current->mm->context.flags |= is_compat_task() ? MMCF_AARCH32 : 0;
 
 	ptrauth_thread_init_user(current);
 }
-- 
2.19.1





* [RFC PATCH v2 3/3] arm64: tlb: add support for TTL in some functions
  2020-03-12  4:10 [RFC PATCH v2 0/3] arm64: tlb: add support for TTL field Zhenyu Ye
  2020-03-12  4:10 ` [RFC PATCH v2 1/3] arm64: tlb: use __tlbi_level replace __tlbi in Stage-1 Zhenyu Ye
  2020-03-12  4:10 ` [RFC PATCH v2 2/3] arm64: tlb: use mm_struct.context.flags to indicate TTL value Zhenyu Ye
@ 2020-03-12  4:10 ` Zhenyu Ye
  2 siblings, 0 replies; 4+ messages in thread
From: Zhenyu Ye @ 2020-03-12  4:10 UTC (permalink / raw)
  To: mark.rutland, catalin.marinas, will, aneesh.kumar, maz,
	steven.price, broonie, guohanjun
  Cc: yezhenyu2, linux-arm-kernel, linux-kernel, linux-arch, linux-mm,
	arm, xiexiangyou, prime.zeng, zhangshaokun

Add support for TTL in some arm64-specific functions.  The relevant
functions are listed below; a brief sketch of the intended flow follows
the list.

	__pte_free_tlb
	__pmd_free_tlb
	__pud_free_tlb
	clear_flush
	get_clear_flush
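
A sketch of the intended flow (the wrapper below is hypothetical): a
table-freeing helper records its level in the mm context, and the flush
that eventually follows picks that level up for the TTL field.

/*
 * Illustration only: example_pmd_teardown() is hypothetical and shows
 * how the level recorded by __pmd_free_tlb() is meant to be consumed.
 */
static void example_pmd_teardown(struct mmu_gather *tlb, pmd_t *pmdp,
				 unsigned long addr)
{
	/* records S1_PMD_LEVEL in tlb->mm->context.flags */
	__pmd_free_tlb(tlb, pmdp, addr);

	/*
	 * The eventual tlb_flush()/flush_tlb_range() for this mm reads
	 * TLBI_LEVEL(tlb->mm) (2 here) and passes it to __tlbi_level(),
	 * so the TLBI carries a PMD-level TTL hint.
	 */
}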

Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
---
 arch/arm64/include/asm/tlb.h | 3 +++
 arch/arm64/mm/hugetlbpage.c  | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index b76df828e6b7..5ab686e300e9 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -44,6 +44,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
+	tlb->mm->context.flags |= S1_PTE_LEVEL;
 	pgtable_pte_page_dtor(pte);
 	tlb_remove_table(tlb, pte);
 }
@@ -53,6 +54,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
 	struct page *page = virt_to_page(pmdp);
+	tlb->mm->context.flags |= S1_PMD_LEVEL;
 
 	pgtable_pmd_page_dtor(page);
 	tlb_remove_table(tlb, page);
@@ -63,6 +65,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
+	tlb->mm->context.flags |= S1_PUD_LEVEL;
 	tlb_remove_table(tlb, virt_to_page(pudp));
 }
 #endif
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index bbeb6a5a6ba6..a69248aa6e1f 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -141,6 +141,7 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 
 	if (valid) {
 		struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+		mm->context.flags |= S1_PTE_LEVEL;
 		flush_tlb_range(&vma, saddr, addr);
 	}
 	return orig_pte;
@@ -163,6 +164,7 @@ static void clear_flush(struct mm_struct *mm,
 {
 	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
 	unsigned long i, saddr = addr;
+	mm->context.flags |= S1_PTE_LEVEL;
 
 	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
 		pte_clear(mm, addr, ptep);
-- 
2.19.1





