From: Zhenyu Ye <yezhenyu2@huawei.com>
To: <peterz@infradead.org>, <mark.rutland@arm.com>, <will@kernel.org>,
	<catalin.marinas@arm.com>, <aneesh.kumar@linux.ibm.com>,
	<akpm@linux-foundation.org>, <npiggin@gmail.com>, <arnd@arndb.de>,
	<rostedt@goodmis.org>, <maz@kernel.org>, <suzuki.poulose@arm.com>,
	<tglx@linutronix.de>, <yuzhao@google.com>, <Dave.Martin@arm.com>,
	<steven.price@arm.com>, <broonie@kernel.org>,
	<guohanjun@huawei.com>, <corbet@lwn.net>, <vgupta@synopsys.com>,
	<tony.luck@intel.com>
Cc: <yezhenyu2@huawei.com>, <linux-arm-kernel@lists.infradead.org>,
	<linux-kernel@vger.kernel.org>, <linux-arch@vger.kernel.org>,
	<linux-mm@kvack.org>, <arm@kernel.org>, <xiexiangyou@huawei.com>,
	<prime.zeng@hisilicon.com>, <zhangshaokun@hisilicon.com>,
	<kuhn.chenqun@huawei.com>
Subject: [RFC PATCH v5 8/8] arm64: tlb: Set the TTL field in flush_tlb_range
Date: Tue, 31 Mar 2020 22:29:27 +0800	[thread overview]
Message-ID: <20200331142927.1237-9-yezhenyu2@huawei.com> (raw)
In-Reply-To: <20200331142927.1237-1-yezhenyu2@huawei.com>

This patch uses the cleared_* fields in struct mmu_gather to set the
TTL field in flush_tlb_range().  The TTL field tells the CPU which
level of the translation table walk holds the leaf entry for the
address being invalidated, so hardware that implements ARMv8.4-TTL
can avoid unnecessary invalidation work.
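
For reference, the level-hinted helpers __tlbi_level() and
__tlbi_user_level() used below are introduced earlier in this series
(patches 2/8 and 3/8) and are not shown here.  As a rough sketch only --
the name, shift and granule encoding below are assumptions made for
illustration, not the real helpers -- the idea is to fold the level hint
into the TLBI operand whenever the CPU advertises ARMv8.4-TTL:

	/* Sketch only -- see patch 2/8 of this series for the real helper. */
	#define TLBI_TTL_SHIFT	44	/* assumed position of the TTL field */

	static inline unsigned long tlbi_add_ttl(unsigned long arg, int level)
	{
		unsigned long ttl = 0;

		/* A level of 0 means "no hint" and keeps today's behaviour. */
		if (level)
			ttl = (1UL << 2) | level;	/* assume a 4K granule */

		return arg | (ttl << TLBI_TTL_SHIFT);
	}

A zero level is therefore always safe: it simply degrades to the current
behaviour of invalidating without a level hint.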

Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
---
 arch/arm64/include/asm/tlb.h      | 39 ++++++++++++++++++++++++++++++-
 arch/arm64/include/asm/tlbflush.h | 22 +++++------------
 2 files changed, 44 insertions(+), 17 deletions(-)
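
Not part of the patch, but as a quick illustration of the hint values
tlb_get_level() below produces: a minimal userspace mock (assumed names,
written only to mirror the same logic) behaves as follows.

	#include <assert.h>

	/* Mirrors the cleared_* bits of struct mmu_gather, illustration only. */
	struct mock_gather {
		unsigned int cleared_ptes:1, cleared_pmds:1,
			     cleared_puds:1, cleared_p4ds:1;
	};

	static int mock_get_level(const struct mock_gather *tlb)
	{
		int sum = tlb->cleared_ptes + tlb->cleared_pmds +
			  tlb->cleared_puds + tlb->cleared_p4ds;

		if (sum != 1)
			return 0;	/* ambiguous or unknown: no hint */
		if (tlb->cleared_ptes)
			return 3;	/* base pages: leaf entries at level 3 */
		if (tlb->cleared_pmds)
			return 2;	/* PMD-sized huge pages */
		if (tlb->cleared_puds)
			return 1;	/* PUD-sized huge pages */
		return 0;		/* p4ds: unsupported, no hint */
	}

	int main(void)
	{
		struct mock_gather only_ptes = { .cleared_ptes = 1 };
		struct mock_gather mixed = { .cleared_ptes = 1, .cleared_pmds = 1 };

		assert(mock_get_level(&only_ptes) == 3);	/* level-3 hint */
		assert(mock_get_level(&mixed) == 0);		/* falls back to 0 */
		return 0;
	}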

diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index b76df828e6b7..72b6e3763df2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -21,11 +21,34 @@ static void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
 
+/*
+ * Get the TLBI level hint for arm64.  Return 0 (no hint) if more than
+ * one of the cleared_* fields is set, or if none of them is set.
+ * Arm64 doesn't support p4ds at the moment.
+ */
+static inline int tlb_get_level(struct mmu_gather *tlb)
+{
+	int sum = tlb->cleared_ptes + tlb->cleared_pmds +
+		  tlb->cleared_puds + tlb->cleared_p4ds;
+
+	if (sum != 1)
+		return 0;
+	else if (tlb->cleared_ptes)
+		return 3;
+	else if (tlb->cleared_pmds)
+		return 2;
+	else if (tlb->cleared_puds)
+		return 1;
+
+	return 0;
+}
+
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
 	bool last_level = !tlb->freed_tables;
 	unsigned long stride = tlb_get_unmap_size(tlb);
+	int tlb_level = tlb_get_level(tlb);
 
 	/*
 	 * If we're tearing down the address space then we only care about
@@ -38,7 +61,21 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 		return;
 	}
 
-	__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
+	__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
+			  last_level, tlb_level);
+}
+
+static inline void flush_tlb_range(struct mmu_gather *tlb,
+				   struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	/*
+	 * We cannot use leaf-only invalidation here, since we may be invalidating
+	 * table entries as part of collapsing hugepages or moving page tables.
+	 */
+	unsigned long stride = tlb_get_unmap_size(tlb);
+	int tlb_level = tlb_get_level(tlb);
+	__flush_tlb_range(vma, start, end, stride, false, tlb_level);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 0b4d75a2270b..dc8e803692f8 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -215,7 +215,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
-				     unsigned long stride, bool last_level)
+				     unsigned long stride, bool last_level,
+				     int tlb_level)
 {
 	unsigned long asid = ASID(vma->vm_mm);
 	unsigned long addr;
@@ -237,27 +238,16 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	for (addr = start; addr < end; addr += stride) {
 		if (last_level) {
-			__tlbi_level(vale1is, addr, 0);
-			__tlbi_user_level(vale1is, addr, 0);
+			__tlbi_level(vale1is, addr, tlb_level);
+			__tlbi_user_level(vale1is, addr, tlb_level);
 		} else {
-			__tlbi_level(vae1is, addr, 0);
-			__tlbi_user_level(vae1is, addr, 0);
+			__tlbi_level(vae1is, addr, tlb_level);
+			__tlbi_user_level(vae1is, addr, tlb_level);
 		}
 	}
 	dsb(ish);
 }
 
-static inline void flush_tlb_range(struct mmu_gather *tlb,
-				   struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	/*
-	 * We cannot use leaf-only invalidation here, since we may be invalidating
-	 * table entries as part of collapsing hugepages or moving page tables.
-	 */
-	__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
-}
-
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
-- 
2.19.1



Thread overview: 50+ messages

2020-03-31 14:29 [RFC PATCH v5 0/8] arm64: tlb: add support for TTL feature Zhenyu Ye
2020-03-31 14:29 ` [RFC PATCH v5 1/8] arm64: Detect the ARMv8.4 TTL feature Zhenyu Ye
2020-03-31 14:29 ` [RFC PATCH v5 2/8] arm64: Add level-hinted TLB invalidation helper Zhenyu Ye
2020-03-31 14:29 ` [RFC PATCH v5 3/8] arm64: Add tlbi_user_level TLB invalidation helper Zhenyu Ye
2020-03-31 14:29 ` [RFC PATCH v5 4/8] mm: tlb: Pass struct mmu_gather to flush_pmd_tlb_range Zhenyu Ye
2020-03-31 15:13   ` Peter Zijlstra
2020-04-01  8:51     ` Zhenyu Ye
2020-04-01 12:20       ` Peter Zijlstra
2020-04-02 11:24         ` Zhenyu Ye
2020-04-02 16:38           ` Peter Zijlstra
2020-04-03  5:14             ` Zhenyu Ye
2020-04-08  9:00               ` Zhenyu Ye
2020-03-31 14:29 ` [RFC PATCH v5 5/8] mm: tlb: Pass struct mmu_gather to flush_pud_tlb_range Zhenyu Ye
2020-03-31 14:29 ` [RFC PATCH v5 6/8] mm: tlb: Pass struct mmu_gather to flush_hugetlb_tlb_range Zhenyu Ye
2020-03-31 14:29 ` [RFC PATCH v5 7/8] mm: tlb: Pass struct mmu_gather to flush_tlb_range Zhenyu Ye
2020-03-31 21:08   ` kbuild test robot
2020-03-31 14:29 ` [RFC PATCH v5 8/8] arm64: tlb: Set the TTL field in flush_tlb_range Zhenyu Ye [this message]
