* [PATCH 1/3] powerpc/mm/radix: Remove unused code
@ 2018-03-23  4:56 Aneesh Kumar K.V
  2018-03-23  4:56 ` [PATCH 2/3] powerpc/mm/radix: Move the functions that do the actual tlbie closer Aneesh Kumar K.V
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Aneesh Kumar K.V @ 2018-03-23  4:56 UTC (permalink / raw)
  To: benh, paulus, mpe, mauricfo; +Cc: linuxppc-dev, Aneesh Kumar K.V

These functions are not used in the code. Remove them.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 .../powerpc/include/asm/book3s/64/tlbflush-radix.h |  3 --
 arch/powerpc/mm/tlb-radix.c                        | 40 ----------------------
 2 files changed, 43 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 8eea90f80e45..19b45ba6caf9 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -47,9 +47,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
 #endif
 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
-extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
-				     unsigned long page_size);
-extern void radix__flush_tlb_lpid(unsigned long lpid);
 extern void radix__flush_tlb_all(void);
 extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
 					unsigned long address);
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 71d1b19ad1c0..8ce858ec59e1 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -603,46 +603,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
-			      unsigned long page_size)
-{
-	unsigned long rb,rs,prs,r;
-	unsigned long ap;
-	unsigned long ric = RIC_FLUSH_TLB;
-
-	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
-	rb = gpa & ~(PPC_BITMASK(52, 63));
-	rb |= ap << PPC_BITLSHIFT(58);
-	rs = lpid & ((1UL << 32) - 1);
-	prs = 0; /* process scoped */
-	r = 1;   /* raidx format */
-
-	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-	asm volatile("eieio; tlbsync; ptesync": : :"memory");
-	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
-}
-EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
-
-void radix__flush_tlb_lpid(unsigned long lpid)
-{
-	unsigned long rb,rs,prs,r;
-	unsigned long ric = RIC_FLUSH_ALL;
-
-	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
-	rs = lpid & ((1UL << 32) - 1);
-	prs = 0; /* partition scoped */
-	r = 1;   /* raidx format */
-
-	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-	asm volatile("eieio; tlbsync; ptesync": : :"memory");
-	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
-}
-EXPORT_SYMBOL(radix__flush_tlb_lpid);
-
 void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
 				unsigned long start, unsigned long end)
 {
-- 
2.14.3


* [PATCH 2/3] powerpc/mm/radix: Move the functions that do the actual tlbie closer
  2018-03-23  4:56 [PATCH 1/3] powerpc/mm/radix: Remove unused code Aneesh Kumar K.V
@ 2018-03-23  4:56 ` Aneesh Kumar K.V
  2018-03-23  4:56 ` [PATCH 3/3] powerpc/mm: Fixup tlbie vs store ordering issue on POWER9 Aneesh Kumar K.V
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 7+ messages in thread
From: Aneesh Kumar K.V @ 2018-03-23  4:56 UTC (permalink / raw)
  To: benh, paulus, mpe, mauricfo; +Cc: linuxppc-dev, Aneesh Kumar K.V

No functional change. Just code movement to make later changes easier.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/mm/tlb-radix.c | 64 ++++++++++++++++++++++-----------------------
 1 file changed, 32 insertions(+), 32 deletions(-)

diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 8ce858ec59e1..570fdc7b0e74 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -119,6 +119,38 @@ static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
 }
 
+static inline void __tlbiel_va(unsigned long va, unsigned long pid,
+			       unsigned long ap, unsigned long ric)
+{
+	unsigned long rb,rs,prs,r;
+
+	rb = va & ~(PPC_BITMASK(52, 63));
+	rb |= ap << PPC_BITLSHIFT(58);
+	rs = pid << PPC_BITLSHIFT(31);
+	prs = 1; /* process scoped */
+	r = 1;   /* raidx format */
+
+	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+	trace_tlbie(0, 1, rb, rs, ric, prs, r);
+}
+
+static inline void __tlbie_va(unsigned long va, unsigned long pid,
+			      unsigned long ap, unsigned long ric)
+{
+	unsigned long rb,rs,prs,r;
+
+	rb = va & ~(PPC_BITMASK(52, 63));
+	rb |= ap << PPC_BITLSHIFT(58);
+	rs = pid << PPC_BITLSHIFT(31);
+	prs = 1; /* process scoped */
+	r = 1;   /* raidx format */
+
+	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
+
 /*
  * We use 128 set in radix mode and 256 set in hpt mode.
  */
@@ -155,22 +187,6 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
-static inline void __tlbiel_va(unsigned long va, unsigned long pid,
-			       unsigned long ap, unsigned long ric)
-{
-	unsigned long rb,rs,prs,r;
-
-	rb = va & ~(PPC_BITMASK(52, 63));
-	rb |= ap << PPC_BITLSHIFT(58);
-	rs = pid << PPC_BITLSHIFT(31);
-	prs = 1; /* process scoped */
-	r = 1;   /* raidx format */
-
-	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-	trace_tlbie(0, 1, rb, rs, ric, prs, r);
-}
-
 static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
 				    unsigned long pid, unsigned long page_size,
 				    unsigned long psize)
@@ -203,22 +219,6 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
 	asm volatile("ptesync": : :"memory");
 }
 
-static inline void __tlbie_va(unsigned long va, unsigned long pid,
-			     unsigned long ap, unsigned long ric)
-{
-	unsigned long rb,rs,prs,r;
-
-	rb = va & ~(PPC_BITMASK(52, 63));
-	rb |= ap << PPC_BITLSHIFT(58);
-	rs = pid << PPC_BITLSHIFT(31);
-	prs = 1; /* process scoped */
-	r = 1;   /* raidx format */
-
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-	trace_tlbie(0, 0, rb, rs, ric, prs, r);
-}
-
 static inline void __tlbie_va_range(unsigned long start, unsigned long end,
 				    unsigned long pid, unsigned long page_size,
 				    unsigned long psize)
-- 
2.14.3


* [PATCH 3/3] powerpc/mm: Fixup tlbie vs store ordering issue on POWER9
  2018-03-23  4:56 [PATCH 1/3] powerpc/mm/radix: Remove unused code Aneesh Kumar K.V
  2018-03-23  4:56 ` [PATCH 2/3] powerpc/mm/radix: Move the functions that do the actual tlbie closer Aneesh Kumar K.V
@ 2018-03-23  4:56 ` Aneesh Kumar K.V
  2018-03-23  5:55   ` Michael Ellerman
  2018-03-23  6:28   ` Benjamin Herrenschmidt
  2018-03-23  7:33 ` [PATCH 1/3] powerpc/mm/radix: Remove unused code Nicholas Piggin
  2018-03-23 11:11 ` [1/3] " Michael Ellerman
  3 siblings, 2 replies; 7+ messages in thread
From: Aneesh Kumar K.V @ 2018-03-23  4:56 UTC (permalink / raw)
  To: benh, paulus, mpe, mauricfo; +Cc: linuxppc-dev, Aneesh Kumar K.V

On POWER9, under some circumstances, a broadcast TLB invalidation might
complete before all previous stores have drained, potentially allowing
stale stores to become visible after the invalidation. This works around
it by doubling up those TLB invalidations, which was verified by HW to be
sufficient to close the risk window.
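
As a rough sketch of the resulting pattern (illustration only; the
tlbie_with_fixup() wrapper and do_one_tlbie() helper are hypothetical
stand-ins for the __tlbie_* paths touched in the diff below):

static inline void tlbie_with_fixup(unsigned long rb, unsigned long rs)
{
	asm volatile("ptesync" : : : "memory");
	do_one_tlbie(rb, rs);		/* the normal invalidation */
	if (cpu_has_feature(CPU_FTR_TLBIE_BUG)) {
		/*
		 * Extra ptesync so the repeated tlbie cannot be reordered
		 * ahead of the first one; the second tlbie closes the
		 * window in which stale stores could still be seen.
		 */
		asm volatile("ptesync" : : : "memory");
		do_one_tlbie(rb, rs);
	}
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}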

Fixes: 1a472c9dba6b ("powerpc/mm/radix: Add tlbflush routines")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/cputable.h    |  3 ++-
 arch/powerpc/kvm/book3s_64_mmu_radix.c |  3 +++
 arch/powerpc/kvm/book3s_hv_rm_mmu.c    | 11 +++++++++++
 arch/powerpc/mm/hash_native_64.c       | 16 +++++++++++++++-
 arch/powerpc/mm/pgtable_64.c           |  1 +
 arch/powerpc/mm/tlb-radix.c            | 15 +++++++++++++++
 6 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index a2c5c95882cf..6c151d2e9bd9 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -203,6 +203,7 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
+#define CPU_FTR_TLBIE_BUG		LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)
 #define CPU_FTR_POWER9_DD2_1		LONG_ASM_CONST(0x8000000000000000)
 
@@ -465,7 +466,7 @@ static inline void cpu_feature_keys_init(void) { }
 	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
 	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | \
-	    CPU_FTR_PKEY)
+	    CPU_FTR_PKEY | CPU_FTR_TLBIE_BUG)
 #define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
 			     (~CPU_FTR_SAO))
 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 5cb4e4687107..62f60778177b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -157,6 +157,9 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
 	asm volatile("ptesync": : :"memory");
 	asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
 		     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
+	if (cpu_has_feature(CPU_FTR_TLBIE_BUG))
+		asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
+			     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
 	asm volatile("ptesync": : :"memory");
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 8888e625a999..aebcb718b614 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -473,6 +473,17 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 			trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
 				kvm->arch.lpid, 0, 0, 0);
 		}
+
+		if (cpu_has_feature(CPU_FTR_TLBIE_BUG)) {
+			/*
+			 * Need the extra ptesync to make sure we don't
+			 * re-order the tlbie
+			 */
+			asm volatile("ptesync": : :"memory");
+			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
+				     "r" (rbvalues[0]), "r" (kvm->arch.lpid));
+		}
+
 		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 		kvm->arch.tlbie_lock = 0;
 	} else {
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 993842f1ed60..45b1772d3097 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -201,6 +201,15 @@ static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
 	return va;
 }
 
+static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+{
+	if (cpu_has_feature(CPU_FTR_TLBIE_BUG)) {
+		/* Need the extra ptesync to ensure we don't reorder tlbie*/
+		asm volatile("ptesync": : :"memory");
+		___tlbie(vpn, psize, apsize, ssize);
+	}
+}
+
 static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 {
 	unsigned long rb;
@@ -278,6 +287,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
 		asm volatile("ptesync": : :"memory");
 	} else {
 		__tlbie(vpn, psize, apsize, ssize);
+		fixup_tlbie(vpn, psize, apsize, ssize);
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
@@ -771,7 +781,7 @@ static void native_hpte_clear(void)
  */
 static void native_flush_hash_range(unsigned long number, int local)
 {
-	unsigned long vpn;
+	unsigned long vpn = 0;
 	unsigned long hash, index, hidx, shift, slot;
 	struct hash_pte *hptep;
 	unsigned long hpte_v;
@@ -843,6 +853,10 @@ static void native_flush_hash_range(unsigned long number, int local)
 				__tlbie(vpn, psize, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
+		/*
+		 * Just do one more with the last used values.
+		 */
+		fixup_tlbie(vpn, psize, psize, ssize);
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
 
 		if (lock_tlbie)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 28c980eb4422..adf469f312f2 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -481,6 +481,7 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
 			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
 		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
 	}
+	/* do we need fixup here ?*/
 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 570fdc7b0e74..264b9e5f97cc 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -151,6 +151,17 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid,
 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
 }
 
+static inline void fixup_tlbie(void)
+{
+	unsigned long pid = 0;
+	unsigned long va = ((1UL << 52) - 1);
+
+	if (cpu_has_feature(CPU_FTR_TLBIE_BUG)) {
+		asm volatile("ptesync": : :"memory");
+		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
+	}
+}
+
 /*
  * We use 128 set in radix mode and 256 set in hpt mode.
  */
@@ -184,6 +195,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 {
 	asm volatile("ptesync": : :"memory");
 	__tlbie_pid(pid, ric);
+	fixup_tlbie();
 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
@@ -237,6 +249,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
 
 	asm volatile("ptesync": : :"memory");
 	__tlbie_va(va, pid, ap, ric);
+	fixup_tlbie();
 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
@@ -248,6 +261,7 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
 	if (also_pwc)
 		__tlbie_pid(pid, RIC_FLUSH_PWC);
 	__tlbie_va_range(start, end, pid, page_size, psize);
+	fixup_tlbie();
 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
@@ -465,6 +479,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			if (hflush)
 				__tlbie_va_range(hstart, hend, pid,
 						HPAGE_PMD_SIZE, MMU_PAGE_2M);
+			fixup_tlbie();
 			asm volatile("eieio; tlbsync; ptesync": : :"memory");
 		}
 	}
-- 
2.14.3


* Re: [PATCH 3/3] powerpc/mm: Fixup tlbie vs store ordering issue on POWER9
  2018-03-23  4:56 ` [PATCH 3/3] powerpc/mm: Fixup tlbie vs store ordering issue on POWER9 Aneesh Kumar K.V
@ 2018-03-23  5:55   ` Michael Ellerman
  2018-03-23  6:28   ` Benjamin Herrenschmidt
  1 sibling, 0 replies; 7+ messages in thread
From: Michael Ellerman @ 2018-03-23  5:55 UTC (permalink / raw)
  To: Aneesh Kumar K.V, benh, paulus, mauricfo; +Cc: linuxppc-dev, Aneesh Kumar K.V

"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> writes:
> diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
> index a2c5c95882cf..6c151d2e9bd9 100644
> --- a/arch/powerpc/include/asm/cputable.h
> +++ b/arch/powerpc/include/asm/cputable.h
> @@ -465,7 +466,7 @@ static inline void cpu_feature_keys_init(void) { }
>  	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
>  	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
>  	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | \
> -	    CPU_FTR_PKEY)
> +	    CPU_FTR_PKEY | CPU_FTR_TLBIE_BUG)

We also need to enable this when we're using DT_CPU_FTRS.

We're still working out the best way to handle that sort of thing long
term; for now I'll just fold in a quirk that turns this feature on for
all Power9 revisions. We can do a follow-up patch to make that
conditional later.

diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 0bcfb0f256e1..553514cd6c27 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -709,6 +709,9 @@ static __init void cpufeatures_cpu_quirks(void)
 		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
 	else if ((version & 0xffffefff) == 0x004e0201)
 		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+
+	if ((version & 0xffff0000) == 0x004e0000)
+		cur_cpu_spec->cpu_features |= CPU_FTR_TLBIE_BUG;
 }
 

cheers


* Re: [PATCH 3/3] powerpc/mm: Fixup tlbie vs store ordering issue on POWER9
  2018-03-23  4:56 ` [PATCH 3/3] powerpc/mm: Fixup tlbie vs store ordering issue on POWER9 Aneesh Kumar K.V
  2018-03-23  5:55   ` Michael Ellerman
@ 2018-03-23  6:28   ` Benjamin Herrenschmidt
  1 sibling, 0 replies; 7+ messages in thread
From: Benjamin Herrenschmidt @ 2018-03-23  6:28 UTC (permalink / raw)
  To: Aneesh Kumar K.V, paulus, mpe, mauricfo; +Cc: linuxppc-dev

On Fri, 2018-03-23 at 10:26 +0530, Aneesh Kumar K.V wrote:
> +#define CPU_FTR_TLBIE_BUG              LONG_ASM_CONST(0x2000000000000000)

I did ask you to make this CPU_FTR_POWER9_TLBIE_BUG...

Cheers,
Ben


* Re: [PATCH 1/3] powerpc/mm/radix: Remove unused code
  2018-03-23  4:56 [PATCH 1/3] powerpc/mm/radix: Remove unused code Aneesh Kumar K.V
  2018-03-23  4:56 ` [PATCH 2/3] powerpc/mm/radix: Move the functions that do the actual tlbie closer Aneesh Kumar K.V
  2018-03-23  4:56 ` [PATCH 3/3] powerpc/mm: Fixup tlbie vs store ordering issue on POWER9 Aneesh Kumar K.V
@ 2018-03-23  7:33 ` Nicholas Piggin
  2018-03-23 11:11 ` [1/3] " Michael Ellerman
  3 siblings, 0 replies; 7+ messages in thread
From: Nicholas Piggin @ 2018-03-23  7:33 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: benh, paulus, mpe, mauricfo, linuxppc-dev

On Fri, 23 Mar 2018 10:26:25 +0530
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:

> These functions are not used in the code. Remove them.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>

You already acked this one :)

https://patchwork.ozlabs.org/patch/868852/


* Re: [1/3] powerpc/mm/radix: Remove unused code
  2018-03-23  4:56 [PATCH 1/3] powerpc/mm/radix: Remove unused code Aneesh Kumar K.V
                   ` (2 preceding siblings ...)
  2018-03-23  7:33 ` [PATCH 1/3] powerpc/mm/radix: Remove unused code Nicholas Piggin
@ 2018-03-23 11:11 ` Michael Ellerman
  3 siblings, 0 replies; 7+ messages in thread
From: Michael Ellerman @ 2018-03-23 11:11 UTC (permalink / raw)
  To: Aneesh Kumar K.V, benh, paulus, mauricfo; +Cc: linuxppc-dev, Aneesh Kumar K.V

On Fri, 2018-03-23 at 04:56:25 UTC, "Aneesh Kumar K.V" wrote:
> These functions are not used in the code. Remove them.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>

Series applied to powerpc fixes, thanks.

https://git.kernel.org/powerpc/c/99491e2d0e50c53a0620dfec340b24

cheers

