* [PATCH v2] Fix __tlbiel in hash_native_64
@ 2016-09-13  6:40 Balbir Singh
  2016-09-20  7:04 ` [v2] " Michael Ellerman
  2016-09-28  7:25 ` [PATCH v2] Fix buglet in tlbiel where we set L=1 for ISA 206/7 Balbir Singh
  0 siblings, 2 replies; 3+ messages in thread
From: Balbir Singh @ 2016-09-13  6:40 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: Balbir Singh, Paul Mackerras, Aneesh Kumar K.V, Michael Ellerman,
	Benjamin Herrenschmidt

__tlbie and __tlbiel are out of sync. __tlbie does the right thing: it
issues "tlbie rb, L" when CPU_FTR_ARCH_206 (the cpu feature) is clear
and "tlbie rb" otherwise. While cleaning up __tlbiel I noticed that it
was setting the L bit (the (1 << 21) in the hand-built .long opcode)
regardless of the ISA version for non-4k (L) pages. This patch fixes
that issue. It also renames the current PPC_TLBIEL to PPC_TLBIEL_5 and
introduces a new PPC_TLBIEL analogous to PPC_TLBIE.

The arguments to PPC_TLBIE have also been swapped so that the macro
matches the actual assembly operand order, which makes the code easier
to read.
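
For illustration, here is a minimal C sketch of the encoding the old
hand-built .long produces; the constants are taken from the diff below,
and build_raw_tlbiel() is a hypothetical helper, not kernel code:

  #include <stdio.h>
  #include <stdint.h>

  /* Constants as used by the old ".long 0x7c000224 | (rb << 11) | (L << 21)" */
  #define TLBIEL_OPCODE  0x7c000224u
  #define RB_SHIFT       11u   /* RB register field */
  #define L_SHIFT        21u   /* the L bit that was set unconditionally */

  /* build_raw_tlbiel() is a hypothetical helper, for illustration only */
  static uint32_t build_raw_tlbiel(unsigned int rb_reg, unsigned int large)
  {
          return TLBIEL_OPCODE | (rb_reg << RB_SHIFT) | (large << L_SHIFT);
  }

  int main(void)
  {
          /* e.g. va in r9, non-4k page: the old code always passed large=1 */
          printf("0x%08x\n", build_raw_tlbiel(9, 1));
          return 0;
  }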

Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Michael Ellerman <michael@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@au1.ibm.com>

Signed-off-by: Balbir Singh <bsingharora@gmail.com>
---
 arch/powerpc/include/asm/ppc-opcode.h |  9 ++++++---
 arch/powerpc/mm/hash_native_64.c      | 14 ++++++++------
 arch/powerpc/mm/tlb-radix.c           |  4 ++--
 3 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 127ebf5..308004a 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -354,14 +354,17 @@
 #define PPC_TLBILX_VA(a, b)	PPC_TLBILX(3, a, b)
 #define PPC_WAIT(w)		stringify_in_c(.long PPC_INST_WAIT | \
 					__PPC_WC(w))
-#define PPC_TLBIE(lp,a) 	stringify_in_c(.long PPC_INST_TLBIE | \
-					       ___PPC_RB(a) | ___PPC_RS(lp))
+#define PPC_TLBIE(rb,lp) 	stringify_in_c(.long PPC_INST_TLBIE | \
+					       ___PPC_RB(rb) | ___PPC_RS(lp))
 #define	PPC_TLBIE_5(rb,rs,ric,prs,r) \
 				stringify_in_c(.long PPC_INST_TLBIE | \
 					___PPC_RB(rb) | ___PPC_RS(rs) | \
 					___PPC_RIC(ric) | ___PPC_PRS(prs) | \
 					___PPC_R(r))
-#define	PPC_TLBIEL(rb,rs,ric,prs,r) \
+#define	PPC_TLBIEL(rb,lp) \
+				stringify_in_c(.long PPC_INST_TLBIEL | \
+					___PPC_RB(rb) | ___PPC_RS(lp))
+#define	PPC_TLBIEL_5(rb,rs,ric,prs,r) \
 				stringify_in_c(.long PPC_INST_TLBIEL | \
 					___PPC_RB(rb) | ___PPC_RS(rs) | \
 					___PPC_RIC(ric) | ___PPC_PRS(prs) | \
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 0e4e965..b3c34c8 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -74,7 +74,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 		va |= ssize << 8;
 		sllp = get_sllp_encoding(apsize);
 		va |= sllp << 5;
-		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
+		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%0,%1), %2)
 			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
 			     : "memory");
 		break;
@@ -93,7 +93,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 		 */
 		va |= (vpn & 0xfe); /* AVAL */
 		va |= 1; /* L */
-		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
+		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%0,%1), %2)
 			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
 			     : "memory");
 		break;
@@ -123,8 +123,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 		va |= ssize << 8;
 		sllp = get_sllp_encoding(apsize);
 		va |= sllp << 5;
-		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
-			     : : "r"(va) : "memory");
+		asm volatile(ASM_FTR_IFCLR("tlbiel %0,0", PPC_TLBIEL(%0,0), %1)
+			     : : "r" (va),  "i" (CPU_FTR_ARCH_206)
+			     : "memory");
 		break;
 	default:
 		/* We need 14 to 14 + i bits of va */
@@ -141,8 +142,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 		 */
 		va |= (vpn & 0xfe);
 		va |= 1; /* L */
-		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
-			     : : "r"(va) : "memory");
+		asm volatile(ASM_FTR_IFCLR("tlbiel %0,1", PPC_TLBIEL(%0,0), %1)
+			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
+			     : "memory");
 		break;
 	}
 
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 48df05e..7d31440 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -35,7 +35,7 @@ static inline void __tlbiel_pid(unsigned long pid, int set,
 	r = 1;   /* radix format */
 
 	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+	asm volatile(PPC_TLBIEL_5(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
 }
@@ -80,7 +80,7 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	r = 1;   /* radix format */
 
 	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+	asm volatile(PPC_TLBIEL_5(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
 }
-- 
2.5.5


* Re: [v2] Fix __tlbiel in hash_native_64
  2016-09-13  6:40 [PATCH v2] Fix __tlbiel in hash_native_64 Balbir Singh
@ 2016-09-20  7:04 ` Michael Ellerman
  2016-09-28  7:25 ` [PATCH v2] Fix buglet in tlbiel where we set L=1 for ISA 206/7 Balbir Singh
  1 sibling, 0 replies; 3+ messages in thread
From: Michael Ellerman @ 2016-09-20  7:04 UTC (permalink / raw)
  To: Balbir Singh, linuxppc-dev
  Cc: Michael Ellerman, Benjamin Herrenschmidt, Aneesh Kumar K.V

On Tue, 2016-09-13 at 06:40:07 UTC, Balbir Singh wrote:
> __tlbie and __tlbiel are out of sync. __tlbie does the right thing: it
> issues "tlbie rb, L" when CPU_FTR_ARCH_206 (the cpu feature) is clear
> and "tlbie rb" otherwise. While cleaning up __tlbiel I noticed that it
> was setting the L bit (the (1 << 21) in the hand-built .long opcode)
> regardless of the ISA version for non-4k (L) pages. This patch fixes
> that issue. It also renames the current PPC_TLBIEL to PPC_TLBIEL_5 and
> introduces a new PPC_TLBIEL analogous to PPC_TLBIE.

This should give more description of what the problem is, i.e. that
we're setting a bit in the instruction which is reserved on some
processors, before talking about the solution or what tlbie does.
 
> The arguments to PPC_TLBIE have also been swapped so that the macro
> matches the actual assembly operand order, which makes the code easier
> to read.

The whole thing should be split in 3 patches:

One which fixes the buglet where we set L=1 on 2.06 and 2.07.
One which renames PPC_TLBIEL() to PPC_TLBIEL_5().
One which switches the order of the arguments to PPC_TLBIEL().

Also do we even need a macro for the two argument form? We've had uses of the
two argument tlbie since 2011, so I suspect the assembler will happily accept
it?
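
For context, the mnemonic-based alternative would look roughly like the
snippet below; this mirrors what the follow-up patch further down does
for __tlbiel, and is a sketch rather than a tested change:

  /* Sketch only: emit the instruction via its mnemonic, not a raw .long */
  asm volatile(ASM_FTR_IFCLR("tlbiel %0,1",  /* pre-2.06: explicit L operand */
                             "tlbiel %0",    /* 2.06+: single-operand form   */
                             %1)
               : : "r" (va), "i" (CPU_FTR_ARCH_206)
               : "memory");

Whether the assembler accepts the two-operand form everywhere is
exactly the open question above.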

cheers


* [PATCH v2] Fix buglet in tlbiel where we set L=1 for ISA 206/7
  2016-09-13  6:40 [PATCH v2] Fix __tlbiel in hash_native_64 Balbir Singh
  2016-09-20  7:04 ` [v2] " Michael Ellerman
@ 2016-09-28  7:25 ` Balbir Singh
  1 sibling, 0 replies; 3+ messages in thread
From: Balbir Singh @ 2016-09-28  7:25 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: Paul Mackerras, Aneesh Kumar K.V, Michael Ellerman,
	Benjamin Herrenschmidt


This patch fixes a bug where we set bit 10 of the tlbiel instruction
via our custom .long opcode. That bit is reserved in ISA 2.06/2.07. In
ISA 3.0, bit 10 is part of the RS register field, so setting L=1 there
could corrupt the meaning of the register field completely.
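
To make the overlap concrete, here is a small stand-alone sketch; the
field shifts follow ___PPC_RB()/___PPC_RS() in ppc-opcode.h and the
program only demonstrates the bit arithmetic:

  #include <stdio.h>
  #include <stdint.h>

  #define RB_SHIFT 11   /* ___PPC_RB(): RB register field */
  #define RS_SHIFT 21   /* ___PPC_RS(): RS register field (ISA 3.0 tlbiel) */

  int main(void)
  {
          uint32_t stray_l  = 1u << 21;            /* the old "(1 << 21)" L bit */
          uint32_t rs_field = 0x1fu << RS_SHIFT;   /* 5-bit RS register field   */

          /* On ISA 3.0 the stray bit lands in RS, flipping its low-order bit */
          printf("stray L bit overlaps RS: %s\n",
                 (stray_l & rs_field) ? "yes" : "no");
          return 0;
  }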

Signed-off-by: Balbir Singh <bsingharora@gmail.com>
---
 Changelog v1 -> v2:
	Use the compiler to generate tlbiel as suggested by mpe
	Drop the rest of the changes to macros

 arch/powerpc/mm/hash_native_64.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 0e4e965..0f77edf 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -123,8 +123,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 		va |= ssize << 8;
 		sllp = get_sllp_encoding(apsize);
 		va |= sllp << 5;
-		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
-			     : : "r"(va) : "memory");
+		asm volatile(ASM_FTR_IFCLR("tlbiel %0,0", "tlbiel %0", %1)
+			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
+			     : "memory");
 		break;
 	default:
 		/* We need 14 to 14 + i bits of va */
@@ -141,8 +142,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 		 */
 		va |= (vpn & 0xfe);
 		va |= 1; /* L */
-		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
-			     : : "r"(va) : "memory");
+		asm volatile(ASM_FTR_IFCLR("tlbiel %0,1", "tlbiel %0", %1)
+			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
+			     : "memory");
 		break;
 	}
 
-- 
2.5.5

