All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] powerpc/64s: reduce exception alignment
@ 2016-10-13  3:43 Nicholas Piggin
  2016-11-14 12:17 ` Michael Ellerman
  0 siblings, 1 reply; 2+ messages in thread
From: Nicholas Piggin @ 2016-10-13  3:43 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Piggin, Anton Blanchard

Exception handlers are aligned to 128 bytes (L1 cache) on 64s, which is
overkill. It can reduce the icache footprint of any individual exception
path. However taken as a whole, the expansion in icache footprint seems
likely to be counter-productive and cause more total misses.

Create IFETCH_ALIGN_SHIFT/BYTES, which should give optimal ifetch
alignment with much more reasonable alignment. This saves 1792 bytes
from head_64.o text with an allmodconfig build.

Other subarchitectures should define appropriate IFETCH_ALIGN_SHIFT
values if this becomes more widely used.

Cc: Anton Blanchard <anton@samba.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/include/asm/cache.h     | 3 +++
 arch/powerpc/include/asm/head-64.h   | 8 ++++----
 arch/powerpc/kernel/exceptions-64s.S | 2 +-
 3 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index ffbafbf..7657aa8 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -20,12 +20,15 @@
 #endif
 #else /* CONFIG_PPC64 */
 #define L1_CACHE_SHIFT		7
+#define IFETCH_ALIGN_SHIFT	4 /* POWER8,9 */
 #endif
 
 #define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 #define	SMP_CACHE_BYTES		L1_CACHE_BYTES
 
+#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)
+
 #if defined(__powerpc64__) && !defined(__ASSEMBLY__)
 struct ppc64_caches {
 	u32	dsize;			/* L1 d-cache size */
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index ab90c2f..fca7033 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -95,12 +95,12 @@ end_##sname:
 
 #define __FIXED_SECTION_ENTRY_BEGIN(sname, name, __align)	\
 	USE_FIXED_SECTION(sname);				\
-	.align __align;						\
+	.balign __align;					\
 	.global name;						\
 name:
 
 #define FIXED_SECTION_ENTRY_BEGIN(sname, name)			\
-	__FIXED_SECTION_ENTRY_BEGIN(sname, name, 0)
+	__FIXED_SECTION_ENTRY_BEGIN(sname, name, IFETCH_ALIGN_BYTES)
 
 #define FIXED_SECTION_ENTRY_BEGIN_LOCATION(sname, name, start)		\
 	USE_FIXED_SECTION(sname);				\
@@ -203,9 +203,9 @@ end_##sname:
 #define EXC_VIRT_END(name, start, end)			\
 	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, end)
 
-#define EXC_COMMON_BEGIN(name)					\
+#define EXC_COMMON_BEGIN(name)						\
 	USE_TEXT_SECTION();						\
-	.align	7;							\
+	.balign IFETCH_ALIGN_BYTES;					\
 	.global name;							\
 	DEFINE_FIXED_SYMBOL(name);					\
 name:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e680e84..4af87e4 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1403,7 +1403,7 @@ USE_TEXT_SECTION()
 /*
  * Hash table stuff
  */
-	.align	7
+	.balign	IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
 	andis.	r0,r4,0xa410		/* weird error? */
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: powerpc/64s: reduce exception alignment
  2016-10-13  3:43 [PATCH] powerpc/64s: reduce exception alignment Nicholas Piggin
@ 2016-11-14 12:17 ` Michael Ellerman
  0 siblings, 0 replies; 2+ messages in thread
From: Michael Ellerman @ 2016-11-14 12:17 UTC (permalink / raw)
  To: Nicholas Piggin, linuxppc-dev; +Cc: Anton Blanchard, Nicholas Piggin

On Thu, 2016-10-13 at 03:43:52 UTC, Nicholas Piggin wrote:
> Exception handlers are aligned to 128 bytes (L1 cache) on 64s, which is
> overkill. It can reduce the icache footprint of any individual exception
> path. However taken as a whole, the expansion in icache footprint seems
> likely to be counter-productive and cause more total misses.
> 
> Create IFETCH_ALIGN_SHIFT/BYTES, which should give optimal ifetch
> alignment with much more reasonable alignment. This saves 1792 bytes
> from head_64.o text with an allmodconfig build.
> 
> Other subarchitectures should define appropriate IFETCH_ALIGN_SHIFT
> values if this becomes more widely used.
> 
> Cc: Anton Blanchard <anton@samba.org>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>

Applied to powerpc next, thanks.

https://git.kernel.org/powerpc/c/f4329f2ecb149282fdfdd8830a936a

cheers

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2016-11-14 12:17 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-10-13  3:43 [PATCH] powerpc/64s: reduce exception alignment Nicholas Piggin
2016-11-14 12:17 ` Michael Ellerman

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.