All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree()
@ 2016-07-27 14:17 Michael Ellerman
  2016-07-27 14:17 ` [PATCH v3 02/21] powerpc/mm: Move disable_radix handling into mmu_early_init_devtree() Michael Ellerman
                   ` (20 more replies)
  0 siblings, 21 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:17 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

Empty for now, but we'll add to it in the next patch.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/book3s/64/mmu.h | 1 +
 arch/powerpc/include/asm/mmu.h           | 1 +
 arch/powerpc/kernel/prom.c               | 2 ++
 arch/powerpc/mm/init_64.c                | 6 ++++++
 4 files changed, 10 insertions(+)

v3: Merged into this series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index d4eda6420523..4eb4bd019716 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -107,6 +107,7 @@ extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
 
 /* MMU initialization */
+void mmu_early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 54471228f7b8..14220c5c12c9 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -210,6 +210,7 @@ extern void early_init_mmu(void);
 extern void early_init_mmu_secondary(void);
 extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				       phys_addr_t first_memblock_size);
+static inline void mmu_early_init_devtree(void) { }
 #endif /* __ASSEMBLY__ */
 #endif
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index bae3db791150..9686984e79c4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -750,6 +750,8 @@ void __init early_init_devtree(void *params)
 	if (disable_radix)
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
 
+	mmu_early_init_devtree();
+
 #ifdef CONFIG_PPC_POWERNV
 	/* Scan and build the list of machine check recoverable ranges */
 	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 33709bdb0419..d0fb33ac3db2 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -411,3 +411,9 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
 EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
 
 #endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
+
+#ifdef CONFIG_PPC_STD_MMU_64
+void __init mmu_early_init_devtree(void)
+{
+}
+#endif /* CONFIG_PPC_STD_MMU_64 */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 02/21] powerpc/mm: Move disable_radix handling into mmu_early_init_devtree()
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
@ 2016-07-27 14:17 ` Michael Ellerman
  2016-07-28  3:14   ` Balbir Singh
  2016-07-27 14:17 ` [PATCH v3 03/21] powerpc/mm: Do hash device tree scanning earlier Michael Ellerman
                   ` (19 subsequent siblings)
  20 siblings, 1 reply; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:17 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

Move the handling of the disable_radix command line argument into the
newly created mmu_early_init_devtree().

It's an MMU option so it's preferable to have it in an mm related file,
and it also means platforms that don't support radix don't have to carry
the code.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/kernel/prom.c | 13 -------------
 arch/powerpc/mm/init_64.c  | 11 +++++++++++
 2 files changed, 11 insertions(+), 13 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9686984e79c4..b4b6952e8991 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -647,14 +647,6 @@ static void __init early_reserve_mem(void)
 #endif
 }
 
-static bool disable_radix;
-static int __init parse_disable_radix(char *p)
-{
-	disable_radix = true;
-	return 0;
-}
-early_param("disable_radix", parse_disable_radix);
-
 void __init early_init_devtree(void *params)
 {
 	phys_addr_t limit;
@@ -744,11 +736,6 @@ void __init early_init_devtree(void *params)
 	 */
 	spinning_secondaries = boot_cpu_count - 1;
 #endif
-	/*
-	 * now fixup radix MMU mode based on kernel command line
-	 */
-	if (disable_radix)
-		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
 
 	mmu_early_init_devtree();
 
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d0fb33ac3db2..0d51e6e25db5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -413,7 +413,18 @@ EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
 #endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
 
 #ifdef CONFIG_PPC_STD_MMU_64
+static bool disable_radix;
+static int __init parse_disable_radix(char *p)
+{
+	disable_radix = true;
+	return 0;
+}
+early_param("disable_radix", parse_disable_radix);
+
 void __init mmu_early_init_devtree(void)
 {
+	/* Disable radix mode based on kernel command line. */
+	if (disable_radix)
+		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 03/21] powerpc/mm: Do hash device tree scanning earlier
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
  2016-07-27 14:17 ` [PATCH v3 02/21] powerpc/mm: Move disable_radix handling into mmu_early_init_devtree() Michael Ellerman
@ 2016-07-27 14:17 ` Michael Ellerman
  2016-07-28 12:40   ` [PATCH v4] " Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 04/21] powerpc/mm: Do radix " Michael Ellerman
                   ` (18 subsequent siblings)
  20 siblings, 1 reply; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:17 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

Currently MMU initialisation (early_init_mmu()) consists of a mixture of
scanning the device tree, setting MMU feature bits, and then also doing
actual initialisation of MMU data structures.

We'd like to decouple the setting of the MMU features from the actual
setup. So split out the device tree scanning, and associated code, and
call it from mmu_early_init_devtree().

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/book3s/64/mmu.h |  1 +
 arch/powerpc/mm/hash_utils_64.c          | 15 +++++++++------
 arch/powerpc/mm/init_64.c                |  3 +++
 3 files changed, 13 insertions(+), 6 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 4eb4bd019716..358f1410dc0d 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -108,6 +108,7 @@ extern int mmu_io_psize;
 
 /* MMU initialization */
 void mmu_early_init_devtree(void);
+void hash__early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1ff11c1bb182..5f922e93af25 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -759,12 +759,6 @@ static void __init htab_initialize(void)
 
 	DBG(" -> htab_initialize()\n");
 
-	/* Initialize segment sizes */
-	htab_init_seg_sizes();
-
-	/* Initialize page sizes */
-	htab_init_page_sizes();
-
 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
 		mmu_kernel_ssize = MMU_SEGSIZE_1T;
 		mmu_highuser_ssize = MMU_SEGSIZE_1T;
@@ -885,6 +879,15 @@ static void __init htab_initialize(void)
 #undef KB
 #undef MB
 
+void __init hash__early_init_devtree(void)
+{
+	/* Initialize segment sizes */
+	htab_init_seg_sizes();
+
+	/* Initialize page sizes */
+	htab_init_page_sizes();
+}
+
 void __init hash__early_init_mmu(void)
 {
 	/*
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 0d51e6e25db5..d023333c6c9a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -426,5 +426,8 @@ void __init mmu_early_init_devtree(void)
 	/* Disable radix mode based on kernel command line. */
 	if (disable_radix)
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
+
+	if (!radix_enabled())
+		hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 04/21] powerpc/mm: Do radix device tree scanning earlier
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
  2016-07-27 14:17 ` [PATCH v3 02/21] powerpc/mm: Move disable_radix handling into mmu_early_init_devtree() Michael Ellerman
  2016-07-27 14:17 ` [PATCH v3 03/21] powerpc/mm: Do hash device tree scanning earlier Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-28  3:48   ` Balbir Singh
  2016-07-27 14:18 ` [PATCH v3 05/21] powerpc/64: Do feature patching before MMU init Michael Ellerman
                   ` (17 subsequent siblings)
  20 siblings, 1 reply; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

Like we just did for hash, split the device tree scanning parts out and
call them from mmu_early_init_devtree().

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/book3s/64/mmu.h | 1 +
 arch/powerpc/mm/init_64.c                | 4 +++-
 arch/powerpc/mm/pgtable-radix.c          | 3 +--
 3 files changed, 5 insertions(+), 3 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 358f1410dc0d..9ee00c2576d0 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -109,6 +109,7 @@ extern int mmu_io_psize;
 /* MMU initialization */
 void mmu_early_init_devtree(void);
 void hash__early_init_devtree(void);
+void radix__early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d023333c6c9a..e0ab33d20a10 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -427,7 +427,9 @@ void __init mmu_early_init_devtree(void)
 	if (disable_radix)
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
 
-	if (!radix_enabled())
+	if (radix_enabled())
+		radix__early_init_devtree();
+	else
 		hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 003ff48a11b6..f34ccdbe0fbd 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -264,7 +264,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
 	return 1;
 }
 
-static void __init radix_init_page_sizes(void)
+void __init radix__early_init_devtree(void)
 {
 	int rc;
 
@@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void)
 	__pte_frag_nr = H_PTE_FRAG_NR;
 	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
-	radix_init_page_sizes();
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 		radix_init_native();
 		lpcr = mfspr(SPRN_LPCR);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 05/21] powerpc/64: Do feature patching before MMU init
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (2 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 04/21] powerpc/mm: Do radix " Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 06/21] powerpc/kernel: Check features don't change after patching Michael Ellerman
                   ` (16 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

Up until now we needed to do the MMU init before feature patching,
because part of the MMU init was scanning the device tree and setting
and/or clearing some MMU feature bits.

Now that we have split that MMU feature modification out into routines
called from early_init_devtree() (called earlier) we can now do feature
patching before calling MMU init.

The advantage of this is it means the remainder of the MMU init runs
with the final set of features which will apply for the rest of the life
of the system. This means we don't have to special case anything called
from MMU init to deal with a changing set of feature bits.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/kernel/setup_64.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index d8216aed22b7..984696136f96 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -298,12 +298,12 @@ void __init early_setup(unsigned long dt_ptr)
 	 */
 	configure_exceptions();
 
-	/* Initialize the hash table or TLB handling */
-	early_init_mmu();
-
 	/* Apply all the dynamic patching */
 	apply_feature_fixups();
 
+	/* Initialize the hash table or TLB handling */
+	early_init_mmu();
+
 	/*
 	 * At this point, we can let interrupts switch to virtual mode
 	 * (the MMU has been setup), so adjust the MSR in the PACA to
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 06/21] powerpc/kernel: Check features don't change after patching
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (3 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 05/21] powerpc/64: Do feature patching before MMU init Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 07/21] powerpc/mm: Make MMU_FTR_RADIX a MMU family feature Michael Ellerman
                   ` (15 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

Early in boot we binary patch some sections of code based on the CPU and
MMU feature bits. But it is a one-time patching, there is no facility
for repatching the code later if the set of features change.

It is a major bug if the set of features changes after we've done the
code patching - so add a check for it.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/lib/feature-fixups.c | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

v3: Merged into this series.

diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index defb2998b818..854b8ba40f8e 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -152,10 +152,19 @@ static void do_final_fixups(void)
 #endif
 }
 
-void apply_feature_fixups(void)
+static unsigned long __initdata saved_cpu_features;
+static unsigned int __initdata saved_mmu_features;
+#ifdef CONFIG_PPC64
+static unsigned long __initdata saved_firmware_features;
+#endif
+
+void __init apply_feature_fixups(void)
 {
 	struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec);
 
+	saved_cpu_features = spec->cpu_features;
+	saved_mmu_features = spec->mmu_features;
+
 	/*
 	 * Apply the CPU-specific and firmware specific fixups to kernel text
 	 * (nop out sections not relevant to this CPU or this firmware).
@@ -173,12 +182,28 @@ void apply_feature_fixups(void)
 			 PTRRELOC(&__stop___lwsync_fixup));
 
 #ifdef CONFIG_PPC64
+	saved_firmware_features = powerpc_firmware_features;
 	do_feature_fixups(powerpc_firmware_features,
 			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 #endif
 	do_final_fixups();
 }
 
+static int __init check_features(void)
+{
+	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
+	     "CPU features changed after feature patching!\n");
+	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
+	     "MMU features changed after feature patching!\n");
+#ifdef CONFIG_PPC64
+	WARN(saved_firmware_features != powerpc_firmware_features,
+	     "Firmware features changed after feature patching!\n");
+#endif
+
+	return 0;
+}
+late_initcall(check_features);
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x)	\
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 07/21] powerpc/mm: Make MMU_FTR_RADIX a MMU family feature
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (4 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 06/21] powerpc/kernel: Check features don't change after patching Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 08/21] powerpc/kernel: Convert mmu_has_feature() to returning bool Michael Ellerman
                   ` (14 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

MMU feature bits are defined such that we use the lower half to
represent MMU family features. Remove the strict split of half and
also move Radix to an MMU family feature. Radix introduces a new MMU
model and strictly speaking it is a new MMU family. This also frees
up bits which can be used for individual features later.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/book3s/64/mmu.h |  2 +-
 arch/powerpc/include/asm/mmu.h           | 15 +++++++--------
 arch/powerpc/kernel/entry_64.S           |  2 +-
 arch/powerpc/kernel/exceptions-64s.S     |  8 ++++----
 arch/powerpc/kernel/idle_book3s.S        |  2 +-
 arch/powerpc/kernel/prom.c               |  2 +-
 arch/powerpc/mm/init_64.c                |  2 +-
 7 files changed, 16 insertions(+), 17 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 9ee00c2576d0..ad2d501cddcf 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -24,7 +24,7 @@ struct mmu_psize_def {
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
 #ifdef CONFIG_PPC_RADIX_MMU
-#define radix_enabled() mmu_has_feature(MMU_FTR_RADIX)
+#define radix_enabled() mmu_has_feature(MMU_FTR_TYPE_RADIX)
 #else
 #define radix_enabled() (0)
 #endif
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 14220c5c12c9..599781e48552 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -12,7 +12,7 @@
  */
 
 /*
- * First half is MMU families
+ * MMU families
  */
 #define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
 #define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
@@ -21,9 +21,13 @@
 #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
 #define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)
 
+/* Radix page table supported and enabled */
+#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)
+
 /*
- * This is individual features
+ * Individual features below.
  */
+
 /*
  * We need to clear top 16bits of va (from the remaining 64 bits )in
  * tlbie* instructions
@@ -93,11 +97,6 @@
  */
 #define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)
 
-/*
- * Radix page table available
- */
-#define MMU_FTR_RADIX			ASM_CONST(0x80000000)
-
 /* MMU feature bit sets for various CPUs */
 #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
 	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
@@ -131,7 +130,7 @@ enum {
 		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
 		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
 #ifdef CONFIG_PPC_RADIX_MMU
-		MMU_FTR_RADIX |
+		MMU_FTR_TYPE_RADIX |
 #endif
 		0,
 };
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2e0c565754aa..0bdceef11e75 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -532,7 +532,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 #ifdef CONFIG_PPC_STD_MMU_64
 BEGIN_MMU_FTR_SECTION
 	b	2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 BEGIN_FTR_SECTION
 	clrrdi	r6,r8,28	/* get its ESID */
 	clrrdi	r9,r1,28	/* get current sp ESID */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 6200e4925d26..334c7fac7a4a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -938,7 +938,7 @@ BEGIN_MMU_FTR_SECTION
 	b	do_hash_page		/* Try to handle as hpte fault */
 MMU_FTR_SECTION_ELSE
 	b	handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 	.align  7
 	.globl  h_data_storage_common
@@ -969,7 +969,7 @@ BEGIN_MMU_FTR_SECTION
 	b	do_hash_page		/* Try to handle as hpte fault */
 MMU_FTR_SECTION_ELSE
 	b	handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
 
@@ -1390,7 +1390,7 @@ slb_miss_realmode:
 #ifdef CONFIG_PPC_STD_MMU_64
 BEGIN_MMU_FTR_SECTION
 	bl	slb_allocate_realmode
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 #endif
 	/* All done -- return from exception. */
 
@@ -1404,7 +1404,7 @@ BEGIN_MMU_FTR_SECTION
 	beq-	2f
 FTR_SECTION_ELSE
 	b	2f
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 .machine	push
 .machine	"power4"
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 335eb6cedae5..4c2222cffbbc 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -570,7 +570,7 @@ common_exit:
 
 BEGIN_MMU_FTR_SECTION
 	b	no_segments
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	/* Restore SLB  from PACA */
 	ld	r8,PACA_SLBSHADOWPTR(r13)
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b4b6952e8991..b0245bed6f54 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -170,7 +170,7 @@ static struct ibm_pa_feature {
 	 */
 	{CPU_FTR_TM_COMP, 0, 0,
 	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
-	{0, MMU_FTR_RADIX, 0, 0,		40, 0, 0},
+	{0, MMU_FTR_TYPE_RADIX, 0, 0,		40, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index e0ab33d20a10..6259f5db525b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -425,7 +425,7 @@ void __init mmu_early_init_devtree(void)
 {
 	/* Disable radix mode based on kernel command line. */
 	if (disable_radix)
-		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
+		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
 	if (radix_enabled())
 		radix__early_init_devtree();
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 08/21] powerpc/kernel: Convert mmu_has_feature() to returning bool
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (5 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 07/21] powerpc/mm: Make MMU_FTR_RADIX a MMU family feature Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 09/21] powerpc/kernel: Convert cpu_has_feature() " Michael Ellerman
                   ` (13 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

The intention is that the result is only used as a boolean, so enforce
that by changing the return type to bool.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/mmu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

v3: Split out.

diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 599781e48552..eb942a446969 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -135,9 +135,9 @@ enum {
 		0,
 };
 
-static inline int mmu_has_feature(unsigned long feature)
+static inline bool mmu_has_feature(unsigned long feature)
 {
-	return (MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
 }
 
 static inline void mmu_clear_feature(unsigned long feature)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 09/21] powerpc/kernel: Convert cpu_has_feature() to returning bool
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (6 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 08/21] powerpc/kernel: Convert mmu_has_feature() to returning bool Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline Michael Ellerman
                   ` (12 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

The intention is that the result is only used as a boolean, so enforce
that by changing the return type to bool.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/cputable.h | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

v3: Split out.

diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index df4fb5faba43..7bb87017d9db 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -2,6 +2,7 @@
 #define __ASM_POWERPC_CPUTABLE_H
 
 
+#include <linux/types.h>
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
 #include <uapi/asm/cputable.h>
@@ -576,12 +577,10 @@ enum {
 };
 #endif /* __powerpc64__ */
 
-static inline int cpu_has_feature(unsigned long feature)
+static inline bool cpu_has_feature(unsigned long feature)
 {
-	return (CPU_FTRS_ALWAYS & feature) ||
-	       (CPU_FTRS_POSSIBLE
-		& cur_cpu_spec->cpu_features
-		& feature);
+	return !!((CPU_FTRS_ALWAYS & feature) ||
+		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
 }
 
 #define HBP_NUM 1
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (7 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 09/21] powerpc/kernel: Convert cpu_has_feature() " Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-28  7:46   ` Nicholas Piggin
  2016-07-27 14:18 ` [PATCH v3 11/21] powerpc/mm: Add __cpu/__mmu_has_feature() Michael Ellerman
                   ` (11 subsequent siblings)
  20 siblings, 1 reply; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

Currently we have radix_enabled() three times, twice in asm/book3s/64/mmu.h
and then a fallback in asm/mmu.h.

Consolidate them in asm/mmu.h. While we're at it convert them to be
static inlines, and change the fallback case to returning a bool, like
mmu_has_feature().

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/book3s/64/mmu.h |  7 -------
 arch/powerpc/include/asm/mmu.h           | 16 ++++++++++++----
 2 files changed, 12 insertions(+), 11 deletions(-)

v3: New.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index ad2d501cddcf..70c995870297 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -23,13 +23,6 @@ struct mmu_psize_def {
 };
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
-#ifdef CONFIG_PPC_RADIX_MMU
-#define radix_enabled() mmu_has_feature(MMU_FTR_TYPE_RADIX)
-#else
-#define radix_enabled() (0)
-#endif
-
-
 #endif /* __ASSEMBLY__ */
 
 /* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index eb942a446969..f413b3213a3b 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -163,6 +163,18 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 }
 #endif /* !CONFIG_DEBUG_VM */
 
+#ifdef CONFIG_PPC_RADIX_MMU
+static inline bool radix_enabled(void)
+{
+	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+#else
+static inline bool radix_enabled(void)
+{
+	return false;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
@@ -230,9 +242,5 @@ static inline void mmu_early_init_devtree(void) { }
 #  include <asm/mmu-8xx.h>
 #endif
 
-#ifndef radix_enabled
-#define radix_enabled() (0)
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MMU_H_ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 11/21] powerpc/mm: Add __cpu/__mmu_has_feature()
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (8 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers Michael Ellerman
                   ` (10 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

In later patches, we will be switching cpu and mmu feature checks to
use static keys. For checks in early boot before jump labels are
initialized we need a variant of cpu/mmu_has_feature() that doesn't use
jump labels. So create those, called, unimaginatively,
__cpu/__mmu_has_feature().

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/cputable.h |  7 ++++++-
 arch/powerpc/include/asm/mmu.h      | 17 ++++++++++++++++-
 2 files changed, 22 insertions(+), 2 deletions(-)

v3: Don't change any logic.
    Bool conversions were split out.
    Don't convert any call sites in this patch.

diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 7bb87017d9db..85a6797f9231 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -577,12 +577,17 @@ enum {
 };
 #endif /* __powerpc64__ */
 
-static inline bool cpu_has_feature(unsigned long feature)
+static inline bool __cpu_has_feature(unsigned long feature)
 {
 	return !!((CPU_FTRS_ALWAYS & feature) ||
 		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
 }
 
+static inline bool cpu_has_feature(unsigned long feature)
+{
+	return __cpu_has_feature(feature);
+}
+
 #define HBP_NUM 1
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index f413b3213a3b..e3eff365e55d 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -135,11 +135,16 @@ enum {
 		0,
 };
 
-static inline bool mmu_has_feature(unsigned long feature)
+static inline bool __mmu_has_feature(unsigned long feature)
 {
 	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
 }
 
+static inline bool mmu_has_feature(unsigned long feature)
+{
+	return __mmu_has_feature(feature);
+}
+
 static inline void mmu_clear_feature(unsigned long feature)
 {
 	cur_cpu_spec->mmu_features &= ~feature;
@@ -168,11 +173,21 @@ static inline bool radix_enabled(void)
 {
 	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
 }
+
+static inline bool __radix_enabled(void)
+{
+	return __mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
 #else
 static inline bool radix_enabled(void)
 {
 	return false;
 }
+
+static inline bool __radix_enabled(void)
+{
+	return false;
+}
 #endif
 
 #endif /* !__ASSEMBLY__ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (9 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 11/21] powerpc/mm: Add __cpu/__mmu_has_feature() Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 21:37   ` Benjamin Herrenschmidt
                     ` (2 more replies)
  2016-07-27 14:18 ` [PATCH v3 13/21] jump_label: Make it possible for arches to invoke jump_label_init() earlier Michael Ellerman
                   ` (9 subsequent siblings)
  20 siblings, 3 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

This switches early feature checks to use the non static key variant of
the function. In later patches we will be switching cpu_has_feature()
and mmu_has_feature() to use static keys and we can use them only after
static key/jump label is initialized. Any check for feature before jump
label init should be done using this new helper.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/book3s/64/mmu.h | 4 ++--
 arch/powerpc/kernel/paca.c               | 2 +-
 arch/powerpc/kernel/setup_64.c           | 4 ++--
 arch/powerpc/mm/hash_utils_64.c          | 6 +++---
 arch/powerpc/mm/init_64.c                | 2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)

v3: Add/remove some sites now that we're rebased on the early MMU init series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 70c995870297..6deda6ecc4f7 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -116,7 +116,7 @@ extern void hash__early_init_mmu_secondary(void);
 extern void radix__early_init_mmu_secondary(void);
 static inline void early_init_mmu_secondary(void)
 {
-	if (radix_enabled())
+	if (__radix_enabled())
 		return radix__early_init_mmu_secondary();
 	return hash__early_init_mmu_secondary();
 }
@@ -128,7 +128,7 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 					      phys_addr_t first_memblock_size)
 {
-	if (radix_enabled())
+	if (__radix_enabled())
 		return radix__setup_initial_memory_limit(first_memblock_base,
 						   first_memblock_size);
 	return hash__setup_initial_memory_limit(first_memblock_base,
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 93dae296b6be..1b0b89e80824 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca)
 	 * if we do a GET_PACA() before the feature fixups have been
 	 * applied
 	 */
-	if (cpu_has_feature(CPU_FTR_HVMODE))
+	if (__cpu_has_feature(CPU_FTR_HVMODE))
 		mtspr(SPRN_SPRG_HPACA, local_paca);
 #endif
 	mtspr(SPRN_SPRG_PACA, local_paca);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 984696136f96..86ffab4c427b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -227,8 +227,8 @@ static void __init configure_exceptions(void)
 			opal_configure_cores();
 
 		/* Enable AIL if supported, and we are in hypervisor mode */
-		if (cpu_has_feature(CPU_FTR_HVMODE) &&
-		    cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		if (__cpu_has_feature(CPU_FTR_HVMODE) &&
+		    __cpu_has_feature(CPU_FTR_ARCH_207S)) {
 			unsigned long lpcr = mfspr(SPRN_LPCR);
 			mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
 		}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5f922e93af25..3aad12fb9d2f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -530,7 +530,7 @@ static bool might_have_hea(void)
 	 * we will never see an HEA ethernet device.
 	 */
 #ifdef CONFIG_IBMEBUS
-	return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
+	return !__cpu_has_feature(CPU_FTR_ARCH_207S) &&
 		!firmware_has_feature(FW_FEATURE_SPLPAR);
 #else
 	return false;
@@ -561,7 +561,7 @@ static void __init htab_init_page_sizes(void)
 	 * Not in the device-tree, let's fallback on known size
 	 * list for 16M capable GP & GR
 	 */
-	if (mmu_has_feature(MMU_FTR_16M_PAGE))
+	if (__mmu_has_feature(MMU_FTR_16M_PAGE))
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
 found:
@@ -591,7 +591,7 @@ found:
 		mmu_vmalloc_psize = MMU_PAGE_64K;
 		if (mmu_linear_psize == MMU_PAGE_4K)
 			mmu_linear_psize = MMU_PAGE_64K;
-		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
+		if (__mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
 			/*
 			 * When running on pSeries using 64k pages for ioremap
 			 * would stop us accessing the HEA ethernet. So if we
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6259f5db525b..c21d160088fa 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -427,7 +427,7 @@ void __init mmu_early_init_devtree(void)
 	if (disable_radix)
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
-	if (radix_enabled())
+	if (__radix_enabled())
 		radix__early_init_devtree();
 	else
 		hash__early_init_devtree();
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 13/21] jump_label: Make it possible for arches to invoke jump_label_init() earlier
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (10 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 14/21] powerpc: Call jump_label_init() in apply_feature_fixups() Michael Ellerman
                   ` (8 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

From: Kevin Hao <haokexin@gmail.com>

Some arches (powerpc at least) would like to invoke jump_label_init()
much earlier in boot. So check static_key_initialized in order to make
sure this function runs only once.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 kernel/jump_label.c | 3 +++
 1 file changed, 3 insertions(+)

v3: Updated change log.

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 05254eeb4b4e..14d81315fd7e 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -205,6 +205,9 @@ void __init jump_label_init(void)
 	struct static_key *key = NULL;
 	struct jump_entry *iter;
 
+	if (static_key_initialized)
+		return;
+
 	jump_label_lock();
 	jump_label_sort_entries(iter_start, iter_stop);
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 14/21] powerpc: Call jump_label_init() in apply_feature_fixups()
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (11 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 13/21] jump_label: Make it possible for arches to invoke jump_label_init() earlier Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 15/21] powerpc: Remove mfvtb() Michael Ellerman
                   ` (7 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

Call jump_label_init() early so that we can use static keys for CPU and
MMU feature checks.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/lib/feature-fixups.c | 8 ++++++++
 1 file changed, 8 insertions(+)

v3: Updated comment.

diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 854b8ba40f8e..2a1904739843 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/jump_label.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/init.h>
@@ -187,6 +188,13 @@ void __init apply_feature_fixups(void)
 			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 #endif
 	do_final_fixups();
+
+	/*
+	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
+	 * checks to take on their correct polarity based on the current set of
+	 * CPU/MMU features.
+	 */
+	jump_label_init();
 }
 
 static int __init check_features(void)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 15/21] powerpc: Remove mfvtb()
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (12 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 14/21] powerpc: Call jump_label_init() in apply_feature_fixups() Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 16/21] powerpc: Move cpu_has_feature() to a separate file Michael Ellerman
                   ` (6 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

From: Kevin Hao <haokexin@gmail.com>

This function is only used by get_vtb(). They are almost the same except
for the read from the real register. Move the mfspr() to get_vtb() and
kill the function mfvtb(). With this, we can eliminate the use of
cpu_has_feature() in very core header file like reg.h. This is a
preparation for the use of jump label for cpu_has_feature().

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/reg.h  | 9 ---------
 arch/powerpc/include/asm/time.h | 2 +-
 2 files changed, 1 insertion(+), 10 deletions(-)

v3: No change.

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d7e9ab5e4709..817c005205f0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1256,15 +1256,6 @@ static inline void msr_check_and_clear(unsigned long bits)
 		__msr_check_and_clear(bits);
 }
 
-static inline unsigned long mfvtb (void)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
-	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		return mfspr(SPRN_VTB);
-#endif
-	return 0;
-}
-
 #ifdef __powerpc64__
 #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
 #define mftb()		({unsigned long rval;				\
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 09211640a0e0..cbbeaf0a6597 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -103,7 +103,7 @@ static inline u64 get_vtb(void)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		return mfvtb();
+		return mfspr(SPRN_VTB);
 #endif
 	return 0;
 }
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 16/21] powerpc: Move cpu_has_feature() to a separate file
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (13 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 15/21] powerpc: Remove mfvtb() Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 17/21] powerpc: Add kconfig option to use jump labels for cpu/mmu_has_feature() Michael Ellerman
                   ` (5 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

From: Kevin Hao <haokexin@gmail.com>

We plan to use jump label for cpu_has_feature(). In order to implement
this we need to include the linux/jump_label.h in asm/cputable.h.

Unfortunately if we do that it leads to an include loop. The root of the
problem seems to be that reg.h needs cputable.h (for CPU_FTRs), and then
cputable.h via jump_label.h eventually pulls in hw_irq.h which needs
reg.h (for MSR_EE).

So move cpu_has_feature() to a separate file on its own.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/book3s/64/mmu-hash.h |  1 +
 arch/powerpc/include/asm/cacheflush.h         |  1 +
 arch/powerpc/include/asm/cpu_has_feature.h    | 20 ++++++++++++++++++++
 arch/powerpc/include/asm/cputable.h           | 11 -----------
 arch/powerpc/include/asm/cputime.h            |  1 +
 arch/powerpc/include/asm/dbell.h              |  1 +
 arch/powerpc/include/asm/dcr-native.h         |  1 +
 arch/powerpc/include/asm/mman.h               |  1 +
 arch/powerpc/include/asm/time.h               |  1 +
 arch/powerpc/include/asm/xor.h                |  1 +
 arch/powerpc/kernel/align.c                   |  1 +
 arch/powerpc/kernel/irq.c                     |  1 +
 arch/powerpc/kernel/process.c                 |  1 +
 arch/powerpc/kernel/setup-common.c            |  1 +
 arch/powerpc/kernel/setup_32.c                |  1 +
 arch/powerpc/kernel/smp.c                     |  1 +
 arch/powerpc/platforms/cell/pervasive.c       |  1 +
 arch/powerpc/xmon/ppc-dis.c                   |  1 +
 18 files changed, 36 insertions(+), 11 deletions(-)
 create mode 100644 arch/powerpc/include/asm/cpu_has_feature.h

v3: Change the header name, and flesh out change log.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 5eaf86ac143d..032e9f0bc708 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -24,6 +24,7 @@
 #include <asm/book3s/64/pgtable.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * SLB
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 69fb16d7a811..b77f0364df94 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -11,6 +11,7 @@
 
 #include <linux/mm.h>
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * No cache flushing is required when address mappings are changed,
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
new file mode 100644
index 000000000000..ad296b2f1d84
--- /dev/null
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_POWERPC_CPUFEATURES_H
+#define __ASM_POWERPC_CPUFEATURES_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/cputable.h>
+
+static inline bool __cpu_has_feature(unsigned long feature)
+{
+	return !!((CPU_FTRS_ALWAYS & feature) ||
+		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
+}
+
+static inline bool cpu_has_feature(unsigned long feature)
+{
+	return __cpu_has_feature(feature);
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_POWERPC_CPUFEATURES_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 85a6797f9231..92961bcfbe3f 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -577,17 +577,6 @@ enum {
 };
 #endif /* __powerpc64__ */
 
-static inline bool __cpu_has_feature(unsigned long feature)
-{
-	return !!((CPU_FTRS_ALWAYS & feature) ||
-		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
-}
-
-static inline bool cpu_has_feature(unsigned long feature)
-{
-	return __cpu_has_feature(feature);
-}
-
 #define HBP_NUM 1
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index e2452550bcb1..465653b6b393 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -28,6 +28,7 @@ static inline void setup_cputime_one_jiffy(void) { }
 #include <asm/div64.h>
 #include <asm/time.h>
 #include <asm/param.h>
+#include <asm/cpu_has_feature.h>
 
 typedef u64 __nocast cputime_t;
 typedef u64 __nocast cputime64_t;
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 5fa6b20eba10..378167377065 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -16,6 +16,7 @@
 #include <linux/threads.h>
 
 #include <asm/ppc-opcode.h>
+#include <asm/cpu_has_feature.h>
 
 #define PPC_DBELL_MSG_BRDCAST	(0x04000000)
 #define PPC_DBELL_TYPE(x)	(((x) & 0xf) << (63-36))
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 4efc11dacb98..4a2beef74277 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -24,6 +24,7 @@
 
 #include <linux/spinlock.h>
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 typedef struct {
 	unsigned int base;
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 2563c435a4b1..ef2d9ac1bc52 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -13,6 +13,7 @@
 
 #include <asm/cputable.h>
 #include <linux/mm.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits()
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index cbbeaf0a6597..b240666b7bc1 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 
 #include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
 
 /* time.c */
 extern unsigned long tb_ticks_per_jiffy;
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
index 0abb97f3be10..a36c2069d8ed 100644
--- a/arch/powerpc/include/asm/xor.h
+++ b/arch/powerpc/include/asm/xor.h
@@ -23,6 +23,7 @@
 #ifdef CONFIG_ALTIVEC
 
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
 		   unsigned long *v2_in);
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index c7097f933114..033f3385fa49 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -26,6 +26,7 @@
 #include <asm/emulated_ops.h>
 #include <asm/switch_to.h>
 #include <asm/disassemble.h>
+#include <asm/cpu_has_feature.h>
 
 struct aligninfo {
 	unsigned char len;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ac910d9982df..08887cf2b20e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -75,6 +75,7 @@
 #endif
 #define CREATE_TRACE_POINTS
 #include <asm/trace.h>
+#include <asm/cpu_has_feature.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a8cca88e972f..9ee2623e0f67 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -58,6 +58,7 @@
 #include <asm/code-patching.h>
 #include <asm/exec.h>
 #include <asm/livepatch.h>
+#include <asm/cpu_has_feature.h>
 
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 714b4ba7ab86..dba265c586df 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -66,6 +66,7 @@
 #include <asm/hugetlb.h>
 #include <asm/livepatch.h>
 #include <asm/mmu_context.h>
+#include <asm/cpu_has_feature.h>
 
 #include "setup.h"
 
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 00f57754407e..c3e861df4b20 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -37,6 +37,7 @@
 #include <asm/serial.h>
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
+#include <asm/cpu_has_feature.h>
 
 #define DBG(fmt...)
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 5a1f015ea9f3..25a39052bf6b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -55,6 +55,7 @@
 #include <asm/debug.h>
 #include <asm/kexec.h>
 #include <asm/asm-prototypes.h>
+#include <asm/cpu_has_feature.h>
 
 #ifdef DEBUG
 #include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index d17e98bc0c10..e7d075077cb0 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/reg.h>
 #include <asm/cell-regs.h>
+#include <asm/cpu_has_feature.h>
 
 #include "pervasive.h"
 
diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
index 89098f320ad5..ee9891734149 100644
--- a/arch/powerpc/xmon/ppc-dis.c
+++ b/arch/powerpc/xmon/ppc-dis.c
@@ -20,6 +20,7 @@ along with this file; see the file COPYING.  If not, write to the Free
 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
 
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 #include "nonstdio.h"
 #include "ansidecl.h"
 #include "ppc.h"
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 17/21] powerpc: Add kconfig option to use jump labels for cpu/mmu_has_feature()
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (14 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 16/21] powerpc: Move cpu_has_feature() to a separate file Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 18/21] powerpc: Add option to use jump label for cpu_has_feature() Michael Ellerman
                   ` (4 subsequent siblings)
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

Add a kconfig option to control whether we use jump label for the
cpu/mmu_has_feature() checks. Currently this does nothing, but we will
enabled it in the subsequent patches.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/Kconfig.debug | 9 +++++++++
 1 file changed, 9 insertions(+)

v3: New.

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index cfe08eab90c6..2512dac77adb 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -60,6 +60,15 @@ config CODE_PATCHING_SELFTEST
 	depends on DEBUG_KERNEL
 	default n
 
+config JUMP_LABEL_FEATURE_CHECKS
+	bool "Enable use of jump label for cpu/mmu_has_feature()"
+	depends on JUMP_LABEL
+	default y
+	help
+	  Selecting this option enables use of jump labels for some internal
+	  feature checks. This should generate more optimal code for those
+	  checks.
+
 config FTR_FIXUP_SELFTEST
 	bool "Run self-tests of the feature-fixup code"
 	depends on DEBUG_KERNEL
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 18/21] powerpc: Add option to use jump label for cpu_has_feature()
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (15 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 17/21] powerpc: Add kconfig option to use jump labels for cpu/mmu_has_feature() Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-28  7:51   ` Nicholas Piggin
  2016-07-27 14:18 ` [PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature() Michael Ellerman
                   ` (3 subsequent siblings)
  20 siblings, 1 reply; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

From: Kevin Hao <haokexin@gmail.com>

We do binary patching of asm code using CPU features, which is a
one-time operation, done during early boot. However checks of CPU
features in C code are currently done at run time, even though the set
of CPU features can never change after boot.

We can optimise this by using jump labels to implement cpu_has_feature(),
meaning checks in C code are binary patched into a single nop or branch.

For a C sequence along the lines of:

    if (cpu_has_feature(FOO))
         return 2;

The generated code before is roughly:

    ld      r9,-27640(r2)
    ld      r9,0(r9)
    lwz     r9,32(r9)
    cmpwi   cr7,r9,0
    bge     cr7, 1f
    li      r3,2
    blr
1:  ...

After (true):
    nop
    li      r3,2
    blr

After (false):
    b	1f
    li      r3,2
    blr
1:  ...

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/cpu_has_feature.h | 22 ++++++++++++++++++++++
 arch/powerpc/include/asm/cputable.h        |  6 ++++++
 arch/powerpc/kernel/cputable.c             | 20 ++++++++++++++++++++
 arch/powerpc/lib/feature-fixups.c          |  1 +
 4 files changed, 49 insertions(+)

v3: Rename MAX_CPU_FEATURES as we already have a #define with that name.
    Define NUM_CPU_FTR_KEYS as a constant.
    Rename the array to cpu_feature_keys.
    Use the kconfig we added to guard it.
    Rewrite the change log.

diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index ad296b2f1d84..18e60e61bea9 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -11,10 +11,32 @@ static inline bool __cpu_has_feature(unsigned long feature)
 		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
 }
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_CPU_FTR_KEYS	64
+
+extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
+
+static __always_inline bool cpu_has_feature(unsigned long feature)
+{
+	int i;
+
+	if (CPU_FTRS_ALWAYS & feature)
+		return true;
+
+	if (!(CPU_FTRS_POSSIBLE & feature))
+		return false;
+
+	i = __builtin_ctzl(feature);
+	return static_branch_likely(&cpu_feature_keys[i]);
+}
+#else
 static inline bool cpu_has_feature(unsigned long feature)
 {
 	return __cpu_has_feature(feature);
 }
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_POWERPC_CPUFEATURES_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 92961bcfbe3f..f23aa3450bca 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -123,6 +123,12 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 
 extern const char *powerpc_base_platform;
 
+#ifdef CONFIG_JUMP_LABEL
+extern void cpu_feature_keys_init(void);
+#else
+static inline void cpu_feature_keys_init(void) { }
+#endif
+
 /* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */
 enum {
 	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index d81f826d1029..f268850f8fda 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -15,6 +15,7 @@
 #include <linux/threads.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/jump_label.h>
 
 #include <asm/oprofile_impl.h>
 #include <asm/cputable.h>
@@ -2224,3 +2225,22 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
 
 	return NULL;
 }
+
+#ifdef CONFIG_JUMP_LABEL
+struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
+			[0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(cpu_feature_keys);
+
+void __init cpu_feature_keys_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_CPU_FTR_KEYS; i++) {
+		unsigned long f = 1ul << i;
+
+		if (!(cur_cpu_spec->cpu_features & f))
+			static_branch_disable(&cpu_feature_keys[i]);
+	}
+}
+#endif
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 2a1904739843..f90423faade0 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -195,6 +195,7 @@ void __init apply_feature_fixups(void)
 	 * CPU/MMU features.
 	 */
 	jump_label_init();
+	cpu_feature_keys_init();
 }
 
 static int __init check_features(void)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature()
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (16 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 18/21] powerpc: Add option to use jump label for cpu_has_feature() Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-28  7:52   ` Nicholas Piggin
  2016-08-08  0:59   ` Anton Blanchard
  2016-07-27 14:18 ` [PATCH v3 20/21] powerpc/mm: Catch usage of cpu/mmu_has_feature() before jump label init Michael Ellerman
                   ` (2 subsequent siblings)
  20 siblings, 2 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

From: Kevin Hao <haokexin@gmail.com>

As we just did for CPU features.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/mmu.h    | 36 ++++++++++++++++++++++++++++++++++++
 arch/powerpc/kernel/cputable.c    | 17 +++++++++++++++++
 arch/powerpc/lib/feature-fixups.c |  1 +
 3 files changed, 54 insertions(+)

v3: Rename to mmu_feature_keys, and NUM_MMU_FTR_KEYS.
    Use the kconfig.

diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e3eff365e55d..3900cb7fe7cf 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -140,6 +140,41 @@ static inline bool __mmu_has_feature(unsigned long feature)
 	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
 }
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_MMU_FTR_KEYS	32
+
+extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
+
+extern void mmu_feature_keys_init(void);
+
+static __always_inline bool mmu_has_feature(unsigned long feature)
+{
+	int i;
+
+	if (!(MMU_FTRS_POSSIBLE & feature))
+		return false;
+
+	i = __builtin_ctzl(feature);
+	return static_branch_likely(&mmu_feature_keys[i]);
+}
+
+static inline void mmu_clear_feature(unsigned long feature)
+{
+	int i;
+
+	i = __builtin_ctzl(feature);
+	cur_cpu_spec->mmu_features &= ~feature;
+	static_branch_disable(&mmu_feature_keys[i]);
+}
+#else
+
+static inline void mmu_feature_keys_init(void)
+{
+
+}
+
 static inline bool mmu_has_feature(unsigned long feature)
 {
 	return __mmu_has_feature(feature);
@@ -149,6 +184,7 @@ static inline void mmu_clear_feature(unsigned long feature)
 {
 	cur_cpu_spec->mmu_features &= ~feature;
 }
+#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */
 
 extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f268850f8fda..db14efc7d3e0 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2243,4 +2243,21 @@ void __init cpu_feature_keys_init(void)
 			static_branch_disable(&cpu_feature_keys[i]);
 	}
 }
+
+struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
+			[0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(mmu_feature_keys);
+
+void __init mmu_feature_keys_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_MMU_FTR_KEYS; i++) {
+		unsigned long f = 1ul << i;
+
+		if (!(cur_cpu_spec->mmu_features & f))
+			static_branch_disable(&mmu_feature_keys[i]);
+	}
+}
 #endif
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index f90423faade0..8db370cec547 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -196,6 +196,7 @@ void __init apply_feature_fixups(void)
 	 */
 	jump_label_init();
 	cpu_feature_keys_init();
+	mmu_feature_keys_init();
 }
 
 static int __init check_features(void)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 20/21] powerpc/mm: Catch usage of cpu/mmu_has_feature() before jump label init
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (17 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature() Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-27 14:18 ` [PATCH v3 21/21] powerpc/jump_label: Annotate jump label assembly Michael Ellerman
       [not found] ` <1469629097-30859-14-git-send-email-mpe__30163.7288918302$1469630223$gmane$org@ellerman.id.au>
  20 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

This allows us to catch incorrect usage of cpu_has_feature() and
mmu_has_feature() prior to jump labels being initialised.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/Kconfig.debug                 | 10 ++++++++++
 arch/powerpc/include/asm/cpu_has_feature.h |  7 +++++++
 arch/powerpc/include/asm/mmu.h             | 14 ++++++++++++++
 arch/powerpc/kernel/process.c              |  2 +-
 4 files changed, 32 insertions(+), 1 deletion(-)

v3: Use printk() and dump_stack() rather than WARN_ON(), because
    WARN_ON() may not work this early in boot.
    Rename the Kconfig.

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 2512dac77adb..0108fde08d90 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -69,6 +69,16 @@ config JUMP_LABEL_FEATURE_CHECKS
 	  feature checks. This should generate more optimal code for those
 	  checks.
 
+config JUMP_LABEL_FEATURE_CHECK_DEBUG
+	bool "Do extra check on feature fixup calls"
+	depends on DEBUG_KERNEL && JUMP_LABEL_FEATURE_CHECKS
+	default n
+	help
+	  This tries to catch incorrect usage of cpu_has_feature() and
+	  mmu_has_feature() in the code.
+
+	  If you don't know what this means, say N.
+
 config FTR_FIXUP_SELFTEST
 	bool "Run self-tests of the feature-fixup code"
 	depends on DEBUG_KERNEL
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 18e60e61bea9..b702a48c438d 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -22,6 +22,13 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+	if (!static_key_initialized) {
+		printk("Warning! cpu_has_feature() used prior to jump label init!\n");
+		dump_stack();
+		return __cpu_has_feature(feature);
+	}
+#endif
 	if (CPU_FTRS_ALWAYS & feature)
 		return true;
 
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 3900cb7fe7cf..50d8c9f78976 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -153,6 +153,13 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+	if (!static_key_initialized) {
+		printk("Warning! mmu_has_feature() used prior to jump label init!\n");
+		dump_stack();
+		return __mmu_has_feature(feature);
+	}
+#endif
 	if (!(MMU_FTRS_POSSIBLE & feature))
 		return false;
 
@@ -164,6 +171,13 @@ static inline void mmu_clear_feature(unsigned long feature)
 {
 	int i;
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+	if (!static_key_initialized) {
+		WARN_ON(1);
+		cur_cpu_spec->mmu_features &= ~feature;
+		return;
+	}
+#endif
 	i = __builtin_ctzl(feature);
 	cur_cpu_spec->mmu_features &= ~feature;
 	static_branch_disable(&mmu_feature_keys[i]);

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH v3 21/21] powerpc/jump_label: Annotate jump label assembly
  2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
                   ` (18 preceding siblings ...)
  2016-07-27 14:18 ` [PATCH v3 20/21] powerpc/mm: Catch usage of cpu/mmu_has_feature() before jump label init Michael Ellerman
@ 2016-07-27 14:18 ` Michael Ellerman
  2016-07-28  7:56   ` Nicholas Piggin
       [not found] ` <1469629097-30859-14-git-send-email-mpe__30163.7288918302$1469630223$gmane$org@ellerman.id.au>
  20 siblings, 1 reply; 42+ messages in thread
From: Michael Ellerman @ 2016-07-27 14:18 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, aneesh.kumar, haokexin

Add a comment to the generated assembler for jump labels. This makes it
easier to identify them in asm listings (generated with $ make foo.s).

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/jump_label.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

v3: New.

diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 47e155f15433..9878cac7b47c 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -21,7 +21,7 @@
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
-		 "nop\n\t"
+		 "nop # arch_static_branch\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
 		 ".popsection \n\t"
@@ -35,7 +35,7 @@ l_yes:
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
-		 "b %l[l_yes]\n\t"
+		 "b %l[l_yes] # arch_static_branch_jump\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
 		 ".popsection \n\t"
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers
  2016-07-27 14:18 ` [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers Michael Ellerman
@ 2016-07-27 21:37   ` Benjamin Herrenschmidt
  2016-07-28 11:24     ` Michael Ellerman
  2016-07-27 21:42   ` Benjamin Herrenschmidt
  2016-07-28  7:49   ` Nicholas Piggin
  2 siblings, 1 reply; 42+ messages in thread
From: Benjamin Herrenschmidt @ 2016-07-27 21:37 UTC (permalink / raw)
  To: Michael Ellerman, linuxppc-dev; +Cc: aneesh.kumar, haokexin

On Thu, 2016-07-28 at 00:18 +1000, Michael Ellerman wrote:
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h
> b/arch/powerpc/include/asm/book3s/64/mmu.h
> index 70c995870297..6deda6ecc4f7 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> @@ -116,7 +116,7 @@ extern void hash__early_init_mmu_secondary(void);
>  extern void radix__early_init_mmu_secondary(void);
>  static inline void early_init_mmu_secondary(void)
>  {
> -       if (radix_enabled())
> +       if (__radix_enabled())
>                 return radix__early_init_mmu_secondary();
>         return hash__early_init_mmu_secondary();
>  }

This one can go, no ?

Cheers,
Ben.

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers
  2016-07-27 14:18 ` [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers Michael Ellerman
  2016-07-27 21:37   ` Benjamin Herrenschmidt
@ 2016-07-27 21:42   ` Benjamin Herrenschmidt
  2016-07-28  7:49   ` Nicholas Piggin
  2 siblings, 0 replies; 42+ messages in thread
From: Benjamin Herrenschmidt @ 2016-07-27 21:42 UTC (permalink / raw)
  To: Michael Ellerman, linuxppc-dev; +Cc: aneesh.kumar, haokexin

On Thu, 2016-07-28 at 00:18 +1000, Michael Ellerman wrote:
> --- a/arch/powerpc/mm/hash_utils_64.c
> +++ b/arch/powerpc/mm/hash_utils_64.c
> @@ -530,7 +530,7 @@ static bool might_have_hea(void)
>          * we will never see an HEA ethernet device.
>          */
>  #ifdef CONFIG_IBMEBUS
> -       return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
> +       return !__cpu_has_feature(CPU_FTR_ARCH_207S) &&
>                 !firmware_has_feature(FW_FEATURE_SPLPAR);
>  #else

All these could go if that function was split. The part that reads the
DT stays in early_init_mmu_devtree (bastically up to "found:" and then
the bit at the end that scans the huge pages).

The rest, which just assigns the various mmu_*_psize can go into
eary_init_mmu(). That means the only conversion needed is the one
below:

>         return false;
> @@ -561,7 +561,7 @@ static void __init htab_init_page_sizes(void)
>          * Not in the device-tree, let's fallback on known size
>          * list for 16M capable GP & GR
>          */
> -       if (mmu_has_feature(MMU_FTR_16M_PAGE))
> +       if (__mmu_has_feature(MMU_FTR_16M_PAGE))
>                 memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
>                        sizeof(mmu_psize_defaults_gp));
>  found:

And the rest can remain.

> @@ -591,7 +591,7 @@ found:
>                 mmu_vmalloc_psize = MMU_PAGE_64K;
>                 if (mmu_linear_psize == MMU_PAGE_4K)
>                         mmu_linear_psize = MMU_PAGE_64K;
> -               if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
> +               if (__mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
>                         /*
>                          * When running on pSeries using 64k pages
> for ioremap
>                          * would stop us accessing the HEA ethernet.
> So if we

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 02/21] powerpc/mm: Move disable_radix handling into mmu_early_init_devtree()
  2016-07-27 14:17 ` [PATCH v3 02/21] powerpc/mm: Move disable_radix handling into mmu_early_init_devtree() Michael Ellerman
@ 2016-07-28  3:14   ` Balbir Singh
  0 siblings, 0 replies; 42+ messages in thread
From: Balbir Singh @ 2016-07-28  3:14 UTC (permalink / raw)
  To: linuxppc-dev



On 28/07/16 00:17, Michael Ellerman wrote:
> Move the handling of the disable_radix command line argument into the
> newly created mmu_early_init_devtree().
> 
> It's an MMU option so it's preferable to have it in an mm related file,
> and it also means platforms that don't support radix don't have to carry
> the code.
> 
> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
> ---

Should patch 1 and 2 be squashed together? Ideally nothing should ever bisect at patch 1

Anyway,
Acked-by: Balbir Singh <bsingharora@gmail.com>

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 04/21] powerpc/mm: Do radix device tree scanning earlier
  2016-07-27 14:18 ` [PATCH v3 04/21] powerpc/mm: Do radix " Michael Ellerman
@ 2016-07-28  3:48   ` Balbir Singh
  2016-07-28  8:11     ` Michael Ellerman
  0 siblings, 1 reply; 42+ messages in thread
From: Balbir Singh @ 2016-07-28  3:48 UTC (permalink / raw)
  To: Michael Ellerman, linuxppc-dev; +Cc: haokexin, aneesh.kumar



On 28/07/16 00:18, Michael Ellerman wrote:
> Like we just did for hash, split the device tree scanning parts out and
> call them from mmu_early_init_devtree().
> 
> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
> ---
>  arch/powerpc/include/asm/book3s/64/mmu.h | 1 +
>  arch/powerpc/mm/init_64.c                | 4 +++-
>  arch/powerpc/mm/pgtable-radix.c          | 3 +--
>  3 files changed, 5 insertions(+), 3 deletions(-)
> 
> v3: Merged into this series.
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
> index 358f1410dc0d..9ee00c2576d0 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> @@ -109,6 +109,7 @@ extern int mmu_io_psize;
>  /* MMU initialization */
>  void mmu_early_init_devtree(void);
>  void hash__early_init_devtree(void);
> +void radix__early_init_devtree(void);
>  extern void radix_init_native(void);
>  extern void hash__early_init_mmu(void);
>  extern void radix__early_init_mmu(void);
> diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
> index d023333c6c9a..e0ab33d20a10 100644
> --- a/arch/powerpc/mm/init_64.c
> +++ b/arch/powerpc/mm/init_64.c
> @@ -427,7 +427,9 @@ void __init mmu_early_init_devtree(void)
>  	if (disable_radix)
>  		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
>  
> -	if (!radix_enabled())
> +	if (radix_enabled())
> +		radix__early_init_devtree();
> +	else
>  		hash__early_init_devtree();
>  }
>  #endif /* CONFIG_PPC_STD_MMU_64 */
> diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
> index 003ff48a11b6..f34ccdbe0fbd 100644
> --- a/arch/powerpc/mm/pgtable-radix.c
> +++ b/arch/powerpc/mm/pgtable-radix.c
> @@ -264,7 +264,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
>  	return 1;
>  }
>  
> -static void __init radix_init_page_sizes(void)
> +void __init radix__early_init_devtree(void)
>  {
>  	int rc;
>  
> @@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void)
>  	__pte_frag_nr = H_PTE_FRAG_NR;
>  	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
>  
> -	radix_init_page_sizes();
>  	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
>  		radix_init_native();
>  		lpcr = mfspr(SPRN_LPCR);
>

If I am reading this correctly, radix_init_page_sizes() has become
radix__early_init_devtree() where as hash__early_init_devtree() initializes
both segment and page sizes? I would still like to keep

mmu_early_init_devtree()
	-> radix__early_init_devtree()
		-> radix__init_page_sizes()


Balbir Singh.

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline
  2016-07-27 14:18 ` [PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline Michael Ellerman
@ 2016-07-28  7:46   ` Nicholas Piggin
  2016-07-29 11:42     ` Michael Ellerman
  0 siblings, 1 reply; 42+ messages in thread
From: Nicholas Piggin @ 2016-07-28  7:46 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: linuxppc-dev, haokexin, aneesh.kumar

On Thu, 28 Jul 2016 00:18:06 +1000
Michael Ellerman <mpe@ellerman.id.au> wrote:

> Currently we have radix_enabled() three times, twice in
> asm/book3s/64/mmu.h and then a fallback in asm/mmu.h.
> 
> Consolidate them in asm/mmu.h. While we're at it convert them to be
> static inlines, and change the fallback case to returning a bool, like
> mmu_has_feature().
> 
> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
> ---
>  arch/powerpc/include/asm/book3s/64/mmu.h |  7 -------
>  arch/powerpc/include/asm/mmu.h           | 16 ++++++++++++----
>  2 files changed, 12 insertions(+), 11 deletions(-)
> 
> v3: New.
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h
> b/arch/powerpc/include/asm/book3s/64/mmu.h index
> ad2d501cddcf..70c995870297 100644 ---
> a/arch/powerpc/include/asm/book3s/64/mmu.h +++
> b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -23,13 +23,6 @@ struct
> mmu_psize_def { };
>  extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
>  
> -#ifdef CONFIG_PPC_RADIX_MMU
> -#define radix_enabled() mmu_has_feature(MMU_FTR_TYPE_RADIX)
> -#else
> -#define radix_enabled() (0)
> -#endif
> -
> -
>  #endif /* __ASSEMBLY__ */
>  
>  /* 64-bit classic hash table MMU */
> diff --git a/arch/powerpc/include/asm/mmu.h
> b/arch/powerpc/include/asm/mmu.h index eb942a446969..f413b3213a3b
> 100644 --- a/arch/powerpc/include/asm/mmu.h
> +++ b/arch/powerpc/include/asm/mmu.h
> @@ -163,6 +163,18 @@ static inline void assert_pte_locked(struct
> mm_struct *mm, unsigned long addr) }
>  #endif /* !CONFIG_DEBUG_VM */
>  
> +#ifdef CONFIG_PPC_RADIX_MMU
> +static inline bool radix_enabled(void)
> +{
> +	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
> +}
> +#else
> +static inline bool radix_enabled(void)
> +{
> +	return false;
> +}
> +#endif

Won't MMU_FTRS_POSSIBLE just do the right thing when
!CONFIG_PPC_RADIX_MMU?

Thanks,
Nick

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers
  2016-07-27 14:18 ` [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers Michael Ellerman
  2016-07-27 21:37   ` Benjamin Herrenschmidt
  2016-07-27 21:42   ` Benjamin Herrenschmidt
@ 2016-07-28  7:49   ` Nicholas Piggin
  2016-07-28 13:04     ` Michael Ellerman
  2 siblings, 1 reply; 42+ messages in thread
From: Nicholas Piggin @ 2016-07-28  7:49 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: linuxppc-dev, haokexin, aneesh.kumar

On Thu, 28 Jul 2016 00:18:08 +1000
Michael Ellerman <mpe@ellerman.id.au> wrote:

> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
> 
> This switches early feature checks to use the non static key variant
> of the function. In later patches we will be switching
> cpu_has_feature() and mmu_has_feature() to use static keys and we can
> use them only after static key/jump label is initialized. Any check
> for feature before jump label init should be done using this new
> helper.

Can't convince you to call it *_has_feature_early()?

Any point to a WARN_ON_ONCE() in these guys that trips if they are
used after the jump labels are set up?

Thanks,
Nick

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 18/21] powerpc: Add option to use jump label for cpu_has_feature()
  2016-07-27 14:18 ` [PATCH v3 18/21] powerpc: Add option to use jump label for cpu_has_feature() Michael Ellerman
@ 2016-07-28  7:51   ` Nicholas Piggin
  0 siblings, 0 replies; 42+ messages in thread
From: Nicholas Piggin @ 2016-07-28  7:51 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: linuxppc-dev, haokexin, aneesh.kumar

On Thu, 28 Jul 2016 00:18:14 +1000
Michael Ellerman <mpe@ellerman.id.au> wrote:

> From: Kevin Hao <haokexin@gmail.com>
> 
> We do binary patching of asm code using CPU features, which is a
> one-time operation, done during early boot. However checks of CPU
> features in C code are currently done at run time, even though the set
> of CPU features can never change after boot.
> 
> We can optimise this by using jump labels to implement
> cpu_has_feature(), meaning checks in C code are binary patched into a
> single nop or branch.
> 
> For a C sequence along the lines of:
> 
>     if (cpu_has_feature(FOO))
>          return 2;
> 
> The generated code before is roughly:
> 
>     ld      r9,-27640(r2)
>     ld      r9,0(r9)
>     lwz     r9,32(r9)
>     cmpwi   cr7,r9,0
>     bge     cr7, 1f
>     li      r3,2
>     blr
> 1:  ...
> 
> After (true):
>     nop
>     li      r3,2
>     blr
> 
> After (false):
>     b	1f
>     li      r3,2
>     blr
> 1:  ...
> 
> Signed-off-by: Kevin Hao <haokexin@gmail.com>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
> ---
>  arch/powerpc/include/asm/cpu_has_feature.h | 22
> ++++++++++++++++++++++ arch/powerpc/include/asm/cputable.h        |
> 6 ++++++ arch/powerpc/kernel/cputable.c             | 20
> ++++++++++++++++++++ arch/powerpc/lib/feature-fixups.c          |  1 +
>  4 files changed, 49 insertions(+)
> 
> v3: Rename MAX_CPU_FEATURES as we already have a #define with that
> name. Define NUM_CPU_FTR_KEYS as a constant.
>     Rename the array to cpu_feature_keys.
>     Use the kconfig we added to guard it.
>     Rewrite the change log.
> 
> diff --git a/arch/powerpc/include/asm/cpu_has_feature.h
> b/arch/powerpc/include/asm/cpu_has_feature.h index
> ad296b2f1d84..18e60e61bea9 100644 ---
> a/arch/powerpc/include/asm/cpu_has_feature.h +++
> b/arch/powerpc/include/asm/cpu_has_feature.h @@ -11,10 +11,32 @@
> static inline bool __cpu_has_feature(unsigned long feature)
> (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); }
>  
> +#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
> +#include <linux/jump_label.h>
> +
> +#define NUM_CPU_FTR_KEYS	64
> +
> +extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
> +
> +static __always_inline bool cpu_has_feature(unsigned long feature)
> +{
> +	int i;
> +
> +	if (CPU_FTRS_ALWAYS & feature)
> +		return true;
> +
> +	if (!(CPU_FTRS_POSSIBLE & feature))
> +		return false;
> +
> +	i = __builtin_ctzl(feature);
> +	return static_branch_likely(&cpu_feature_keys[i]);

Just a reminder to add a BUILD_BUG_ON(!__builtin_constant_p(feature));
for this.

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature()
  2016-07-27 14:18 ` [PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature() Michael Ellerman
@ 2016-07-28  7:52   ` Nicholas Piggin
  2016-08-08  0:59   ` Anton Blanchard
  1 sibling, 0 replies; 42+ messages in thread
From: Nicholas Piggin @ 2016-07-28  7:52 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: linuxppc-dev, haokexin, aneesh.kumar

On Thu, 28 Jul 2016 00:18:15 +1000
Michael Ellerman <mpe@ellerman.id.au> wrote:

> From: Kevin Hao <haokexin@gmail.com>
> 
> As we just did for CPU features.
> 
> Signed-off-by: Kevin Hao <haokexin@gmail.com>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
> ---
>  arch/powerpc/include/asm/mmu.h    | 36
> ++++++++++++++++++++++++++++++++++++
> arch/powerpc/kernel/cputable.c    | 17 +++++++++++++++++
> arch/powerpc/lib/feature-fixups.c |  1 + 3 files changed, 54
> insertions(+)
> 
> v3: Rename to mmu_feature_keys, and NUM_MMU_FTR_KEYS.
>     Use the kconfig.
> 
> diff --git a/arch/powerpc/include/asm/mmu.h
> b/arch/powerpc/include/asm/mmu.h index e3eff365e55d..3900cb7fe7cf
> 100644 --- a/arch/powerpc/include/asm/mmu.h
> +++ b/arch/powerpc/include/asm/mmu.h
> @@ -140,6 +140,41 @@ static inline bool __mmu_has_feature(unsigned
> long feature) return !!(MMU_FTRS_POSSIBLE &
> cur_cpu_spec->mmu_features & feature); }
>  
> +#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
> +#include <linux/jump_label.h>
> +
> +#define NUM_MMU_FTR_KEYS	32
> +
> +extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
> +
> +extern void mmu_feature_keys_init(void);
> +
> +static __always_inline bool mmu_has_feature(unsigned long feature)
> +{
> +	int i;
> +
> +	if (!(MMU_FTRS_POSSIBLE & feature))
> +		return false;
> +
> +	i = __builtin_ctzl(feature);
> +	return static_branch_likely(&mmu_feature_keys[i]);
> +}

And here.

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 21/21] powerpc/jump_label: Annotate jump label assembly
  2016-07-27 14:18 ` [PATCH v3 21/21] powerpc/jump_label: Annotate jump label assembly Michael Ellerman
@ 2016-07-28  7:56   ` Nicholas Piggin
  0 siblings, 0 replies; 42+ messages in thread
From: Nicholas Piggin @ 2016-07-28  7:56 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: linuxppc-dev, haokexin, aneesh.kumar

On Thu, 28 Jul 2016 00:18:17 +1000
Michael Ellerman <mpe@ellerman.id.au> wrote:

> Add a comment to the generated assembler for jump labels. This makes
> it easier to identify them in asm listings (generated with $ make
> foo.s).

The series looks in much better shape now.

As a disclaimer I did not go through the shuffling around of the early
init code too closely, and I don't know that part of the arch very
well. But other than that it looks good.

Thanks,
Nick

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 04/21] powerpc/mm: Do radix device tree scanning earlier
  2016-07-28  3:48   ` Balbir Singh
@ 2016-07-28  8:11     ` Michael Ellerman
  2016-07-28 11:55       ` Balbir Singh
  0 siblings, 1 reply; 42+ messages in thread
From: Michael Ellerman @ 2016-07-28  8:11 UTC (permalink / raw)
  To: Balbir Singh, linuxppc-dev; +Cc: haokexin, aneesh.kumar

Balbir Singh <bsingharora@gmail.com> writes:

> On 28/07/16 00:18, Michael Ellerman wrote:
>> diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
>> index 003ff48a11b6..f34ccdbe0fbd 100644
>> --- a/arch/powerpc/mm/pgtable-radix.c
>> +++ b/arch/powerpc/mm/pgtable-radix.c
>> @@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void)
>>  	__pte_frag_nr = H_PTE_FRAG_NR;
>>  	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
>>  
>> -	radix_init_page_sizes();
>>  	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
>>  		radix_init_native();
>>  		lpcr = mfspr(SPRN_LPCR);
>>
>
> If I am reading this correctly, radix_init_page_sizes() has become
> radix__early_init_devtree() where as hash__early_init_devtree() initializes
> both segment and page sizes? I would still like to keep
>
> mmu_early_init_devtree()
> 	-> radix__early_init_devtree()
> 		-> radix__init_page_sizes()

But then radix__early_init_devtree() would just be:

void radix__early_init_devtree(void)
{
    radix__init_page_sizes();
}

Which seems silly.

I'm doing a new version which splits the htab scanning from the page
init more, as Ben suggested.

cheers

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers
  2016-07-27 21:37   ` Benjamin Herrenschmidt
@ 2016-07-28 11:24     ` Michael Ellerman
  0 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-28 11:24 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, linuxppc-dev; +Cc: aneesh.kumar, haokexin

Benjamin Herrenschmidt <benh@kernel.crashing.org> writes:

> On Thu, 2016-07-28 at 00:18 +1000, Michael Ellerman wrote:
>>
>> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h
>> b/arch/powerpc/include/asm/book3s/64/mmu.h
>> index 70c995870297..6deda6ecc4f7 100644
>> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
>> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
>> @@ -116,7 +116,7 @@ extern void hash__early_init_mmu_secondary(void);
>> =C2=A0extern void radix__early_init_mmu_secondary(void);
>> =C2=A0static inline void early_init_mmu_secondary(void)
>> =C2=A0{
>> -       if (radix_enabled())
>> +       if (__radix_enabled())
>>                 return radix__early_init_mmu_secondary();
>>         return hash__early_init_mmu_secondary();
>>  }
>
> This one can go, no ?

Yep.

cheers

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 04/21] powerpc/mm: Do radix device tree scanning earlier
  2016-07-28  8:11     ` Michael Ellerman
@ 2016-07-28 11:55       ` Balbir Singh
  0 siblings, 0 replies; 42+ messages in thread
From: Balbir Singh @ 2016-07-28 11:55 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: Balbir Singh, linuxppc-dev, haokexin, aneesh.kumar

On Thu, Jul 28, 2016 at 06:11:18PM +1000, Michael Ellerman wrote:
> Balbir Singh <bsingharora@gmail.com> writes:
> 
> > On 28/07/16 00:18, Michael Ellerman wrote:
> >> diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
> >> index 003ff48a11b6..f34ccdbe0fbd 100644
> >> --- a/arch/powerpc/mm/pgtable-radix.c
> >> +++ b/arch/powerpc/mm/pgtable-radix.c
> >> @@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void)
> >>  	__pte_frag_nr = H_PTE_FRAG_NR;
> >>  	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
> >>  
> >> -	radix_init_page_sizes();
> >>  	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
> >>  		radix_init_native();
> >>  		lpcr = mfspr(SPRN_LPCR);
> >>
> >
> > If I am reading this correctly, radix_init_page_sizes() has become
> > radix__early_init_devtree() where as hash__early_init_devtree() initializes
> > both segment and page sizes? I would still like to keep
> >
> > mmu_early_init_devtree()
> > 	-> radix__early_init_devtree()
> > 		-> radix__init_page_sizes()
> 
> But then radix__early_init_devtree() would just be:
> 
> void radix__early_init_devtree(void)
> {
>     radix__init_page_sizes();
> }
> 
> Which seems silly.
>

But for a person parsing both hpte and radix bits, the code seems
saner and the compiler will do the right thing
 
> I'm doing a new version which splits the htab scanning from the page
> init more, as Ben suggested.
>

Sounds good

Balbir 

^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v4] powerpc/mm: Do hash device tree scanning earlier
  2016-07-27 14:17 ` [PATCH v3 03/21] powerpc/mm: Do hash device tree scanning earlier Michael Ellerman
@ 2016-07-28 12:40   ` Michael Ellerman
  0 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-28 12:40 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Benjamin Herrenschmidt, bsingharora, aneesh.kumar

Currently MMU initialisation (early_init_mmu()) consists of a mixture of
scanning the device tree, setting MMU feature bits, and then also doing
actual initialisation of MMU data structures.

We'd like to decouple the setting of the MMU features from the actual
setup. So split out the device tree scanning, and associated code, and
call it from mmu_init_early_devtree().

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/book3s/64/mmu.h |  1 +
 arch/powerpc/mm/hash_utils_64.c          | 53 ++++++++++++++++----------------
 arch/powerpc/mm/init_64.c                |  3 ++
 3 files changed, 31 insertions(+), 26 deletions(-)

v4: Split htab_init_page_sizes() into two parts, the bit that needs to scan the
    device tree (now htab_scan_page_sizes()), and the rest.
    Fold htab_init_seg_sizes() into hash__early_init_devtree().

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 4eb4bd019716..358f1410dc0d 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -108,6 +108,7 @@ extern int mmu_io_psize;
 
 /* MMU initialization */
 void mmu_early_init_devtree(void);
+void hash__early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b78b5d211278..1a96b284b1a6 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -363,11 +363,6 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
 	return 0;
 }
 
-static void __init htab_init_seg_sizes(void)
-{
-	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
-}
-
 static int __init get_idx_from_shift(unsigned int shift)
 {
 	int idx = -1;
@@ -539,7 +534,7 @@ static bool might_have_hea(void)
 
 #endif /* #ifdef CONFIG_PPC_64K_PAGES */
 
-static void __init htab_init_page_sizes(void)
+static void __init htab_scan_page_sizes(void)
 {
 	int rc;
 
@@ -554,17 +549,23 @@ static void __init htab_init_page_sizes(void)
 	 * Try to find the available page sizes in the device-tree
 	 */
 	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
-	if (rc != 0)  /* Found */
-		goto found;
-
-	/*
-	 * Not in the device-tree, let's fallback on known size
-	 * list for 16M capable GP & GR
-	 */
-	if (mmu_has_feature(MMU_FTR_16M_PAGE))
+	if (rc == 0 && mmu_has_feature(MMU_FTR_16M_PAGE)) {
+		/*
+		 * Nothing in the device-tree, but the CPU supports 16M pages,
+		 * so let's fallback on a known size list for 16M capable CPUs.
+		 */
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
-found:
+	}
+
+#ifdef CONFIG_HUGETLB_PAGE
+	/* Reserve 16G huge page memory sections for huge pages */
+	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+#endif /* CONFIG_HUGETLB_PAGE */
+}
+
+static void __init htab_init_page_sizes(void)
+{
 	if (!debug_pagealloc_enabled()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
@@ -630,11 +631,6 @@ found:
 	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
 #endif
 	       );
-
-#ifdef CONFIG_HUGETLB_PAGE
-	/* Reserve 16G huge page memory sections for huge pages */
-	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
-#endif /* CONFIG_HUGETLB_PAGE */
 }
 
 static int __init htab_dt_scan_pftsize(unsigned long node,
@@ -759,12 +755,6 @@ static void __init htab_initialize(void)
 
 	DBG(" -> htab_initialize()\n");
 
-	/* Initialize segment sizes */
-	htab_init_seg_sizes();
-
-	/* Initialize page sizes */
-	htab_init_page_sizes();
-
 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
 		mmu_kernel_ssize = MMU_SEGSIZE_1T;
 		mmu_highuser_ssize = MMU_SEGSIZE_1T;
@@ -885,8 +875,19 @@ static void __init htab_initialize(void)
 #undef KB
 #undef MB
 
+void __init hash__early_init_devtree(void)
+{
+	/* Initialize segment sizes */
+	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+
+	/* Initialize page sizes */
+	htab_scan_page_sizes();
+}
+
 void __init hash__early_init_mmu(void)
 {
+	htab_init_page_sizes();
+
 	/*
 	 * initialize page table size
 	 */
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 0d51e6e25db5..d023333c6c9a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -426,5 +426,8 @@ void __init mmu_early_init_devtree(void)
 	/* Disable radix mode based on kernel command line. */
 	if (disable_radix)
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
+
+	if (!radix_enabled())
+		hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers
  2016-07-28  7:49   ` Nicholas Piggin
@ 2016-07-28 13:04     ` Michael Ellerman
  0 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-28 13:04 UTC (permalink / raw)
  To: Nicholas Piggin; +Cc: linuxppc-dev, haokexin, aneesh.kumar

Nicholas Piggin <npiggin@gmail.com> writes:

> On Thu, 28 Jul 2016 00:18:08 +1000
> Michael Ellerman <mpe@ellerman.id.au> wrote:
>
>> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
>> 
>> This switches early feature checks to use the non static key variant
>> of the function. In later patches we will be switching
>> cpu_has_feature() and mmu_has_feature() to use static keys and we can
>> use them only after static key/jump label is initialized. Any check
>> for feature before jump label init should be done using this new
>> helper.
>
> Can't convince you to call it *_has_feature_early()?

Hmmm, I'll go with early_cpu_has_feature().

Otherwise it reads "does the CPU have feature 'early'".

> Any point to a WARN_ON_ONCE() in these guys that trips if they are
> used after the jump labels are set up?

See patch 20. Though we can't use WARN() as it may be too early in boot
to WARN(), so it just prints() and dumps stack.

cheers

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline
  2016-07-28  7:46   ` Nicholas Piggin
@ 2016-07-29 11:42     ` Michael Ellerman
  2016-07-29 12:54       ` Balbir Singh
  0 siblings, 1 reply; 42+ messages in thread
From: Michael Ellerman @ 2016-07-29 11:42 UTC (permalink / raw)
  To: Nicholas Piggin; +Cc: linuxppc-dev, haokexin, aneesh.kumar

Nicholas Piggin <npiggin@gmail.com> writes:

> On Thu, 28 Jul 2016 00:18:06 +1000
> Michael Ellerman <mpe@ellerman.id.au> wrote:
>> diff --git a/arch/powerpc/include/asm/mmu.h
>> b/arch/powerpc/include/asm/mmu.h index eb942a446969..f413b3213a3b
>> 100644 --- a/arch/powerpc/include/asm/mmu.h
>> +++ b/arch/powerpc/include/asm/mmu.h
>> @@ -163,6 +163,18 @@ static inline void assert_pte_locked(struct
>> mm_struct *mm, unsigned long addr) }
>>  #endif /* !CONFIG_DEBUG_VM */
>>  
>> +#ifdef CONFIG_PPC_RADIX_MMU
>> +static inline bool radix_enabled(void)
>> +{
>> +	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
>> +}
>> +#else
>> +static inline bool radix_enabled(void)
>> +{
>> +	return false;
>> +}
>> +#endif
>
> Won't MMU_FTRS_POSSIBLE just do the right thing when
> !CONFIG_PPC_RADIX_MMU?

Yes it should.

I'll have to work out why Aneesh thought he needed to do it explicitly
and whether that is needed or not.

cheers

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline
  2016-07-29 11:42     ` Michael Ellerman
@ 2016-07-29 12:54       ` Balbir Singh
  2016-07-30 10:08         ` Michael Ellerman
  0 siblings, 1 reply; 42+ messages in thread
From: Balbir Singh @ 2016-07-29 12:54 UTC (permalink / raw)
  To: Michael Ellerman, Nicholas Piggin; +Cc: linuxppc-dev, haokexin, aneesh.kumar

On Fri, 2016-07-29 at 21:42 +1000, Michael Ellerman wrote:
> Nicholas Piggin <npiggin@gmail.com> writes:
> 
> > 
> > On Thu, 28 Jul 2016 00:18:06 +1000
> > Michael Ellerman <mpe@ellerman.id.au> wrote:
> > > 
> > > diff --git a/arch/powerpc/include/asm/mmu.h
> > > b/arch/powerpc/include/asm/mmu.h index eb942a446969..f413b3213a3b
> > > 100644 --- a/arch/powerpc/include/asm/mmu.h
> > > +++ b/arch/powerpc/include/asm/mmu.h
> > > @@ -163,6 +163,18 @@ static inline void assert_pte_locked(struct
> > > mm_struct *mm, unsigned long addr) }
> > >  #endif /* !CONFIG_DEBUG_VM */
> > >  
> > > +#ifdef CONFIG_PPC_RADIX_MMU
> > > +static inline bool radix_enabled(void)
> > > +{
> > > +	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
> > > +}
> > > +#else
> > > +static inline bool radix_enabled(void)
> > > +{
> > > +	return false;
> > > +}
> > > +#endif
> > Won't MMU_FTRS_POSSIBLE just do the right thing when
> > !CONFIG_PPC_RADIX_MMU?
> Yes it should.
> 
> I'll have to work out why Aneesh thought he needed to do it explicitly
> and whether that is needed or not.
>

IIRC, If CONFIG_PPC_RADIX_MMU=n MMU_FTR_RAIDX will not be present in the
MMU_FTRS_POSSIBLE mask 

I'll also double check

Balbir

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline
  2016-07-29 12:54       ` Balbir Singh
@ 2016-07-30 10:08         ` Michael Ellerman
  0 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-07-30 10:08 UTC (permalink / raw)
  To: Balbir Singh, Nicholas Piggin; +Cc: linuxppc-dev, haokexin, aneesh.kumar

Balbir Singh <bsingharora@gmail.com> writes:

> On Fri, 2016-07-29 at 21:42 +1000, Michael Ellerman wrote:
>> Nicholas Piggin <npiggin@gmail.com> writes:
>> > Won't MMU_FTRS_POSSIBLE just do the right thing when
>> > !CONFIG_PPC_RADIX_MMU?
>> Yes it should.
>>
>> I'll have to work out why Aneesh thought he needed to do it explicitly
>> and whether that is needed or not.
>
> IIRC, If CONFIG_PPC_RADIX_MMU=n MMU_FTR_RAIDX will not be present in the
> MMU_FTRS_POSSIBLE mask

That's right.

The obvious thing to do would be to define MMU_FTR_RADIX to 0 when
RADIX=n, but that doesn't work because it breaks the ASM FTR macros
(because they check (mmu_features & mask) == mask).

So instead we just remove it from the possible mask.

cheers

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature()
  2016-07-27 14:18 ` [PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature() Michael Ellerman
  2016-07-28  7:52   ` Nicholas Piggin
@ 2016-08-08  0:59   ` Anton Blanchard
  2016-08-08  7:35     ` Anton Blanchard
  1 sibling, 1 reply; 42+ messages in thread
From: Anton Blanchard @ 2016-08-08  0:59 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: linuxppc-dev, haokexin, aneesh.kumar

Hi,

> From: Kevin Hao <haokexin@gmail.com>
> 
> As we just did for CPU features.

This patch causes an oops when building with the gold linker:

Unable to handle kernel paging request for data at address 0xf000000000000000
Faulting instruction address: 0xc000000000971544
Oops: Kernel access of bad area, sig: 11 [#1]
SMP NR_CPUS=2048 NUMA pSeries
Modules linked in:
CPU: 0 PID: 0 Comm: swapper Not tainted 4.7.0-07470-gc12e6f2 #15
task: c000000000db7500 task.stack: c000000000e04000
NIP: c000000000971544 LR: c00000000097144c CTR: 0000000000000000
REGS: c000000000e078f0 TRAP: 0300   Not tainted  (4.7.0-07470-gc12e6f2)
MSR: 8000000000001033 <SF,ME,IR,DR,RI,LE>  CR: 44002422  XER: 00000000
CFAR: c000000000008768 DAR: f000000000000000 DSISR: 40000000 SOFTE: 0 
GPR00: c000000000971e8c c000000000e07b70 c000000000dfc700 0000000000000000 
GPR04: c000000000da5bc0 0000000000000000 0000000000000100 c000000000e3c700 
GPR08: c0000000fffd2300 0000000000000000 f000000000000000 0000000000000000 
GPR12: 0000000000000000 c00000000fe00000 c0000000009ab1c0 c000000000b52868 
GPR16: c000000000b52840 c000000000b52898 c000000000b528a0 0000000000000000 
GPR20: 0000000000000000 0000000000000000 0000000000000000 0000000000000001 
GPR24: 00000000003fffff 0000000000010000 0000000000000001 ffffffffffffffff 
GPR28: c000000000da5bc0 c000000000da5bc0 0000000000000000 0000000000000000 
NIP [c000000000971544] memmap_init_zone+0x204/0x2cc
LR [c00000000097144c] memmap_init_zone+0x10c/0x2cc
Call Trace:
[c000000000e07b70] [c000000000971648] init_currently_empty_zone+0x3c/0x11c (unreliable)
[c000000000e07c00] [c000000000971e8c] free_area_init_node+0x54c/0x694
[c000000000e07d10] [c000000000c6b108] free_area_init_nodes+0x788/0x838
[c000000000e07e20] [c000000000c4fc14] paging_init+0x88/0xa4
[c000000000e07e90] [c000000000c4ac44] setup_arch+0x2d0/0x30c
[c000000000e07f00] [c000000000c43b04] start_kernel+0x90/0x514
[c000000000e07f90] [c000000000008f5c] start_here_common+0x1c/0x9c

Haven't had a chance to determine if this is a Linux or gold issue yet.

Anton
--

> Signed-off-by: Kevin Hao <haokexin@gmail.com>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
> ---
>  arch/powerpc/include/asm/mmu.h    | 36
> ++++++++++++++++++++++++++++++++++++
> arch/powerpc/kernel/cputable.c    | 17 +++++++++++++++++
> arch/powerpc/lib/feature-fixups.c |  1 + 3 files changed, 54
> insertions(+)
> 
> v3: Rename to mmu_feature_keys, and NUM_MMU_FTR_KEYS.
>     Use the kconfig.
> 
> diff --git a/arch/powerpc/include/asm/mmu.h
> b/arch/powerpc/include/asm/mmu.h index e3eff365e55d..3900cb7fe7cf
> 100644 --- a/arch/powerpc/include/asm/mmu.h
> +++ b/arch/powerpc/include/asm/mmu.h
> @@ -140,6 +140,41 @@ static inline bool __mmu_has_feature(unsigned
> long feature) return !!(MMU_FTRS_POSSIBLE &
> cur_cpu_spec->mmu_features & feature); }
>  
> +#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
> +#include <linux/jump_label.h>
> +
> +#define NUM_MMU_FTR_KEYS	32
> +
> +extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
> +
> +extern void mmu_feature_keys_init(void);
> +
> +static __always_inline bool mmu_has_feature(unsigned long feature)
> +{
> +	int i;
> +
> +	if (!(MMU_FTRS_POSSIBLE & feature))
> +		return false;
> +
> +	i = __builtin_ctzl(feature);
> +	return static_branch_likely(&mmu_feature_keys[i]);
> +}
> +
> +static inline void mmu_clear_feature(unsigned long feature)
> +{
> +	int i;
> +
> +	i = __builtin_ctzl(feature);
> +	cur_cpu_spec->mmu_features &= ~feature;
> +	static_branch_disable(&mmu_feature_keys[i]);
> +}
> +#else
> +
> +static inline void mmu_feature_keys_init(void)
> +{
> +
> +}
> +
>  static inline bool mmu_has_feature(unsigned long feature)
>  {
>  	return __mmu_has_feature(feature);
> @@ -149,6 +184,7 @@ static inline void mmu_clear_feature(unsigned
> long feature) {
>  	cur_cpu_spec->mmu_features &= ~feature;
>  }
> +#endif /* CONFIG_JUMP_LABEL */
>  
>  extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
>  
> diff --git a/arch/powerpc/kernel/cputable.c
> b/arch/powerpc/kernel/cputable.c index f268850f8fda..db14efc7d3e0
> 100644 --- a/arch/powerpc/kernel/cputable.c
> +++ b/arch/powerpc/kernel/cputable.c
> @@ -2243,4 +2243,21 @@ void __init cpu_feature_keys_init(void)
>  			static_branch_disable(&cpu_feature_keys[i]);
>  	}
>  }
> +
> +struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
> +			[0 ... NUM_MMU_FTR_KEYS - 1] =
> STATIC_KEY_TRUE_INIT +};
> +EXPORT_SYMBOL_GPL(mmu_feature_keys);
> +
> +void __init mmu_feature_keys_init(void)
> +{
> +	int i;
> +
> +	for (i = 0; i < NUM_MMU_FTR_KEYS; i++) {
> +		unsigned long f = 1ul << i;
> +
> +		if (!(cur_cpu_spec->mmu_features & f))
> +			static_branch_disable(&mmu_feature_keys[i]);
> +	}
> +}
>  #endif
> diff --git a/arch/powerpc/lib/feature-fixups.c
> b/arch/powerpc/lib/feature-fixups.c index f90423faade0..8db370cec547
> 100644 --- a/arch/powerpc/lib/feature-fixups.c
> +++ b/arch/powerpc/lib/feature-fixups.c
> @@ -196,6 +196,7 @@ void __init apply_feature_fixups(void)
>  	 */
>  	jump_label_init();
>  	cpu_feature_keys_init();
> +	mmu_feature_keys_init();
>  }
>  
>  static int __init check_features(void)

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature()
  2016-08-08  0:59   ` Anton Blanchard
@ 2016-08-08  7:35     ` Anton Blanchard
  0 siblings, 0 replies; 42+ messages in thread
From: Anton Blanchard @ 2016-08-08  7:35 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: linuxppc-dev, haokexin, aneesh.kumar

Hi,

> This patch causes an oops when building with the gold linker:

Found the problem. On binutils .meminit.text is within _stext/_etext:

  [Nr] Name              Type            Address          Off    Size   ES Flg Lk Inf Al
  [ 3] .meminit.text     PROGBITS        c000000000989d14 999d14 00225c 00  AX  0   0  4

c000000000990000 R _etext

But on gold it is not:

c000000000970000 A _etext

  [Nr] Name              Type            Address          Off    Size   ES Flg Lk Inf Al
  [ 3] .meminit.text     PROGBITS        c000000000970bcc 980bcc 002220 00  AX  0   0  4

As a result kernel_text_address() returns false, and
__jump_label_update() fails to update:

    if (entry->code && kernel_text_address(entry->code))
            arch_jump_label_transform(entry, jump_label_type(entry));

Seems like we get the correct layout on binutils by luck and we
need to explicitly handle .meminit.text.

Anton

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 14/21] powerpc: Call jump_label_init() in apply_feature_fixups()
       [not found] ` <1469629097-30859-14-git-send-email-mpe__30163.7288918302$1469630223$gmane$org@ellerman.id.au>
@ 2016-08-13 23:55   ` Andreas Schwab
  2016-08-14  4:44     ` Michael Ellerman
  0 siblings, 1 reply; 42+ messages in thread
From: Andreas Schwab @ 2016-08-13 23:55 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: linuxppc-dev, haokexin, aneesh.kumar

On Jul 28 2016, Michael Ellerman <mpe@ellerman.id.au> wrote:

> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
>
> Call jump_label_init() early so that we can use static keys for CPU and
> MMU feature checks.

That breaks PPC32.  jump_label_init doesn't use PTRRELOC.

Andreas.

-- 
Andreas Schwab, schwab@linux-m68k.org
GPG Key fingerprint = 58CA 54C7 6D53 942B 1756  01D3 44D5 214B 8276 4ED5
"And now for something completely different."

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v3 14/21] powerpc: Call jump_label_init() in apply_feature_fixups()
  2016-08-13 23:55   ` [PATCH v3 14/21] powerpc: Call jump_label_init() in apply_feature_fixups() Andreas Schwab
@ 2016-08-14  4:44     ` Michael Ellerman
  0 siblings, 0 replies; 42+ messages in thread
From: Michael Ellerman @ 2016-08-14  4:44 UTC (permalink / raw)
  To: Andreas Schwab; +Cc: linuxppc-dev, haokexin, aneesh.kumar

Andreas Schwab <schwab@linux-m68k.org> writes:

> On Jul 28 2016, Michael Ellerman <mpe@ellerman.id.au> wrote:
>
>> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
>>
>> Call jump_label_init() early so that we can use static keys for CPU and
>> MMU feature checks.
>
> That breaks PPC32.  jump_label_init doesn't use PTRRELOC.

Should be fixed in Linus' tree:

  https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=97f6e0cc35026a2a09147a6da636d901525e1969

cheers

^ permalink raw reply	[flat|nested] 42+ messages in thread

end of thread, other threads:[~2016-08-14  4:44 UTC | newest]

Thread overview: 42+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-07-27 14:17 [PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree() Michael Ellerman
2016-07-27 14:17 ` [PATCH v3 02/21] powerpc/mm: Move disable_radix handling into mmu_early_init_devtree() Michael Ellerman
2016-07-28  3:14   ` Balbir Singh
2016-07-27 14:17 ` [PATCH v3 03/21] powerpc/mm: Do hash device tree scanning earlier Michael Ellerman
2016-07-28 12:40   ` [PATCH v4] " Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 04/21] powerpc/mm: Do radix " Michael Ellerman
2016-07-28  3:48   ` Balbir Singh
2016-07-28  8:11     ` Michael Ellerman
2016-07-28 11:55       ` Balbir Singh
2016-07-27 14:18 ` [PATCH v3 05/21] powerpc/64: Do feature patching before MMU init Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 06/21] powerpc/kernel: Check features don't change after patching Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 07/21] powerpc/mm: Make MMU_FTR_RADIX a MMU family feature Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 08/21] powerpc/kernel: Convert mmu_has_feature() to returning bool Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 09/21] powerpc/kernel: Convert cpu_has_feature() " Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline Michael Ellerman
2016-07-28  7:46   ` Nicholas Piggin
2016-07-29 11:42     ` Michael Ellerman
2016-07-29 12:54       ` Balbir Singh
2016-07-30 10:08         ` Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 11/21] powerpc/mm: Add __cpu/__mmu_has_feature() Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers Michael Ellerman
2016-07-27 21:37   ` Benjamin Herrenschmidt
2016-07-28 11:24     ` Michael Ellerman
2016-07-27 21:42   ` Benjamin Herrenschmidt
2016-07-28  7:49   ` Nicholas Piggin
2016-07-28 13:04     ` Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 13/21] jump_label: Make it possible for arches to invoke jump_label_init() earlier Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 14/21] powerpc: Call jump_label_init() in apply_feature_fixups() Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 15/21] powerpc: Remove mfvtb() Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 16/21] powerpc: Move cpu_has_feature() to a separate file Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 17/21] powerpc: Add kconfig option to use jump labels for cpu/mmu_has_feature() Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 18/21] powerpc: Add option to use jump label for cpu_has_feature() Michael Ellerman
2016-07-28  7:51   ` Nicholas Piggin
2016-07-27 14:18 ` [PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature() Michael Ellerman
2016-07-28  7:52   ` Nicholas Piggin
2016-08-08  0:59   ` Anton Blanchard
2016-08-08  7:35     ` Anton Blanchard
2016-07-27 14:18 ` [PATCH v3 20/21] powerpc/mm: Catch usage of cpu/mmu_has_feature() before jump label init Michael Ellerman
2016-07-27 14:18 ` [PATCH v3 21/21] powerpc/jump_label: Annotate jump label assembly Michael Ellerman
2016-07-28  7:56   ` Nicholas Piggin
     [not found] ` <1469629097-30859-14-git-send-email-mpe__30163.7288918302$1469630223$gmane$org@ellerman.id.au>
2016-08-13 23:55   ` [PATCH v3 14/21] powerpc: Call jump_label_init() in apply_feature_fixups() Andreas Schwab
2016-08-14  4:44     ` Michael Ellerman

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.