* [RFC 1/8] powerpc/slb: Remove a duplicate extern variable
@ 2015-07-21  6:58 Anshuman Khandual
  2015-07-21  6:58 ` [RFC 2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot' Anshuman Khandual
                   ` (6 more replies)
  0 siblings, 7 replies; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21  6:58 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: mikey, mpe

From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>

This patch removes a duplicate extern declaration of the variable
'slb_compare_rr_to_size'. It does not change any functionality.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6e450ca..62fafb3 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -253,7 +253,6 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 	patch_instruction(insn_addr, insn);
 }
 
-extern u32 slb_compare_rr_to_size[];
 extern u32 slb_miss_kernel_load_linear[];
 extern u32 slb_miss_kernel_load_io[];
 extern u32 slb_compare_rr_to_size[];
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [RFC 2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot'
  2015-07-21  6:58 [RFC 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
@ 2015-07-21  6:58 ` Anshuman Khandual
  2015-07-21  9:46   ` [RFC, " Michael Ellerman
  2015-07-21  6:58 ` [RFC 3/8] powerpc/slb: Define macros for the bolted slots Anshuman Khandual
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21  6:58 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: mikey, mpe

From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>

These functions essentially deal with individual SLB slots. Using both
'entry' and 'slot' as synonyms makes things genuinely confusing at times.
This patch makes the naming uniform across the file by replacing all the
'entry' occurrences with 'slot'.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 62fafb3..3842a54 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -55,39 +55,39 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 
 static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
-				     unsigned long entry)
+				     unsigned long slot)
 {
 	/*
-	 * Clear the ESID first so the entry is not valid while we are
+	 * Clear the ESID first so the slot is not valid while we are
 	 * updating it.  No write barriers are needed here, provided
 	 * we only update the current CPU's SLB shadow buffer.
 	 */
-	get_slb_shadow()->save_area[entry].esid = 0;
-	get_slb_shadow()->save_area[entry].vsid =
+	get_slb_shadow()->save_area[slot].esid = 0;
+	get_slb_shadow()->save_area[slot].vsid =
 				cpu_to_be64(mk_vsid_data(ea, ssize, flags));
-	get_slb_shadow()->save_area[entry].esid =
-				cpu_to_be64(mk_esid_data(ea, ssize, entry));
+	get_slb_shadow()->save_area[slot].esid =
+				cpu_to_be64(mk_esid_data(ea, ssize, slot));
 }
 
-static inline void slb_shadow_clear(unsigned long entry)
+static inline void slb_shadow_clear(unsigned long slot)
 {
-	get_slb_shadow()->save_area[entry].esid = 0;
+	get_slb_shadow()->save_area[slot].esid = 0;
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 					unsigned long flags,
-					unsigned long entry)
+					unsigned long slot)
 {
 	/*
 	 * Updating the shadow buffer before writing the SLB ensures
-	 * we don't get a stale entry here if we get preempted by PHYP
+	 * we don't get a stale slot here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(ea, ssize, flags, entry);
+	slb_shadow_update(ea, ssize, flags, slot);
 
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, ssize, flags)),
-		       "r" (mk_esid_data(ea, ssize, entry))
+		       "r" (mk_esid_data(ea, ssize, slot))
 		     : "memory" );
 }
 
@@ -109,7 +109,7 @@ static void __slb_flush_and_rebolt(void)
 		ksp_vsid_data = 0;
 		slb_shadow_clear(2);
 	} else {
-		/* Update stack entry; others don't change */
+		/* Update stack slot; others don't change */
 		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
 		ksp_vsid_data =
 			be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
@@ -313,13 +313,12 @@ void slb_initialize(void)
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
 	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* For the boot cpu, we're running on the stack in init_thread_union,
 	 * which is in the first segment of the linear mapping, and also
 	 * get_paca()->kstack hasn't been initialized yet.
-	 * For secondary cpus, we need to bolt the kernel stack entry now.
+	 * For secondary cpus, we need to bolt the kernel stack slot now.
 	 */
 	slb_shadow_clear(2);
 	if (raw_smp_processor_id() != boot_cpuid &&
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [RFC 3/8] powerpc/slb: Define macros for the bolted slots
  2015-07-21  6:58 [RFC 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
  2015-07-21  6:58 ` [RFC 2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot' Anshuman Khandual
@ 2015-07-21  6:58 ` Anshuman Khandual
  2015-07-22  9:32   ` Michael Ellerman
  2015-07-21  6:58 ` [RFC 4/8] powerpc/slb: Add some helper functions to improve modularization Anshuman Khandual
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21  6:58 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: mikey, mpe

From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>

This patch defines macros for all three bolted SLB slots. It also
renames the 'create_shadowed_slbe' function to 'new_shadowed_slbe'.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 3842a54..cbeaaa2 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -25,6 +25,9 @@
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
 
+#define SLOT_KLINR  0	/* kernel linear map  (0xc00000000) */
+#define SLOT_KVIRT  1	/* kernel virtual map (0xd00000000) */
+#define SLOT_KSTACK 2	/* kernel stack map   (0xf00000000) */
 
 extern void slb_allocate_realmode(unsigned long ea);
 extern void slb_allocate_user(unsigned long ea);
@@ -74,7 +77,7 @@ static inline void slb_shadow_clear(unsigned long slot)
 	get_slb_shadow()->save_area[slot].esid = 0;
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+static inline void new_shadowed_slbe(unsigned long ea, int ssize,
 					unsigned long flags,
 					unsigned long slot)
 {
@@ -103,16 +106,16 @@ static void __slb_flush_and_rebolt(void)
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, SLOT_KSTACK);
 	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
 		ksp_vsid_data = 0;
-		slb_shadow_clear(2);
+		slb_shadow_clear(SLOT_KSTACK);
 	} else {
 		/* Update stack slot; others don't change */
-		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, SLOT_KSTACK);
 		ksp_vsid_data =
-			be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
+			be64_to_cpu(get_slb_shadow()->save_area[SLOT_KSTACK].vsid);
 	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -125,7 +128,7 @@ static void __slb_flush_and_rebolt(void)
 		     "slbmte	%2,%3\n"
 		     "isync"
 		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
-		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
+		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, SLOT_KVIRT)),
 		        "r"(ksp_vsid_data),
 		        "r"(ksp_esid_data)
 		     : "memory");
@@ -151,7 +154,7 @@ void slb_vmalloc_update(void)
 	unsigned long vflags;
 
 	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, SLOT_KVIRT);
 	slb_flush_and_rebolt();
 }
 
@@ -312,19 +315,19 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+	new_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, SLOT_KLINR);
+	new_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, SLOT_KVIRT);
 
 	/* For the boot cpu, we're running on the stack in init_thread_union,
 	 * which is in the first segment of the linear mapping, and also
 	 * get_paca()->kstack hasn't been initialized yet.
 	 * For secondary cpus, we need to bolt the kernel stack slot now.
 	 */
-	slb_shadow_clear(2);
+	slb_shadow_clear(SLOT_KSTACK);
 	if (raw_smp_processor_id() != boot_cpuid &&
 	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
-		create_shadowed_slbe(get_paca()->kstack,
-				     mmu_kernel_ssize, lflags, 2);
+		new_shadowed_slbe(get_paca()->kstack,
+				     mmu_kernel_ssize, lflags, SLOT_KSTACK);
 
 	asm volatile("isync":::"memory");
 }
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [RFC 4/8] powerpc/slb: Add some helper functions to improve modularization
  2015-07-21  6:58 [RFC 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
  2015-07-21  6:58 ` [RFC 2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot' Anshuman Khandual
  2015-07-21  6:58 ` [RFC 3/8] powerpc/slb: Define macros for the bolted slots Anshuman Khandual
@ 2015-07-21  6:58 ` Anshuman Khandual
  2015-07-22  9:19   ` Michael Ellerman
  2015-07-21  6:58 ` [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding Anshuman Khandual
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21  6:58 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: mikey, mpe

From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>

This patch adds the following helper functions to improve modularization
and readability of the code.

(1) slb_invalid_all: 		Invalidates entire SLB
(2) slb_invalid_paca_slots: 	Invalidate SLB entries present in PACA
(3) kernel_linear_vsid_flags:	VSID flags for kernel linear mapping
(4) kernel_virtual_vsid_flags:	VSID flags for kernel virtual mapping

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb.c | 87 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 59 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cbeaaa2..dcba4c2 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -94,18 +94,37 @@ static inline void new_shadowed_slbe(unsigned long ea, int ssize,
 		     : "memory" );
 }
 
+static inline unsigned long kernel_linear_vsid_flags(void)
+{
+	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
+}
+
+static inline unsigned long kernel_virtual_vsid_flags(void)
+{
+	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+}
+
+static inline unsigned long kernel_io_vsid_flags(void)
+{
+	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long kernel_vmemmap_vsid_flags(void)
+{
+	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
+}
+#endif
+
 static void __slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * and PR KVM appropriately too. */
-	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
+	unsigned long lflags, vflags;
 	unsigned long ksp_esid_data, ksp_vsid_data;
 
-	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | vmalloc_llp;
-
+	lflags = kernel_linear_vsid_flags();
+	vflags = kernel_virtual_vsid_flags();
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, SLOT_KSTACK);
 	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
@@ -153,7 +172,7 @@ void slb_vmalloc_update(void)
 {
 	unsigned long vflags;
 
-	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	vflags = kernel_virtual_vsid_flags();
 	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, SLOT_KVIRT);
 	slb_flush_and_rebolt();
 }
@@ -187,6 +206,23 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
 }
 
+static void slb_invalid_paca_slots(unsigned long offset)
+{
+	unsigned long slbie_data;
+	int i;
+
+	asm volatile("isync" : : : "memory");
+	for (i = 0; i < offset; i++) {
+		slbie_data = (unsigned long)get_paca()->slb_cache[i]
+			<< SID_SHIFT; /* EA */
+		slbie_data |= user_segment_size(slbie_data)
+			<< SLBIE_SSIZE_SHIFT;
+		slbie_data |= SLBIE_C; /* C set for user addresses */
+		asm volatile("slbie %0" : : "r" (slbie_data));
+	}
+	asm volatile("isync" : : : "memory");
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -206,17 +242,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	offset = get_paca()->slb_cache_ptr;
 	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
-		int i;
-		asm volatile("isync" : : : "memory");
-		for (i = 0; i < offset; i++) {
-			slbie_data = (unsigned long)get_paca()->slb_cache[i]
-				<< SID_SHIFT; /* EA */
-			slbie_data |= user_segment_size(slbie_data)
-				<< SLBIE_SSIZE_SHIFT;
-			slbie_data |= SLBIE_C; /* C set for user addresses */
-			asm volatile("slbie %0" : : "r" (slbie_data));
-		}
-		asm volatile("isync" : : : "memory");
+		slb_invalid_paca_slots(offset);
 	} else {
 		__slb_flush_and_rebolt();
 	}
@@ -256,6 +282,14 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 	patch_instruction(insn_addr, insn);
 }
 
+/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+static inline void slb_invalid_all(void)
+{
+	asm volatile("isync":::"memory");
+	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
+	asm volatile("isync; slbia; isync":::"memory");
+}
+
 extern u32 slb_miss_kernel_load_linear[];
 extern u32 slb_miss_kernel_load_io[];
 extern u32 slb_compare_rr_to_size[];
@@ -283,16 +317,16 @@ void slb_initialize(void)
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+	get_paca()->vmalloc_sllp = kernel_virtual_vsid_flags();
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
 #endif
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
-				   SLB_VSID_KERNEL | linear_llp);
+				   kernel_linear_vsid_flags());
 		patch_slb_encoding(slb_miss_kernel_load_io,
-				   SLB_VSID_KERNEL | io_llp);
+				   kernel_io_vsid_flags());
 		patch_slb_encoding(slb_compare_rr_to_size,
 				   mmu_slb_size);
 
@@ -301,20 +335,17 @@ void slb_initialize(void)
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
-				   SLB_VSID_KERNEL | vmemmap_llp);
+				   kernel_vmemmap_vsid_flags());
 		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
 #endif
 	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
 
-	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | vmalloc_llp;
+	lflags = kernel_linear_vsid_flags();
+	vflags = kernel_virtual_vsid_flags();
 
-	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	asm volatile("isync":::"memory");
-	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
-	asm volatile("isync; slbia; isync":::"memory");
+	slb_invalid_all();
 	new_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, SLOT_KLINR);
 	new_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, SLOT_KVIRT);
 
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding
  2015-07-21  6:58 [RFC 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
                   ` (2 preceding siblings ...)
  2015-07-21  6:58 ` [RFC 4/8] powerpc/slb: Add some helper functions to improve modularization Anshuman Khandual
@ 2015-07-21  6:58 ` Anshuman Khandual
  2015-07-22  5:51   ` Michael Ellerman
  2015-07-21  6:58 ` [RFC 6/8] powerpc/prom: Simplify the logic while fetching SLB size Anshuman Khandual
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21  6:58 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: mikey, mpe

From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>

This patch adds some documentation to the 'patch_slb_encoding' function
explaining how it clears the existing immediate value in the given
instruction and inserts a new one.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index dcba4c2..8083a9e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -278,7 +278,13 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 static inline void patch_slb_encoding(unsigned int *insn_addr,
 				      unsigned int immed)
 {
-	int insn = (*insn_addr & 0xffff0000) | immed;
+	/*
+	 * Currently this patches only "li" and "cmpldi"
+	 * instructions with an immediate value. Here it
+	 * just clears the existing immediate value from
+	 * the instruction and inserts a new one there.
+	 */
+	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
 	patch_instruction(insn_addr, insn);
 }
 
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [RFC 6/8] powerpc/prom: Simplify the logic while fetching SLB size
  2015-07-21  6:58 [RFC 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
                   ` (3 preceding siblings ...)
  2015-07-21  6:58 ` [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding Anshuman Khandual
@ 2015-07-21  6:58 ` Anshuman Khandual
  2015-07-21 10:21   ` [RFC, " Michael Ellerman
  2015-07-21  6:58 ` [RFC 7/8] powerpc/xmon: Drop 'valid' from the condition inside 'dump_segments' Anshuman Khandual
  2015-07-21  6:58 ` [RFC 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list Anshuman Khandual
  6 siblings, 1 reply; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21  6:58 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: mikey, mpe

From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>

This patch simplifies the existing logic for fetching the SLB size
property from the device tree.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/kernel/prom.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b888b1..f6168e2 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -223,14 +223,12 @@ static void __init check_cpu_slb_size(unsigned long node)
 	const __be32 *slb_size_ptr;
 
 	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
-	if (slb_size_ptr != NULL) {
-		mmu_slb_size = be32_to_cpup(slb_size_ptr);
-		return;
-	}
-	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
-	if (slb_size_ptr != NULL) {
-		mmu_slb_size = be32_to_cpup(slb_size_ptr);
+	if (!slb_size_ptr) {
+		slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+		if (!slb_size_ptr)
+			return;
 	}
+	mmu_slb_size = be32_to_cpup(slb_size_ptr);
 }
 #else
 #define check_cpu_slb_size(node) do { } while(0)
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [RFC 7/8] powerpc/xmon: Drop 'valid' from the condition inside 'dump_segments'
  2015-07-21  6:58 [RFC 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
                   ` (4 preceding siblings ...)
  2015-07-21  6:58 ` [RFC 6/8] powerpc/prom: Simplify the logic while fetching SLB size Anshuman Khandual
@ 2015-07-21  6:58 ` Anshuman Khandual
  2015-07-21 10:00   ` [RFC, " Michael Ellerman
  2015-07-21  6:58 ` [RFC 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list Anshuman Khandual
  6 siblings, 1 reply; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21  6:58 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: mikey, mpe

From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>

Value of 'valid' is zero when 'esid' is zero and it does not matter
when 'esid' is non-zero. Hence the variable 'value' can be dropped
from the conditional statement. This patch does that.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/xmon/xmon.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index e599259..1798e21 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2740,7 +2740,7 @@ void dump_segments(void)
 		asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
 		asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));
 		valid = (esid & SLB_ESID_V);
-		if (valid | esid | vsid) {
+		if (esid | vsid) {
 			printf("%02d %016lx %016lx", i, esid, vsid);
 			if (valid) {
 				llp = vsid & SLB_VSID_LLP;
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [RFC 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list
  2015-07-21  6:58 [RFC 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
                   ` (5 preceding siblings ...)
  2015-07-21  6:58 ` [RFC 7/8] powerpc/xmon: Drop 'valid' from the condition inside 'dump_segments' Anshuman Khandual
@ 2015-07-21  6:58 ` Anshuman Khandual
  2015-07-21 10:08   ` [RFC, " Michael Ellerman
  6 siblings, 1 reply; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21  6:58 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: mikey, mpe

From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>

This patch adds some more elements to the existing PACA dump list
inside an xmon session; these are listed here.

	- hmi_event_available
	- dscr_default
	- vmalloc_sllp
	- slb_cache_ptr
	- sprg_vdso
	- tm_scratch
	- core_idle_state_ptr
	- thread_idle_state
	- thread_mask

With this patch, a typical xmon PACA dump looks something like this.

 possible             = yes
 present              = yes
 online               = yes
 lock_token           = 0x8000            	(0x8)
 paca_index           = 0x0               	(0xa)
 kernel_toc           = 0xc000000000e79300	(0x10)
 kernelbase           = 0xc000000000000000	(0x18)
 kernel_msr           = 0xb000000000001032	(0x20)
 emergency_sp         = 0xc00000003fff0000	(0x28)
 mc_emergency_sp      = 0xc00000003ffec000	(0x2e0)
 in_mce               = 0x0               	(0x2e8)
 hmi_event_available  = 0x0               	(0x2ea)
 data_offset          = 0xfa9f0000        	(0x30)
 hw_cpu_id            = 0x0               	(0x38)
 cpu_start            = 0x1               	(0x3a)
 kexec_state          = 0x0               	(0x3b)
 dscr_default         = 0x0               	(0x58)
 vmalloc_sllp         = 0x510             	(0x1b8)
 slb_cache_ptr        = 0x3               	(0x1ba)
 slb_cache[0]:        = 0x3f000
 slb_cache[1]:        = 0x1
 slb_cache[2]:        = 0x1000
 __current            = 0xc00000009ce96620	(0x290)
 kstack               = 0xc00000009cf2be30	(0x298)
 stab_rr              = 0x8               	(0x2a0)
 saved_r1             = 0xc00000009cf2b360	(0x2a8)
 trap_save            = 0x0               	(0x2b8)
 soft_enabled         = 0x0               	(0x2ba)
 irq_happened         = 0x1               	(0x2bb)
 io_sync              = 0x0               	(0x2bc)
 irq_work_pending     = 0x0               	(0x2bd)
 nap_state_lost       = 0x0               	(0x2be)
 sprg_vdso            = 0x0               	(0x2c0)
 tm_scratch           = 0x800000010280f032	(0x2c8)
 core_idle_state_ptr  = (null)            	(0x2d0)
 thread_idle_state    = 0x0               	(0x2d8)
 thread_mask          = 0x0               	(0x2d9)
 subcore_sibling_mask = 0x0               	(0x2da)

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/xmon/xmon.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1798e21..bc42f6e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2073,6 +2073,7 @@ static void xmon_rawdump (unsigned long adrs, long ndump)
 static void dump_one_paca(int cpu)
 {
 	struct paca_struct *p;
+	int i;
 
 	if (setjmp(bus_error_jmp) != 0) {
 		printf("*** Error dumping paca for cpu 0x%x!\n", cpu);
@@ -2086,12 +2087,12 @@ static void dump_one_paca(int cpu)
 
 	printf("paca for cpu 0x%x @ %p:\n", cpu, p);
 
-	printf(" %-*s = %s\n", 16, "possible", cpu_possible(cpu) ? "yes" : "no");
-	printf(" %-*s = %s\n", 16, "present", cpu_present(cpu) ? "yes" : "no");
-	printf(" %-*s = %s\n", 16, "online", cpu_online(cpu) ? "yes" : "no");
+	printf(" %-*s = %s\n", 20, "possible", cpu_possible(cpu) ? "yes" : "no");
+	printf(" %-*s = %s\n", 20, "present", cpu_present(cpu) ? "yes" : "no");
+	printf(" %-*s = %s\n", 20, "online", cpu_online(cpu) ? "yes" : "no");
 
 #define DUMP(paca, name, format) \
-	printf(" %-*s = %#-*"format"\t(0x%lx)\n", 16, #name, 18, paca->name, \
+	printf(" %-*s = %#-*"format"\t(0x%lx)\n", 20, #name, 18, paca->name, \
 		offsetof(struct paca_struct, name));
 
 	DUMP(p, lock_token, "x");
@@ -2103,11 +2104,17 @@ static void dump_one_paca(int cpu)
 #ifdef CONFIG_PPC_BOOK3S_64
 	DUMP(p, mc_emergency_sp, "p");
 	DUMP(p, in_mce, "x");
+	DUMP(p, hmi_event_available, "x");
 #endif
 	DUMP(p, data_offset, "lx");
 	DUMP(p, hw_cpu_id, "x");
 	DUMP(p, cpu_start, "x");
 	DUMP(p, kexec_state, "x");
+	DUMP(p, dscr_default, "llx");
+	DUMP(p, vmalloc_sllp, "x");
+	DUMP(p, slb_cache_ptr, "x");
+	for (i = 0; i < p->slb_cache_ptr; i++)
+		printf(" slb_cache[%d]:        = 0x%lx\n", i, p->slb_cache[i]);
 	DUMP(p, __current, "p");
 	DUMP(p, kstack, "lx");
 	DUMP(p, stab_rr, "lx");
@@ -2118,6 +2125,18 @@ static void dump_one_paca(int cpu)
 	DUMP(p, io_sync, "x");
 	DUMP(p, irq_work_pending, "x");
 	DUMP(p, nap_state_lost, "x");
+	DUMP(p, sprg_vdso, "llx");
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	DUMP(p, tm_scratch, "llx");
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+	DUMP(p, core_idle_state_ptr, "p");
+	DUMP(p, thread_idle_state, "x");
+	DUMP(p, thread_mask, "x");
+	DUMP(p, subcore_sibling_mask, "x");
+#endif
 
 #undef DUMP
 
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* Re: [RFC, 2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot'
  2015-07-21  6:58 ` [RFC 2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot' Anshuman Khandual
@ 2015-07-21  9:46   ` Michael Ellerman
  2015-07-21 11:23     ` [RFC,2/8] " Anshuman Khandual
  0 siblings, 1 reply; 23+ messages in thread
From: Michael Ellerman @ 2015-07-21  9:46 UTC (permalink / raw)
  To: Anshuman Khandual, linuxppc-dev; +Cc: mikey

On Tue, 2015-21-07 at 06:58:40 UTC, Anshuman Khandual wrote:
> From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> 
> These are essentially SLB individual slots what we are dealing with
> in these functions. Usage of both 'entry' and 'slot' synonyms makes
> it real confusing sometimes. This patch makes it uniform across the
> file by replacing all those 'entry's with 'slot's.

No I think it would be better the other way around.

Currently we use entry in 14 places and slot in 3.

Both can be correct in some places, but not always.

For example:

> -	 * Clear the ESID first so the entry is not valid while we are
> +	 * Clear the ESID first so the slot is not valid while we are

That doesn't make sense with "slot", a slot is not valid, only an entry in a
slot is valid.

Looking at the existing uses of slot they will all make sense if you change
them to entry.

cheers

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC, 7/8] powerpc/xmon: Drop 'valid' from the condition inside 'dump_segments'
  2015-07-21  6:58 ` [RFC 7/8] powerpc/xmon: Drop 'valid' from the condition inside 'dump_segments' Anshuman Khandual
@ 2015-07-21 10:00   ` Michael Ellerman
  2015-07-21 11:45     ` Anshuman Khandual
  0 siblings, 1 reply; 23+ messages in thread
From: Michael Ellerman @ 2015-07-21 10:00 UTC (permalink / raw)
  To: Anshuman Khandual, linuxppc-dev; +Cc: mikey

On Tue, 2015-21-07 at 06:58:45 UTC, Anshuman Khandual wrote:
> From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> 
> Value of 'valid' is zero when 'esid' is zero and it does not matter
> when 'esid' is non-zero. 

Yes it does. It tells you whether the entry is valid?

In practice maybe you only see invalid entries that are entirely zero, and so
they get skipped anyway, but that's not guaranteed.

cheers

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC, 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list
  2015-07-21  6:58 ` [RFC 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list Anshuman Khandual
@ 2015-07-21 10:08   ` Michael Ellerman
  2015-07-21 11:48     ` Anshuman Khandual
  0 siblings, 1 reply; 23+ messages in thread
From: Michael Ellerman @ 2015-07-21 10:08 UTC (permalink / raw)
  To: Anshuman Khandual, linuxppc-dev; +Cc: mikey


On Tue, 2015-21-07 at 06:58:46 UTC, Anshuman Khandual wrote:
> From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> 
> This patch adds some more elements to the existing PACA dump list
> inside a xmon session which can be listed here.
> 
> 	- hmi_event_available
> 	- dscr_default
> 	- vmalloc_sllp
> 	- slb_cache_ptr
> 	- sprg_vdso
> 	- tm_scratch
> 	- core_idle_state_ptr
> 	- thread_idle_state
> 	- thread_mask

This is probably OK, except you broke the ppc64e build again.

cheers


../arch/powerpc/xmon/xmon.c: In function ‘dump_one_paca’:
../arch/powerpc/xmon/xmon.c:2095:63: error: ‘struct paca_struct’ has no member named ‘vmalloc_sllp’
  printf(" %-*s = %#-*"format"\t(0x%lx)\n", 20, #name, 18, paca->name, \
                                                               ^
../arch/powerpc/xmon/xmon.c:2114:2: note: in expansion of macro ‘DUMP’
  DUMP(p, vmalloc_sllp, "x");
  ^
In file included from ../include/linux/compiler.h:56:0,
                 from ../include/uapi/linux/stddef.h:1,
                 from ../include/linux/stddef.h:4,
                 from ../include/uapi/linux/posix_types.h:4,
                 from ../include/uapi/linux/types.h:13,
                 from ../include/linux/types.h:5,
                 from ../include/uapi/linux/capability.h:16,
                 from ../include/linux/capability.h:15,
                 from ../include/linux/sched.h:15,
                 from ../arch/powerpc/xmon/xmon.c:14:
../include/linux/compiler-gcc.h:158:2: error: ‘struct paca_struct’ has no member named ‘vmalloc_sllp’
  __builtin_offsetof(a, b)
  ^
../include/linux/stddef.h:16:32: note: in expansion of macro ‘__compiler_offsetof’
 #define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
                                ^
../arch/powerpc/xmon/xmon.c:2096:3: note: in expansion of macro ‘offsetof’
   offsetof(struct paca_struct, name));
   ^
../arch/powerpc/xmon/xmon.c:2114:2: note: in expansion of macro ‘DUMP’
  DUMP(p, vmalloc_sllp, "x");
  ^
../arch/powerpc/xmon/xmon.c:2095:63: error: ‘struct paca_struct’ has no member named ‘slb_cache_ptr’
  printf(" %-*s = %#-*"format"\t(0x%lx)\n", 20, #name, 18, paca->name, \
                                                               ^
../arch/powerpc/xmon/xmon.c:2115:2: note: in expansion of macro ‘DUMP’
  DUMP(p, slb_cache_ptr, "x");
  ^
In file included from ../include/linux/compiler.h:56:0,
                 from ../include/uapi/linux/stddef.h:1,
                 from ../include/linux/stddef.h:4,
                 from ../include/uapi/linux/posix_types.h:4,
                 from ../include/uapi/linux/types.h:13,
                 from ../include/linux/types.h:5,
                 from ../include/uapi/linux/capability.h:16,
                 from ../include/linux/capability.h:15,
                 from ../include/linux/sched.h:15,
                 from ../arch/powerpc/xmon/xmon.c:14:
../include/linux/compiler-gcc.h:158:2: error: ‘struct paca_struct’ has no member named ‘slb_cache_ptr’
  __builtin_offsetof(a, b)
  ^
../include/linux/stddef.h:16:32: note: in expansion of macro ‘__compiler_offsetof’
 #define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
                                ^
../arch/powerpc/xmon/xmon.c:2096:3: note: in expansion of macro ‘offsetof’
   offsetof(struct paca_struct, name));
   ^
../arch/powerpc/xmon/xmon.c:2115:2: note: in expansion of macro ‘DUMP’
  DUMP(p, slb_cache_ptr, "x");
  ^
../arch/powerpc/xmon/xmon.c:2116:19: error: ‘struct paca_struct’ has no member named ‘slb_cache_ptr’
  for (i = 0; i < p->slb_cache_ptr; i++)
                   ^
../arch/powerpc/xmon/xmon.c:2117:50: error: ‘struct paca_struct’ has no member named ‘slb_cache’
   printf(" slb_cache[%d]:        = 0x%lx\n", i, p->slb_cache[i]);
                                                  ^
make[2]: *** [arch/powerpc/xmon/xmon.o] Error 1
make[1]: *** [arch/powerpc/xmon] Error 2

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC, 6/8] powerpc/prom: Simplify the logic while fetching SLB size
  2015-07-21  6:58 ` [RFC 6/8] powerpc/prom: Simplify the logic while fetching SLB size Anshuman Khandual
@ 2015-07-21 10:21   ` Michael Ellerman
  2015-07-21 11:24     ` Anshuman Khandual
  0 siblings, 1 reply; 23+ messages in thread
From: Michael Ellerman @ 2015-07-21 10:21 UTC (permalink / raw)
  To: Anshuman Khandual, linuxppc-dev; +Cc: mikey

On Tue, 2015-21-07 at 06:58:44 UTC, Anshuman Khandual wrote:
> From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> 
> This patch just simplifies the existing code logic while fetching
> the SLB size property from the device tree.
> 
> Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
> ---
>  arch/powerpc/kernel/prom.c | 12 +++++-------
>  1 file changed, 5 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
> index 8b888b1..f6168e2 100644
> --- a/arch/powerpc/kernel/prom.c
> +++ b/arch/powerpc/kernel/prom.c
> @@ -223,14 +223,12 @@ static void __init check_cpu_slb_size(unsigned long node)
>  	const __be32 *slb_size_ptr;
>  
>  	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
> -	if (slb_size_ptr != NULL) {
> -		mmu_slb_size = be32_to_cpup(slb_size_ptr);
> -		return;
> -	}
> -	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
> -	if (slb_size_ptr != NULL) {
> -		mmu_slb_size = be32_to_cpup(slb_size_ptr);
> +	if (!slb_size_ptr) {
> +		slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
> +		if (!slb_size_ptr)
> +			return;
>  	}
> +	mmu_slb_size = be32_to_cpup(slb_size_ptr);
>  }

It's still ugly. Why not go the whole way:


	p = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
	    of_get_flat_dt_prop(node, "ibm,slb-size", NULL);

	if (p)
		mmu_slb_size = be32_to_cpup(p);


And while you're at it, rename the function, it doesn't check anything. It
initialises mmu_slb_size, so call it init_mmu_slb_size()?
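
Put together, a minimal sketch of the renamed helper, assuming it keeps the
current signature and __init annotation:

	static void __init init_mmu_slb_size(unsigned long node)
	{
		const __be32 *p;

		p = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
		    of_get_flat_dt_prop(node, "ibm,slb-size", NULL);

		if (p)
			mmu_slb_size = be32_to_cpup(p);
	}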

cheers

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC,2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot'
  2015-07-21  9:46   ` [RFC, " Michael Ellerman
@ 2015-07-21 11:23     ` Anshuman Khandual
  0 siblings, 0 replies; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21 11:23 UTC (permalink / raw)
  To: Michael Ellerman, linuxppc-dev; +Cc: mikey

On 07/21/2015 03:16 PM, Michael Ellerman wrote:
> On Tue, 2015-21-07 at 06:58:40 UTC, Anshuman Khandual wrote:
>> > From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
>> > 
>> > These are essentially SLB individual slots what we are dealing with
>> > in these functions. Usage of both 'entry' and 'slot' synonyms makes
>> > it real confusing sometimes. This patch makes it uniform across the
>> > file by replacing all those 'entry's with 'slot's.
> No I think it would be better the other way around.
> 
> Currently we use entry in 14 places and slot in 3.
> 
> Both can be correct in some places, but not always.
> 
> For example:
> 
>> > -	 * Clear the ESID first so the entry is not valid while we are
>> > +	 * Clear the ESID first so the slot is not valid while we are
> That doesn't make sense with "slot", a slot is not valid, only an entry in a
> slot is valid.
> 
> Looking at the existing uses of slot they will all make sense if you change
> them to entry.

Sure, yeah will do the other way around.

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC, 6/8] powerpc/prom: Simplify the logic while fetching SLB size
  2015-07-21 10:21   ` [RFC, " Michael Ellerman
@ 2015-07-21 11:24     ` Anshuman Khandual
  0 siblings, 0 replies; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21 11:24 UTC (permalink / raw)
  To: Michael Ellerman, linuxppc-dev; +Cc: mikey

On 07/21/2015 03:51 PM, Michael Ellerman wrote:
> On Tue, 2015-21-07 at 06:58:44 UTC, Anshuman Khandual wrote:
>> > From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
>> > 
>> > This patch just simplifies the existing code logic while fetching
>> > the SLB size property from the device tree.
>> > 
>> > Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
>> > ---
>> >  arch/powerpc/kernel/prom.c | 12 +++++-------
>> >  1 file changed, 5 insertions(+), 7 deletions(-)
>> > 
>> > diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
>> > index 8b888b1..f6168e2 100644
>> > --- a/arch/powerpc/kernel/prom.c
>> > +++ b/arch/powerpc/kernel/prom.c
>> > @@ -223,14 +223,12 @@ static void __init check_cpu_slb_size(unsigned long node)
>> >  	const __be32 *slb_size_ptr;
>> >  
>> >  	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
>> > -	if (slb_size_ptr != NULL) {
>> > -		mmu_slb_size = be32_to_cpup(slb_size_ptr);
>> > -		return;
>> > -	}
>> > -	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
>> > -	if (slb_size_ptr != NULL) {
>> > -		mmu_slb_size = be32_to_cpup(slb_size_ptr);
>> > +	if (!slb_size_ptr) {
>> > +		slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
>> > +		if (!slb_size_ptr)
>> > +			return;
>> >  	}
>> > +	mmu_slb_size = be32_to_cpup(slb_size_ptr);
>> >  }
> It's still ugly. Why not go the whole way:
> 
> 
> 	p = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
> 	    of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
> 
> 	if (p)
> 		mmu_slb_size = be32_to_cpup(p);

Yeah this is better.

> 
> 
> And while you're at it, rename the function, it doesn't check anything. It
> initialises mmu_slb_size, so call it init_mmu_slb_size()?

Sure, will do.

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC, 7/8] powerpc/xmon: Drop 'valid' from the condition inside 'dump_segments'
  2015-07-21 10:00   ` [RFC, " Michael Ellerman
@ 2015-07-21 11:45     ` Anshuman Khandual
  2015-07-22  4:52       ` Michael Ellerman
  0 siblings, 1 reply; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21 11:45 UTC (permalink / raw)
  To: Michael Ellerman, linuxppc-dev; +Cc: mikey

On 07/21/2015 03:30 PM, Michael Ellerman wrote:
> On Tue, 2015-21-07 at 06:58:45 UTC, Anshuman Khandual wrote:
>> > From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
>> > 
>> > Value of 'valid' is zero when 'esid' is zero and it does not matter
>> > when 'esid' is non-zero. 
> Yes it does. It tells you whether the entry is valid?

Yeah, but it does not change the outcome of the if condition check
here. A non-zero esid will make the condition test pass irrespective
of the value of 'valid'. Yes, 'valid' will be checked inside the code
block to print details, but the point is that the value of 'valid' does
not make any difference to the 'if' condition check in the first place.
Unless I am getting tricked here somehow :)

> 
> In practice maybe you only see invalid entries that are entirely zero, and so
> they get skipped anyway, but that's not guaranteed.

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC, 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list
  2015-07-21 10:08   ` [RFC, " Michael Ellerman
@ 2015-07-21 11:48     ` Anshuman Khandual
  0 siblings, 0 replies; 23+ messages in thread
From: Anshuman Khandual @ 2015-07-21 11:48 UTC (permalink / raw)
  To: Michael Ellerman, linuxppc-dev; +Cc: mikey

On 07/21/2015 03:38 PM, Michael Ellerman wrote:
> On Tue, 2015-21-07 at 06:58:46 UTC, Anshuman Khandual wrote:
>> > From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
>> > 
>> > This patch adds some more elements to the existing PACA dump list
>> > inside a xmon session which can be listed here.
>> > 
>> > 	- hmi_event_available
>> > 	- dscr_default
>> > 	- vmalloc_sllp
>> > 	- slb_cache_ptr
>> > 	- sprg_vdso
>> > 	- tm_scratch
>> > 	- core_idle_state_ptr
>> > 	- thread_idle_state
>> > 	- thread_mask
> This is probably OK, except you broke the ppc64e build again.

Will fix it up while sending this as a patch series. Thanks for
the quick review of the series.

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC, 7/8] powerpc/xmon: Drop 'valid' from the condition inside 'dump_segments'
  2015-07-21 11:45     ` Anshuman Khandual
@ 2015-07-22  4:52       ` Michael Ellerman
  0 siblings, 0 replies; 23+ messages in thread
From: Michael Ellerman @ 2015-07-22  4:52 UTC (permalink / raw)
  To: Anshuman Khandual; +Cc: linuxppc-dev, mikey

On Tue, 2015-07-21 at 17:15 +0530, Anshuman Khandual wrote:
> On 07/21/2015 03:30 PM, Michael Ellerman wrote:
> > On Tue, 2015-21-07 at 06:58:45 UTC, Anshuman Khandual wrote:
> >> > From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> >> > 
> >> > Value of 'valid' is zero when 'esid' is zero and it does not matter
> >> > when 'esid' is non-zero. 
> > Yes it does. It tells you whether the entry is valid?
> 
> Yeah but it does not change the outcome of the if condition check
> here. Non-zero esid will make the condition test pass irrespective
> of the value of 'valid'. Yes, valid will be checked inside the code
> block to print details, the point was value of valid does not make
> any difference to the 'if' condition check in the first place.
> Unless I am getting tricked here some how :)

No you're right, I was confused by the bitwise or.

Please make it:  if (esid || vsid)

And drop valid entirely, just do the check in the if condition.

And fix the change log:

  Hence the variable 'value'
                      ^
		      valid


cheers

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding
  2015-07-21  6:58 ` [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding Anshuman Khandual
@ 2015-07-22  5:51   ` Michael Ellerman
  2015-07-22  5:57     ` Gabriel Paubert
  2015-07-22 12:17     ` Segher Boessenkool
  0 siblings, 2 replies; 23+ messages in thread
From: Michael Ellerman @ 2015-07-22  5:51 UTC (permalink / raw)
  To: Anshuman Khandual; +Cc: linuxppc-dev, mikey

On Tue, 2015-07-21 at 12:28 +0530, Anshuman Khandual wrote:
> From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> 
> This patch adds some documentation to 'patch_slb_encoding' function
> explaining about how it clears the existing immediate value in the
> given instruction and inserts a new one there.
> 
> diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> index dcba4c2..8083a9e 100644
> --- a/arch/powerpc/mm/slb.c
> +++ b/arch/powerpc/mm/slb.c
> @@ -278,7 +278,13 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
>  static inline void patch_slb_encoding(unsigned int *insn_addr,
>  				      unsigned int immed)
>  {
> -	int insn = (*insn_addr & 0xffff0000) | immed;
> +	/*
> +	 * Currently this patches only "li" and "cmpldi"
> +	 * instructions with an immediate value. Here it
> +	 * just clears the existing immediate value from
> +	 * the instruction and inserts a new one there.
> +	 */
> +	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
>  	patch_instruction(insn_addr, insn);
>  }


How about:

	/*
	 * This function patches either an li or a cmpldi instruction with
	 * a new immediate value. This relies on the fact that both li
	 * (which is actually ori) and cmpldi both take a 16-bit immediate
	 * value, and it is situated in the same location in the instruction,
	 * ie. bits 0-15.
	 * To patch the value we read the existing instruction, clear the
	 * immediate value, and or in our new value, then write the instruction
	 * back.
	 */
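
For example, patching an immediate of 0x490 into a hypothetical "li r10,0"
(instruction word 0x39400000) works out as:

	0x39400000 & 0xffff0000 = 0x39400000	/* clear the immediate field */
	0x39400000 | 0x00000490 = 0x39400490	/* ie. li r10,0x490 */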

cheers

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding
  2015-07-22  5:51   ` Michael Ellerman
@ 2015-07-22  5:57     ` Gabriel Paubert
  2015-07-22  9:01       ` Michael Ellerman
  2015-07-22 12:17     ` Segher Boessenkool
  1 sibling, 1 reply; 23+ messages in thread
From: Gabriel Paubert @ 2015-07-22  5:57 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: Anshuman Khandual, linuxppc-dev, mikey

On Wed, Jul 22, 2015 at 03:51:03PM +1000, Michael Ellerman wrote:
> On Tue, 2015-07-21 at 12:28 +0530, Anshuman Khandual wrote:
> > From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> > 
> > This patch adds some documentation to 'patch_slb_encoding' function
> > explaining about how it clears the existing immediate value in the
> > given instruction and inserts a new one there.
> > 
> > diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> > index dcba4c2..8083a9e 100644
> > --- a/arch/powerpc/mm/slb.c
> > +++ b/arch/powerpc/mm/slb.c
> > @@ -278,7 +278,13 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
> >  static inline void patch_slb_encoding(unsigned int *insn_addr,
> >  				      unsigned int immed)
> >  {
> > -	int insn = (*insn_addr & 0xffff0000) | immed;
> > +	/*
> > +	 * Currently this patches only "li" and "cmpldi"
> > +	 * instructions with an immediate value. Here it
> > +	 * just clears the existing immediate value from
> > +	 * the instruction and inserts a new one there.
> > +	 */
> > +	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
> >  	patch_instruction(insn_addr, insn);
> >  }
> 
> 
> How about:
> 
> 	/*
> 	 * This function patches either an li or a cmpldi instruction with
> 	 * a new immediate value. This relies on the fact that both li
> 	 * (which is actually ori) and cmpldi both take a 16-bit immediate

Hmm, li is actually encoded as addi with r0 as source register...

> 	 * value, and it is situated in the same location in the instruction,
> 	 * ie. bits 0-15.

In PPC documentation, it's rather bits 16-31 (big endian bit order).
Or say lower half which is endian agnostic.

    Cheers,
    Gabriel

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding
  2015-07-22  5:57     ` Gabriel Paubert
@ 2015-07-22  9:01       ` Michael Ellerman
  0 siblings, 0 replies; 23+ messages in thread
From: Michael Ellerman @ 2015-07-22  9:01 UTC (permalink / raw)
  To: Gabriel Paubert; +Cc: Anshuman Khandual, linuxppc-dev, mikey

On Wed, 2015-07-22 at 07:57 +0200, Gabriel Paubert wrote:
> On Wed, Jul 22, 2015 at 03:51:03PM +1000, Michael Ellerman wrote:
> > On Tue, 2015-07-21 at 12:28 +0530, Anshuman Khandual wrote:
> > > From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> > > 
> > > This patch adds some documentation to 'patch_slb_encoding' function
> > > explaining about how it clears the existing immediate value in the
> > > given instruction and inserts a new one there.
> > > 
> > > diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> > > index dcba4c2..8083a9e 100644
> > > --- a/arch/powerpc/mm/slb.c
> > > +++ b/arch/powerpc/mm/slb.c
> > > @@ -278,7 +278,13 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
> > >  static inline void patch_slb_encoding(unsigned int *insn_addr,
> > >  				      unsigned int immed)
> > >  {
> > > -	int insn = (*insn_addr & 0xffff0000) | immed;
> > > +	/*
> > > +	 * Currently this patches only "li" and "cmpldi"
> > > +	 * instructions with an immediate value. Here it
> > > +	 * just clears the existing immediate value from
> > > +	 * the instruction and inserts a new one there.
> > > +	 */
> > > +	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
> > >  	patch_instruction(insn_addr, insn);
> > >  }
> > 
> > 
> > How about:
> > 
> > 	/*
> > 	 * This function patches either an li or a cmpldi instruction with
> > 	 * a new immediate value. This relies on the fact that both li
> > 	 * (which is actually ori) and cmpldi both take a 16-bit immediate
> 
> Hmm, li is actually encoded as addi with r0 as source register...

Correct.

> > 	 * value, and it is situated in the same location in the instruction,
> > 	 * ie. bits 0-15.
> 
> In PPC documentation, it's rather bits 16-31 (big endian bit order).
> Or say lower half which is endian agnostic.

Yeah, but who reads the PPC documentation ;)

In the kernel we almost always use the sane bit numbering, so I'd use that, but
maybe "low 16-bits" will avoid confusion.

cheers

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC 4/8] powerpc/slb: Add some helper functions to improve modularization
  2015-07-21  6:58 ` [RFC 4/8] powerpc/slb: Add some helper functions to improve modularization Anshuman Khandual
@ 2015-07-22  9:19   ` Michael Ellerman
  0 siblings, 0 replies; 23+ messages in thread
From: Michael Ellerman @ 2015-07-22  9:19 UTC (permalink / raw)
  To: Anshuman Khandual; +Cc: linuxppc-dev, mikey

On Tue, 2015-07-21 at 12:28 +0530, Anshuman Khandual wrote:
> From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> 
> This patch adds the following helper functions to improve modularization
> and readability of the code.
> 
> (1) slb_invalid_all: 		Invalidates entire SLB

This reads badly. Although invalid can be a verb, the meaning of invalid as a
verb is not correct here. You want "invalidate".

> (2) slb_invalid_paca_slots: 	Invalidate SLB entries present in PACA

Ditto.

But, I think that's the wrong abstraction.

We should just have one routine, slb_invalidate(), which deals with all the
mess. ie. checking the MMU_FTR and the offset etc. So basically the whole
if/else.
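
As a rough sketch, built from the existing code in switch_slb() (the name and
structure here are illustrative only):

	static void slb_invalidate(void)
	{
		unsigned long offset = get_paca()->slb_cache_ptr;
		unsigned long slbie_data;
		int i;

		if (mmu_has_feature(MMU_FTR_NO_SLBIE_B) ||
		    offset > SLB_CACHE_ENTRIES) {
			__slb_flush_and_rebolt();
			return;
		}

		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT;	/* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C;	/* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	}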

> (3) kernel_linear_vsid_flags:	VSID flags for kernel linear mapping
> (4) kernel_virtual_vsid_flags:	VSID flags for kernel virtual mapping

I don't like these names either.

> diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> index cbeaaa2..dcba4c2 100644
> --- a/arch/powerpc/mm/slb.c
> +++ b/arch/powerpc/mm/slb.c
> @@ -94,18 +94,37 @@ static inline void new_shadowed_slbe(unsigned long ea, int ssize,
>  		     : "memory" );
>  }
>  
> +static inline unsigned long kernel_linear_vsid_flags(void)
> +{
> +	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
> +}

mmu_linear_vsid_flags() ?

> +
> +static inline unsigned long kernel_virtual_vsid_flags(void)
> +{
> +	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
> +}

mmu_vmalloc_vsid_flags() ?

etc.

ie. have the function names match the mmu psize names. I don't think we need
"kernel" in the name, I think that's implied.

cheers

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC 3/8] powerpc/slb: Define macros for the bolted slots
  2015-07-21  6:58 ` [RFC 3/8] powerpc/slb: Define macros for the bolted slots Anshuman Khandual
@ 2015-07-22  9:32   ` Michael Ellerman
  0 siblings, 0 replies; 23+ messages in thread
From: Michael Ellerman @ 2015-07-22  9:32 UTC (permalink / raw)
  To: Anshuman Khandual; +Cc: linuxppc-dev, mikey

On Tue, 2015-07-21 at 12:28 +0530, Anshuman Khandual wrote:
> From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>
> 
> This patch defines macros for all the three bolted SLB slots. This also
> renames the 'create_shadowed_slb' function as 'new_shadowed_slb'.
> 
> Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
> ---
>  arch/powerpc/mm/slb.c | 27 +++++++++++++++------------
>  1 file changed, 15 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> index 3842a54..cbeaaa2 100644
> --- a/arch/powerpc/mm/slb.c
> +++ b/arch/powerpc/mm/slb.c
> @@ -25,6 +25,9 @@
>  #include <asm/udbg.h>
>  #include <asm/code-patching.h>
>  
> +#define SLOT_KLINR  0	/* kernel linear map  (0xc00000000) */

Call it LINEAR_SLOT ?

> +#define SLOT_KVIRT  1	/* kernel virtual map (0xd00000000) */

VMALLOC_SLOT

> +#define SLOT_KSTACK 2	/* kernel stack map   (0xf00000000) */

KSTACK_SLOT

And the comment is wrong, it's not 0xf00.., that's the vmemmap.


I know we're inconsistent about FOO_SLOT vs SLOT_FOO at times, but I think in
this case it reads better as FOO_SLOT.

Actually even better, make it an enum?
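
Something like this perhaps (leaving the stack slot comment without the bogus
address):

	enum slb_bolted_slots {
		LINEAR_SLOT  = 0,	/* kernel linear map  (0xc...) */
		VMALLOC_SLOT = 1,	/* kernel virtual map (0xd...) */
		KSTACK_SLOT  = 2,	/* kernel stack */
	};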

cheers

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding
  2015-07-22  5:51   ` Michael Ellerman
  2015-07-22  5:57     ` Gabriel Paubert
@ 2015-07-22 12:17     ` Segher Boessenkool
  1 sibling, 0 replies; 23+ messages in thread
From: Segher Boessenkool @ 2015-07-22 12:17 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: Anshuman Khandual, linuxppc-dev, mikey

On Wed, Jul 22, 2015 at 03:51:03PM +1000, Michael Ellerman wrote:
> How about:
> 
> 	/*
> 	 * This function patches either an li or a cmpldi instruction with
> 	 * a new immediate value. This relies on the fact that both li
> 	 * (which is actually ori) and cmpldi both take a 16-bit immediate
> 	 * value, and it is situated in the same location in the instruction,
> 	 * ie. bits 0-15.
> 	 * To patch the value we read the existing instruction, clear the
> 	 * immediate value, and or in our new value, then write the instruction
> 	 * back.
> 	 */

As Gabriel says, li is addi.  It takes a 16-bit sign-extended immediate,
while cmpldi takes a 16-bit zero-extended immediate.  This function
doesn't deal with that difference, it probably should (I didn't check if
the callers take care; there should be an assertion somewhere).
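
Something along these lines in patch_slb_encoding() would catch an immediate
that the two encodings would interpret differently (just a sketch, untested):

	/* Values with the top bit set are sign-extended by li (addi),
	 * but zero-extended by cmpldi. */
	WARN_ON(immed >= 0x8000);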


Segher

^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2015-07-22 12:17 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-07-21  6:58 [RFC 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
2015-07-21  6:58 ` [RFC 2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot' Anshuman Khandual
2015-07-21  9:46   ` [RFC, " Michael Ellerman
2015-07-21 11:23     ` [RFC,2/8] " Anshuman Khandual
2015-07-21  6:58 ` [RFC 3/8] powerpc/slb: Define macros for the bolted slots Anshuman Khandual
2015-07-22  9:32   ` Michael Ellerman
2015-07-21  6:58 ` [RFC 4/8] powerpc/slb: Add some helper functions to improve modularization Anshuman Khandual
2015-07-22  9:19   ` Michael Ellerman
2015-07-21  6:58 ` [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding Anshuman Khandual
2015-07-22  5:51   ` Michael Ellerman
2015-07-22  5:57     ` Gabriel Paubert
2015-07-22  9:01       ` Michael Ellerman
2015-07-22 12:17     ` Segher Boessenkool
2015-07-21  6:58 ` [RFC 6/8] powerpc/prom: Simplify the logic while fetching SLB size Anshuman Khandual
2015-07-21 10:21   ` [RFC, " Michael Ellerman
2015-07-21 11:24     ` Anshuman Khandual
2015-07-21  6:58 ` [RFC 7/8] powerpc/xmon: Drop 'valid' from the condition inside 'dump_segments' Anshuman Khandual
2015-07-21 10:00   ` [RFC, " Michael Ellerman
2015-07-21 11:45     ` Anshuman Khandual
2015-07-22  4:52       ` Michael Ellerman
2015-07-21  6:58 ` [RFC 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list Anshuman Khandual
2015-07-21 10:08   ` [RFC, " Michael Ellerman
2015-07-21 11:48     ` Anshuman Khandual
