* [PATCH 1/2] powerpc/fadump: use static allocation for reserved memory ranges
@ 2020-03-10 20:27 Hari Bathini
  2020-03-10 20:27 ` [PATCH 2/2] powerpc/fadump: consider reserved ranges while reserving memory Hari Bathini
  2020-04-20  5:10 ` [PATCH 1/2] powerpc/fadump: use static allocation for reserved memory ranges Mahesh J Salgaonkar
  0 siblings, 2 replies; 5+ messages in thread
From: Hari Bathini @ 2020-03-10 20:27 UTC (permalink / raw)
  To: Michael Ellerman, linuxppc-dev
  Cc: Vasant Hegde, stable, Sourabh Jain, Mahesh J Salgaonkar

At times, memory ranges have to be looked up during early boot, before
the kernel is ready for dynamic memory allocation. In particular, the
reserved-ranges lookup is needed during FADump memory reservation.
Without accounting for reserved-ranges while reserving memory for
FADump, MPIPL boot fails with memory corruption issues. So, extend the
memory ranges handling to support static allocation and populate the
reserved memory ranges during early boot.
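
For reference, the early-boot lookup boils down to walking the
flattened-tree "reserved-ranges" property cell by cell, before any
allocator is up. A minimal, illustrative sketch of that pattern follows;
the helper name is made up, pr_debug() stands in for the actual range
bookkeeping, and the 2-address-cells/2-size-cells layout is assumed from
the comment in the patch below:

	#include <linux/init.h>
	#include <linux/of.h>
	#include <linux/of_fdt.h>
	#include <linux/printk.h>

	/* Illustrative only: walk "reserved-ranges" from the flat device tree. */
	static void __init walk_reserved_ranges_sketch(unsigned long node)
	{
		const __be32 *prop;
		unsigned long i;
		int len;

		prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
		if (!prop)
			return;

		/* Each range is an (address, size) pair, 2 cells each. */
		for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
			u64 base = of_read_number(prop + (i * 4), 2);
			u64 size = of_read_number(prop + (i * 4) + 2, 2);

			if (size)
				pr_debug("reserved range: 0x%llx - 0x%llx\n",
					 base, base + size);
		}
	}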

Fixes: dda9dbfeeb7a ("powerpc/fadump: consider reserved ranges while releasing memory")
Cc: stable@vger.kernel.org # v5.4+
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
---
 arch/powerpc/include/asm/fadump-internal.h |    4 +
 arch/powerpc/kernel/fadump.c               |   77 ++++++++++++++++------------
 2 files changed, 48 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
index c814a2b..8d61c8f 100644
--- a/arch/powerpc/include/asm/fadump-internal.h
+++ b/arch/powerpc/include/asm/fadump-internal.h
@@ -64,12 +64,14 @@ struct fadump_memory_range {
 };
 
 /* fadump memory ranges info */
+#define RNG_NAME_SZ			16
 struct fadump_mrange_info {
-	char				name[16];
+	char				name[RNG_NAME_SZ];
 	struct fadump_memory_range	*mem_ranges;
 	u32				mem_ranges_sz;
 	u32				mem_range_cnt;
 	u32				max_mem_ranges;
+	bool				is_static;
 };
 
 /* Platform specific callback functions */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index ff0114a..7fcf4a8f 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -38,8 +38,17 @@ static void __init fadump_reserve_crash_area(u64 base);
 
 #ifndef CONFIG_PRESERVE_FA_DUMP
 static DEFINE_MUTEX(fadump_mutex);
-struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 };
-struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 };
+struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
+
+#define RESERVED_RNGS_SZ	16384 /* 16K - 128 entries */
+#define RESERVED_RNGS_CNT	(RESERVED_RNGS_SZ / \
+				 sizeof(struct fadump_memory_range))
+static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
+struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
+						   RESERVED_RNGS_SZ, 0,
+						   RESERVED_RNGS_CNT, true };
+
+static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
 
 #ifdef CONFIG_CMA
 static struct cma *fadump_cma;
@@ -108,6 +117,11 @@ static int __init fadump_cma_init(void) { return 1; }
 int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
 				      int depth, void *data)
 {
+	if (depth == 0) {
+		early_init_dt_scan_reserved_ranges(node);
+		return 0;
+	}
+
 	if (depth != 1)
 		return 0;
 
@@ -726,10 +740,14 @@ void fadump_free_cpu_notes_buf(void)
 
 static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
 {
+	if (mrange_info->is_static) {
+		mrange_info->mem_range_cnt = 0;
+		return;
+	}
+
 	kfree(mrange_info->mem_ranges);
-	mrange_info->mem_ranges = NULL;
-	mrange_info->mem_ranges_sz = 0;
-	mrange_info->max_mem_ranges = 0;
+	memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
+	       (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
 }
 
 /*
@@ -786,6 +804,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
 		if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
 			int ret;
 
+			if (mrange_info->is_static) {
+				pr_err("Reached array size limit for %s memory ranges\n",
+				       mrange_info->name);
+				return -ENOSPC;
+			}
+
 			ret = fadump_alloc_mem_ranges(mrange_info);
 			if (ret)
 				return ret;
@@ -1202,20 +1226,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
  * Scan reserved-ranges to consider them while reserving/releasing
  * memory for FADump.
  */
-static inline int fadump_scan_reserved_mem_ranges(void)
+static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
 {
-	struct device_node *root;
 	const __be32 *prop;
 	int len, ret = -1;
 	unsigned long i;
 
-	root = of_find_node_by_path("/");
-	if (!root)
-		return ret;
+	/* reserved-ranges already scanned */
+	if (reserved_mrange_info.mem_range_cnt != 0)
+		return;
 
-	prop = of_get_property(root, "reserved-ranges", &len);
+	prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
 	if (!prop)
-		return ret;
+		return;
 
 	/*
 	 * Each reserved range is an (address,size) pair, 2 cells each,
@@ -1237,7 +1260,8 @@ static inline int fadump_scan_reserved_mem_ranges(void)
 		}
 	}
 
-	return ret;
+	/* Compact reserved ranges */
+	sort_and_merge_mem_ranges(&reserved_mrange_info);
 }
 
 /*
@@ -1251,32 +1275,21 @@ static void fadump_release_memory(u64 begin, u64 end)
 	u64 ra_start, ra_end, tstart;
 	int i, ret;
 
-	fadump_scan_reserved_mem_ranges();
-
 	ra_start = fw_dump.reserve_dump_area_start;
 	ra_end = ra_start + fw_dump.reserve_dump_area_size;
 
 	/*
-	 * Add reserved dump area to reserved ranges list
-	 * and exclude all these ranges while releasing memory.
+	 * If reserved ranges array limit is hit, overwrite the last reserved
+	 * memory range with reserved dump area to ensure it is excluded from
+	 * the memory being released (reused for next FADump registration).
 	 */
-	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
-	if (ret != 0) {
-		/*
-		 * Not enough memory to setup reserved ranges but the system is
-		 * running shortage of memory. So, release all the memory except
-		 * Reserved dump area (reused for next fadump registration).
-		 */
-		if (begin < ra_end && end > ra_start) {
-			if (begin < ra_start)
-				fadump_release_reserved_area(begin, ra_start);
-			if (end > ra_end)
-				fadump_release_reserved_area(ra_end, end);
-		} else
-			fadump_release_reserved_area(begin, end);
+	if (reserved_mrange_info.mem_range_cnt ==
+	    reserved_mrange_info.max_mem_ranges)
+		reserved_mrange_info.mem_range_cnt--;
 
+	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
+	if (ret != 0)
 		return;
-	}
 
 	/* Get the reserved ranges list in order first. */
 	sort_and_merge_mem_ranges(&reserved_mrange_info);


* [PATCH 2/2] powerpc/fadump: consider reserved ranges while reserving memory
  2020-03-10 20:27 [PATCH 1/2] powerpc/fadump: use static allocation for reserved memory ranges Hari Bathini
@ 2020-03-10 20:27 ` Hari Bathini
  2020-04-20  5:20   ` Mahesh J Salgaonkar
  2020-04-20  5:10 ` [PATCH 1/2] powerpc/fadump: use static allocation for reserved memory ranges Mahesh J Salgaonkar
  1 sibling, 1 reply; 5+ messages in thread
From: Hari Bathini @ 2020-03-10 20:27 UTC (permalink / raw)
  To: Michael Ellerman, linuxppc-dev
  Cc: Vasant Hegde, stable, Sourabh Jain, Mahesh J Salgaonkar

Commit 0962e8004e97 ("powerpc/prom: Scan reserved-ranges node for
memory reservations") added support to parse the reserved-ranges DT
node and reserve kernel memory falling in these ranges for F/W
purposes. Memory reserved for FADump should not overlap with these
ranges, as that could corrupt memory meant for F/W or the crashed
kernel's memory to be exported as vmcore.

But since commit 579ca1a27675 ("powerpc/fadump: make use of memblock's
bottom up allocation mode"), memblock_find_in_range() is used to find
the appropriate area to reserve memory for FADump, and it can't
account for reserved-ranges as those ranges are reserved only after
the FADump memory reservation.

With reserved-ranges now being populated during early boot, look out
for these memory ranges while reserving memory for FADump. Without
this change, MPIPL on PowerNV systems aborts with a hostboot failure
when the memory reserved for FADump is less than 4096MB.
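
For clarity, the avoidance logic reduces to a half-open interval
overlap test against the sorted reserved ranges, skipping the candidate
base past any range it collides with. A stripped-down sketch of that
test (the standalone helper name is illustrative only):

	#include <linux/types.h>

	/* [base, end) and [rbase, rend) overlap iff each starts before the other ends. */
	static bool ranges_overlap(u64 base, u64 end, u64 rbase, u64 rend)
	{
		return (end > rbase) && (base < rend);
	}

E.g. with a reserved range [0x10000000, 0x20000000), a candidate
[0x18000000, 0x18000000 + size) overlaps, so the search moves the
candidate base up to PAGE_ALIGN(0x20000000) and tries again.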

Fixes: 579ca1a27675 ("powerpc/fadump: make use of memblock's bottom up allocation mode")
Cc: stable@vger.kernel.org # v5.4+
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
---
 arch/powerpc/kernel/fadump.c |   76 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 66 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 7fcf4a8f..ab83be9 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -443,10 +443,70 @@ static int __init fadump_get_boot_mem_regions(void)
 	return ret;
 }
 
+/*
+ * Returns true, if the given range overlaps with reserved memory ranges
+ * starting at idx. Also, updates idx to index of overlapping memory range
+ * with the given memory range.
+ * False, otherwise.
+ */
+static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
+{
+	bool ret = false;
+	int i;
+
+	for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
+		u64 rbase = reserved_mrange_info.mem_ranges[i].base;
+		u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
+
+		if (end <= rbase)
+			break;
+
+		if ((end > rbase) &&  (base < rend)) {
+			*idx = i;
+			ret = true;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Locate a suitable memory area to reserve memory for FADump. While at it,
+ * lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
+ */
+static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
+{
+	struct fadump_memory_range *mrngs;
+	phys_addr_t mstart, mend;
+	int idx = 0;
+	u64 i;
+
+	mrngs = reserved_mrange_info.mem_ranges;
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+				&mstart, &mend, NULL) {
+		pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
+			 i, mstart, mend, base);
+
+		if (mstart > base)
+			base = PAGE_ALIGN(mstart);
+
+		while ((mend > base) && ((mend - base) >= size)) {
+			if (!overlaps_reserved_ranges(base, base + size, &idx))
+				goto out;
+
+			base = mrngs[idx].base + mrngs[idx].size;
+			base = PAGE_ALIGN(base);
+		}
+	}
+
+out:
+	return base;
+}
+
 int __init fadump_reserve_mem(void)
 {
-	u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE;
-	bool is_memblock_bottom_up = memblock_bottom_up();
+	u64 base, size, mem_boundary, bootmem_min;
 	int ret = 1;
 
 	if (!fw_dump.fadump_enabled)
@@ -467,9 +527,9 @@ int __init fadump_reserve_mem(void)
 			PAGE_ALIGN(fadump_calculate_reserve_size());
 #ifdef CONFIG_CMA
 		if (!fw_dump.nocma) {
-			align = FADUMP_CMA_ALIGNMENT;
 			fw_dump.boot_memory_size =
-				ALIGN(fw_dump.boot_memory_size, align);
+				ALIGN(fw_dump.boot_memory_size,
+				      FADUMP_CMA_ALIGNMENT);
 		}
 #endif
 
@@ -537,13 +597,9 @@ int __init fadump_reserve_mem(void)
 		 * Reserve memory at an offset closer to bottom of the RAM to
 		 * minimize the impact of memory hot-remove operation.
 		 */
-		memblock_set_bottom_up(true);
-		base = memblock_find_in_range(base, mem_boundary, size, align);
-
-		/* Restore the previous allocation mode */
-		memblock_set_bottom_up(is_memblock_bottom_up);
+		base = fadump_locate_reserve_mem(base, size);
 
-		if (!base) {
+		if (base > (mem_boundary - size)) {
 			pr_err("Failed to find memory chunk for reservation!\n");
 			goto error_out;
 		}


* Re: [PATCH 1/2] powerpc/fadump: use static allocation for reserved memory ranges
  2020-03-10 20:27 [PATCH 1/2] powerpc/fadump: use static allocation for reserved memory ranges Hari Bathini
  2020-03-10 20:27 ` [PATCH 2/2] powerpc/fadump: consider reserved ranges while reserving memory Hari Bathini
@ 2020-04-20  5:10 ` Mahesh J Salgaonkar
  1 sibling, 0 replies; 5+ messages in thread
From: Mahesh J Salgaonkar @ 2020-04-20  5:10 UTC (permalink / raw)
  To: Hari Bathini; +Cc: Vasant Hegde, Sourabh Jain, linuxppc-dev

On 2020-03-11 01:57:00 Wed, Hari Bathini wrote:
> At times, memory ranges have to be looked up during early boot, before
> the kernel is ready for dynamic memory allocation. In particular, the
> reserved-ranges lookup is needed during FADump memory reservation.
> Without accounting for reserved-ranges while reserving memory for
> FADump, MPIPL boot fails with memory corruption issues. So, extend the
> memory ranges handling to support static allocation and populate the
> reserved memory ranges during early boot.
> 
> Fixes: dda9dbfeeb7a ("powerpc/fadump: consider reserved ranges while releasing memory")
> Cc: stable@vger.kernel.org # v5.4+
> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>

Reviewed-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>

Thanks,
-Mahesh.

> ---
>  arch/powerpc/include/asm/fadump-internal.h |    4 +
>  arch/powerpc/kernel/fadump.c               |   77 ++++++++++++++++------------
>  2 files changed, 48 insertions(+), 33 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
> index c814a2b..8d61c8f 100644
> --- a/arch/powerpc/include/asm/fadump-internal.h
> +++ b/arch/powerpc/include/asm/fadump-internal.h
> @@ -64,12 +64,14 @@ struct fadump_memory_range {
>  };
>  
>  /* fadump memory ranges info */
> +#define RNG_NAME_SZ			16
>  struct fadump_mrange_info {
> -	char				name[16];
> +	char				name[RNG_NAME_SZ];
>  	struct fadump_memory_range	*mem_ranges;
>  	u32				mem_ranges_sz;
>  	u32				mem_range_cnt;
>  	u32				max_mem_ranges;
> +	bool				is_static;
>  };
>  
>  /* Platform specific callback functions */
> diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
> index ff0114a..7fcf4a8f 100644
> --- a/arch/powerpc/kernel/fadump.c
> +++ b/arch/powerpc/kernel/fadump.c
> @@ -38,8 +38,17 @@ static void __init fadump_reserve_crash_area(u64 base);
>  
>  #ifndef CONFIG_PRESERVE_FA_DUMP
>  static DEFINE_MUTEX(fadump_mutex);
> -struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 };
> -struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 };
> +struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
> +
> +#define RESERVED_RNGS_SZ	16384 /* 16K - 128 entries */
> +#define RESERVED_RNGS_CNT	(RESERVED_RNGS_SZ / \
> +				 sizeof(struct fadump_memory_range))
> +static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
> +struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
> +						   RESERVED_RNGS_SZ, 0,
> +						   RESERVED_RNGS_CNT, true };
> +
> +static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
>  
>  #ifdef CONFIG_CMA
>  static struct cma *fadump_cma;
> @@ -108,6 +117,11 @@ static int __init fadump_cma_init(void) { return 1; }
>  int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
>  				      int depth, void *data)
>  {
> +	if (depth == 0) {
> +		early_init_dt_scan_reserved_ranges(node);
> +		return 0;
> +	}
> +
>  	if (depth != 1)
>  		return 0;
>  
> @@ -726,10 +740,14 @@ void fadump_free_cpu_notes_buf(void)
>  
>  static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
>  {
> +	if (mrange_info->is_static) {
> +		mrange_info->mem_range_cnt = 0;
> +		return;
> +	}
> +
>  	kfree(mrange_info->mem_ranges);
> -	mrange_info->mem_ranges = NULL;
> -	mrange_info->mem_ranges_sz = 0;
> -	mrange_info->max_mem_ranges = 0;
> +	memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
> +	       (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
>  }
>  
>  /*
> @@ -786,6 +804,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
>  		if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
>  			int ret;
>  
> +			if (mrange_info->is_static) {
> +				pr_err("Reached array size limit for %s memory ranges\n",
> +				       mrange_info->name);
> +				return -ENOSPC;
> +			}
> +
>  			ret = fadump_alloc_mem_ranges(mrange_info);
>  			if (ret)
>  				return ret;
> @@ -1202,20 +1226,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
>   * Scan reserved-ranges to consider them while reserving/releasing
>   * memory for FADump.
>   */
> -static inline int fadump_scan_reserved_mem_ranges(void)
> +static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
>  {
> -	struct device_node *root;
>  	const __be32 *prop;
>  	int len, ret = -1;
>  	unsigned long i;
>  
> -	root = of_find_node_by_path("/");
> -	if (!root)
> -		return ret;
> +	/* reserved-ranges already scanned */
> +	if (reserved_mrange_info.mem_range_cnt != 0)
> +		return;
>  
> -	prop = of_get_property(root, "reserved-ranges", &len);
> +	prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
>  	if (!prop)
> -		return ret;
> +		return;
>  
>  	/*
>  	 * Each reserved range is an (address,size) pair, 2 cells each,
> @@ -1237,7 +1260,8 @@ static inline int fadump_scan_reserved_mem_ranges(void)
>  		}
>  	}
>  
> -	return ret;
> +	/* Compact reserved ranges */
> +	sort_and_merge_mem_ranges(&reserved_mrange_info);
>  }
>  
>  /*
> @@ -1251,32 +1275,21 @@ static void fadump_release_memory(u64 begin, u64 end)
>  	u64 ra_start, ra_end, tstart;
>  	int i, ret;
>  
> -	fadump_scan_reserved_mem_ranges();
> -
>  	ra_start = fw_dump.reserve_dump_area_start;
>  	ra_end = ra_start + fw_dump.reserve_dump_area_size;
>  
>  	/*
> -	 * Add reserved dump area to reserved ranges list
> -	 * and exclude all these ranges while releasing memory.
> +	 * If reserved ranges array limit is hit, overwrite the last reserved
> +	 * memory range with reserved dump area to ensure it is excluded from
> +	 * the memory being released (reused for next FADump registration).
>  	 */
> -	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
> -	if (ret != 0) {
> -		/*
> -		 * Not enough memory to setup reserved ranges but the system is
> -		 * running shortage of memory. So, release all the memory except
> -		 * Reserved dump area (reused for next fadump registration).
> -		 */
> -		if (begin < ra_end && end > ra_start) {
> -			if (begin < ra_start)
> -				fadump_release_reserved_area(begin, ra_start);
> -			if (end > ra_end)
> -				fadump_release_reserved_area(ra_end, end);
> -		} else
> -			fadump_release_reserved_area(begin, end);
> +	if (reserved_mrange_info.mem_range_cnt ==
> +	    reserved_mrange_info.max_mem_ranges)
> +		reserved_mrange_info.mem_range_cnt--;
>  
> +	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
> +	if (ret != 0)
>  		return;
> -	}
>  
>  	/* Get the reserved ranges list in order first. */
>  	sort_and_merge_mem_ranges(&reserved_mrange_info);
> 

-- 
Mahesh J Salgaonkar


* Re: [PATCH 2/2] powerpc/fadump: consider reserved ranges while reserving memory
  2020-03-10 20:27 ` [PATCH 2/2] powerpc/fadump: consider reserved ranges while reserving memory Hari Bathini
@ 2020-04-20  5:20   ` Mahesh J Salgaonkar
  2020-04-20  7:30     ` Hari Bathini
  0 siblings, 1 reply; 5+ messages in thread
From: Mahesh J Salgaonkar @ 2020-04-20  5:20 UTC (permalink / raw)
  To: Hari Bathini; +Cc: Vasant Hegde, Sourabh Jain, linuxppc-dev

On 2020-03-11 01:57:10 Wed, Hari Bathini wrote:
> Commit 0962e8004e97 ("powerpc/prom: Scan reserved-ranges node for
> memory reservations") added support to parse the reserved-ranges DT
> node and reserve kernel memory falling in these ranges for F/W
> purposes. Memory reserved for FADump should not overlap with these
> ranges, as that could corrupt memory meant for F/W or the crashed
> kernel's memory to be exported as vmcore.
> 
> But since commit 579ca1a27675 ("powerpc/fadump: make use of memblock's
> bottom up allocation mode"), memblock_find_in_range() is used to find
> the appropriate area to reserve memory for FADump, and it can't
> account for reserved-ranges as those ranges are reserved only after
> the FADump memory reservation.
> 
> With reserved-ranges now being populated during early boot, look out
> for these memory ranges while reserving memory for FADump. Without
> this change, MPIPL on PowerNV systems aborts with a hostboot failure
> when the memory reserved for FADump is less than 4096MB.
> 
> Fixes: 579ca1a27675 ("powerpc/fadump: make use of memblock's bottom up allocation mode")
> Cc: stable@vger.kernel.org # v5.4+
> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
> ---
>  arch/powerpc/kernel/fadump.c |   76 ++++++++++++++++++++++++++++++++++++------
>  1 file changed, 66 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
> index 7fcf4a8f..ab83be9 100644
> --- a/arch/powerpc/kernel/fadump.c
> +++ b/arch/powerpc/kernel/fadump.c
> @@ -443,10 +443,70 @@ static int __init fadump_get_boot_mem_regions(void)
>  	return ret;
>  }
>  
> +/*
> + * Returns true, if the given range overlaps with reserved memory ranges
> + * starting at idx. Also, updates idx to index of overlapping memory range
> + * with the given memory range.
> + * False, otherwise.
> + */
> +static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
> +{
> +	bool ret = false;
> +	int i;
> +
> +	for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
> +		u64 rbase = reserved_mrange_info.mem_ranges[i].base;
> +		u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
> +
> +		if (end <= rbase)
> +			break;
> +
> +		if ((end > rbase) &&  (base < rend)) {
> +			*idx = i;
> +			ret = true;
> +			break;
> +		}
> +	}
> +
> +	return ret;
> +}
> +
> +/*
> + * Locate a suitable memory area to reserve memory for FADump. While at it,
> + * lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
> + */
> +static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
> +{
> +	struct fadump_memory_range *mrngs;
> +	phys_addr_t mstart, mend;
> +	int idx = 0;
> +	u64 i;
> +
> +	mrngs = reserved_mrange_info.mem_ranges;
> +	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
> +				&mstart, &mend, NULL) {
> +		pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
> +			 i, mstart, mend, base);
> +
> +		if (mstart > base)
> +			base = PAGE_ALIGN(mstart);
> +
> +		while ((mend > base) && ((mend - base) >= size)) {
> +			if (!overlaps_reserved_ranges(base, base + size, &idx))
> +				goto out;
> +
> +			base = mrngs[idx].base + mrngs[idx].size;
> +			base = PAGE_ALIGN(base);

What happens when all the memory ranges are found to overlap with
reserved ranges? Shouldn't this function return NULL? Looks like in
that case this function returns the last set base address, which is
either still overlapping or not big enough in size.

Rest looks good to me.

Thanks,
-Mahesh.

> +		}
> +	}
> +
> +out:
> +	return base;
> +}
> +


* Re: [PATCH 2/2] powerpc/fadump: consider reserved ranges while reserving memory
  2020-04-20  5:20   ` Mahesh J Salgaonkar
@ 2020-04-20  7:30     ` Hari Bathini
  0 siblings, 0 replies; 5+ messages in thread
From: Hari Bathini @ 2020-04-20  7:30 UTC (permalink / raw)
  To: mahesh; +Cc: Vasant Hegde, Sourabh Jain, linuxppc-dev



On 20/04/20 10:50 AM, Mahesh J Salgaonkar wrote:
> On 2020-03-11 01:57:10 Wed, Hari Bathini wrote:
>> Commit 0962e8004e97 ("powerpc/prom: Scan reserved-ranges node for
>> memory reservations") added support to parse the reserved-ranges DT
>> node and reserve kernel memory falling in these ranges for F/W
>> purposes. Memory reserved for FADump should not overlap with these
>> ranges, as that could corrupt memory meant for F/W or the crashed
>> kernel's memory to be exported as vmcore.
>>
>> But since commit 579ca1a27675 ("powerpc/fadump: make use of memblock's
>> bottom up allocation mode"), memblock_find_in_range() is used to find
>> the appropriate area to reserve memory for FADump, and it can't
>> account for reserved-ranges as those ranges are reserved only after
>> the FADump memory reservation.
>>
>> With reserved-ranges now being populated during early boot, look out
>> for these memory ranges while reserving memory for FADump. Without
>> this change, MPIPL on PowerNV systems aborts with a hostboot failure
>> when the memory reserved for FADump is less than 4096MB.
>>
>> Fixes: 579ca1a27675 ("powerpc/fadump: make use of memblock's bottom up allocation mode")
>> Cc: stable@vger.kernel.org # v5.4+
>> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
>> ---
>>  arch/powerpc/kernel/fadump.c |   76 ++++++++++++++++++++++++++++++++++++------
>>  1 file changed, 66 insertions(+), 10 deletions(-)
>>
>> diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
>> index 7fcf4a8f..ab83be9 100644
>> --- a/arch/powerpc/kernel/fadump.c
>> +++ b/arch/powerpc/kernel/fadump.c
>> @@ -443,10 +443,70 @@ static int __init fadump_get_boot_mem_regions(void)
>>  	return ret;
>>  }
>>  
>> +/*
>> + * Returns true, if the given range overlaps with reserved memory ranges
>> + * starting at idx. Also, updates idx to index of overlapping memory range
>> + * with the given memory range.
>> + * False, otherwise.
>> + */
>> +static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
>> +{
>> +	bool ret = false;
>> +	int i;
>> +
>> +	for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
>> +		u64 rbase = reserved_mrange_info.mem_ranges[i].base;
>> +		u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
>> +
>> +		if (end <= rbase)
>> +			break;
>> +
>> +		if ((end > rbase) &&  (base < rend)) {
>> +			*idx = i;
>> +			ret = true;
>> +			break;
>> +		}
>> +	}
>> +
>> +	return ret;
>> +}
>> +
>> +/*
>> + * Locate a suitable memory area to reserve memory for FADump. While at it,
>> + * lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
>> + */
>> +static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
>> +{
>> +	struct fadump_memory_range *mrngs;
>> +	phys_addr_t mstart, mend;
>> +	int idx = 0;
>> +	u64 i;
>> +
>> +	mrngs = reserved_mrange_info.mem_ranges;
>> +	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
>> +				&mstart, &mend, NULL) {
>> +		pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
>> +			 i, mstart, mend, base);
>> +
>> +		if (mstart > base)
>> +			base = PAGE_ALIGN(mstart);
>> +
>> +		while ((mend > base) && ((mend - base) >= size)) {
>> +			if (!overlaps_reserved_ranges(base, base + size, &idx))
>> +				goto out;
>> +
>> +			base = mrngs[idx].base + mrngs[idx].size;
>> +			base = PAGE_ALIGN(base);
> 
> What happens when all the memory ranges are found to overlap with
> reserved ranges? Shouldn't this function return NULL? Looks like in
> that case this function returns the last set base address, which is
> either still overlapping or not big enough in size.

Thanks for the review, Mahesh. I overlooked that corner case.
Just posted v2 fixing it.
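
Roughly, the idea is to bail out with a sentinel (0) when no fitting,
non-overlapping base is found, and let fadump_reserve_mem() treat that
as failure. A sketch of the reworked function (illustrative only; the
posted v2 is authoritative):

	static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
	{
		struct fadump_memory_range *mrngs;
		phys_addr_t mstart, mend;
		int idx = 0;
		u64 i, ret = 0;

		mrngs = reserved_mrange_info.mem_ranges;
		for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
					&mstart, &mend, NULL) {
			if (mstart > base)
				base = PAGE_ALIGN(mstart);

			while ((mend > base) && ((mend - base) >= size)) {
				if (!overlaps_reserved_ranges(base, base + size, &idx)) {
					/* Found a fitting, non-overlapping base. */
					ret = base;
					goto out;
				}

				/* Skip past the overlapping reserved range. */
				base = mrngs[idx].base + mrngs[idx].size;
				base = PAGE_ALIGN(base);
			}
		}

	out:
		return ret;
	}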

- Hari

