All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
@ 2018-07-18 11:25 ` Sudeep Holla
  0 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-18 11:25 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Sudeep Holla, Will Deacon, Linux-Renesas, Geert Uytterhoeven,
	Mark Rutland, Lorenzo Pieralisi

Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
information when hotplugging out CPU") updates the cpu topology when
the CPU is hotplugged out. However the PSCI checker code uses the
topology_core_cpumask pointers for some of the cpu hotplug testing.
Since the pointer to the core_cpumask of the first CPU in the group
is used, which when that CPU itself is hotplugged out is just set to
itself, the testing terminates after that particular CPU is tested out.
But the intention of these tests is to cover all the CPUs in the group.

In order to support that, we need to stash the topology_core_cpumask
before the start of the test and use that value instead of a pointer to
a cpumask which will be updated on CPU hotplug.

Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
	information when hotplugging out CPU")
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
---
 drivers/firmware/psci_checker.c | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)

diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index bb1c068bff19..e330c6cb45f5 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,21 +77,23 @@ static int psci_ops_check(void)
 	return 0;
 }
 
-static int find_cpu_groups(const struct cpumask *cpus,
-			   const struct cpumask **cpu_groups)
+static int find_cpu_groups(cpumask_var_t *cpu_groups)
 {
 	unsigned int nb = 0;
 	cpumask_var_t tmp;
 
 	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_copy(tmp, cpus);
+	cpumask_copy(tmp, cpu_online_mask);
 
 	while (!cpumask_empty(tmp)) {
 		const struct cpumask *cpu_group =
 			topology_core_cpumask(cpumask_any(tmp));
 
-		cpu_groups[nb++] = cpu_group;
+		if (cpu_groups && cpu_groups[nb])
+			cpumask_copy(cpu_groups[nb], cpu_group);
+
+		nb++;
 		cpumask_andnot(tmp, tmp, cpu_group);
 	}
 
@@ -169,25 +171,31 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
 static int hotplug_tests(void)
 {
 	int err;
-	cpumask_var_t offlined_cpus;
+	cpumask_var_t offlined_cpus, *cpu_groups;
 	int i, nb_cpu_group;
-	const struct cpumask **cpu_groups;
 	char *page_buf;
 
+	/* first run to just get the number of cpu groups */
+	nb_cpu_group = find_cpu_groups(NULL);
+
 	err = -ENOMEM;
 	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
 		return err;
-	/* We may have up to nb_available_cpus cpu_groups. */
-	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
-				   GFP_KERNEL);
+	cpu_groups = kcalloc(nb_cpu_group, sizeof(cpu_groups), GFP_KERNEL);
 	if (!cpu_groups)
 		goto out_free_cpus;
+
+	for (i = 0; i < nb_cpu_group; ++i)
+		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL))
+			goto out_free_cpu_groups;
+
 	page_buf = (char *)__get_free_page(GFP_KERNEL);
 	if (!page_buf)
 		goto out_free_cpu_groups;
 
 	err = 0;
-	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
+	/* second run to populate/copy the cpumask */
+	nb_cpu_group = find_cpu_groups(cpu_groups);
 
 	/*
 	 * Of course the last CPU cannot be powered down and cpu_down() should
@@ -212,6 +220,8 @@ static int hotplug_tests(void)
 
 	free_page((unsigned long)page_buf);
 out_free_cpu_groups:
+	for (i = 0; i < nb_cpu_group; ++i)
+		free_cpumask_var(cpu_groups[i]);
 	kfree(cpu_groups);
 out_free_cpus:
 	free_cpumask_var(offlined_cpus);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
@ 2018-07-18 11:25 ` Sudeep Holla
  0 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-18 11:25 UTC (permalink / raw)
  To: linux-arm-kernel

Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
information when hotplugging out CPU") updates the cpu topology when
the CPU is hotplugged out. However the PSCI checker code uses the
topology_core_cpumask pointers for some of the cpu hotplug testing.
Since the pointer to the core_cpumask of the first CPU in the group
is used, which when that CPU itself is hotplugged out is just set to
itself, the testing terminates after that particular CPU is tested out.
But the intention of these tests is to cover all the CPUs in the group.

In order to support that, we need to stash the topology_core_cpumask
before the start of the test and use that value instead of a pointer to
a cpumask which will be updated on CPU hotplug.

Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
	information when hotplugging out CPU")
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
---
 drivers/firmware/psci_checker.c | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)

diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index bb1c068bff19..e330c6cb45f5 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,21 +77,23 @@ static int psci_ops_check(void)
 	return 0;
 }
 
-static int find_cpu_groups(const struct cpumask *cpus,
-			   const struct cpumask **cpu_groups)
+static int find_cpu_groups(cpumask_var_t *cpu_groups)
 {
 	unsigned int nb = 0;
 	cpumask_var_t tmp;
 
 	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_copy(tmp, cpus);
+	cpumask_copy(tmp, cpu_online_mask);
 
 	while (!cpumask_empty(tmp)) {
 		const struct cpumask *cpu_group =
 			topology_core_cpumask(cpumask_any(tmp));
 
-		cpu_groups[nb++] = cpu_group;
+		if (cpu_groups && cpu_groups[nb])
+			cpumask_copy(cpu_groups[nb], cpu_group);
+
+		nb++;
 		cpumask_andnot(tmp, tmp, cpu_group);
 	}
 
@@ -169,25 +171,31 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
 static int hotplug_tests(void)
 {
 	int err;
-	cpumask_var_t offlined_cpus;
+	cpumask_var_t offlined_cpus, *cpu_groups;
 	int i, nb_cpu_group;
-	const struct cpumask **cpu_groups;
 	char *page_buf;
 
+	/* first run to just get the number of cpu groups */
+	nb_cpu_group = find_cpu_groups(NULL);
+
 	err = -ENOMEM;
 	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
 		return err;
-	/* We may have up to nb_available_cpus cpu_groups. */
-	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
-				   GFP_KERNEL);
+	cpu_groups = kcalloc(nb_cpu_group, sizeof(cpu_groups), GFP_KERNEL);
 	if (!cpu_groups)
 		goto out_free_cpus;
+
+	for (i = 0; i < nb_cpu_group; ++i)
+		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL))
+			goto out_free_cpu_groups;
+
 	page_buf = (char *)__get_free_page(GFP_KERNEL);
 	if (!page_buf)
 		goto out_free_cpu_groups;
 
 	err = 0;
-	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
+	/* second run to populate/copy the cpumask */
+	nb_cpu_group = find_cpu_groups(cpu_groups);
 
 	/*
 	 * Of course the last CPU cannot be powered down and cpu_down() should
@@ -212,6 +220,8 @@ static int hotplug_tests(void)
 
 	free_page((unsigned long)page_buf);
 out_free_cpu_groups:
+	for (i = 0; i < nb_cpu_group; ++i)
+		free_cpumask_var(cpu_groups[i]);
 	kfree(cpu_groups);
 out_free_cpus:
 	free_cpumask_var(offlined_cpus);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
  2018-07-18 11:25 ` Sudeep Holla
@ 2018-07-18 16:49   ` Lorenzo Pieralisi
  -1 siblings, 0 replies; 14+ messages in thread
From: Lorenzo Pieralisi @ 2018-07-18 16:49 UTC (permalink / raw)
  To: Sudeep Holla
  Cc: linux-arm-kernel, Will Deacon, Linux-Renesas, Geert Uytterhoeven,
	Mark Rutland

On Wed, Jul 18, 2018 at 12:25:32PM +0100, Sudeep Holla wrote:
> Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
> information when hotplugging out CPU") updates the cpu topology when
> the CPU is hotplugged out. However the PSCI checker code uses the
> topology_core_cpumask pointers for some of the cpu hotplug testing.
> Since the pointer to the core_cpumask of the first CPU in the group
> is used, which when that CPU itself is hotpugged out is just set to
> itself, the testing terminates after that particular CPU is tested out.
> But the intention of this tests is to cover all the CPU in the group.
> 
> In order to support that, we need to stash the topology_core_cpumask
> before the start of the test and use that value instead of pointer to
> a cpumask which will be updated on CPU hotplug.
> 
> Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
> 	information when hotplugging out CPU")
> Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
> Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
> ---
>  drivers/firmware/psci_checker.c | 30 ++++++++++++++++++++----------
>  1 file changed, 20 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
> index bb1c068bff19..e330c6cb45f5 100644
> --- a/drivers/firmware/psci_checker.c
> +++ b/drivers/firmware/psci_checker.c
> @@ -77,21 +77,23 @@ static int psci_ops_check(void)
>  	return 0;
>  }
>  
> -static int find_cpu_groups(const struct cpumask *cpus,
> -			   const struct cpumask **cpu_groups)
> +static int find_cpu_groups(cpumask_var_t *cpu_groups)
>  {
>  	unsigned int nb = 0;
>  	cpumask_var_t tmp;
>  
>  	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
>  		return -ENOMEM;
> -	cpumask_copy(tmp, cpus);
> +	cpumask_copy(tmp, cpu_online_mask);
>  
>  	while (!cpumask_empty(tmp)) {
>  		const struct cpumask *cpu_group =
>  			topology_core_cpumask(cpumask_any(tmp));
>  
> -		cpu_groups[nb++] = cpu_group;
> +		if (cpu_groups && cpu_groups[nb])
> +			cpumask_copy(cpu_groups[nb], cpu_group);
> +
> +		nb++;
>  		cpumask_andnot(tmp, tmp, cpu_group);
>  	}
>  
> @@ -169,25 +171,31 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
>  static int hotplug_tests(void)
>  {
>  	int err;
> -	cpumask_var_t offlined_cpus;
> +	cpumask_var_t offlined_cpus, *cpu_groups;
>  	int i, nb_cpu_group;
> -	const struct cpumask **cpu_groups;
>  	char *page_buf;
>  
> +	/* first run to just get the number of cpu groups */
> +	nb_cpu_group = find_cpu_groups(NULL);
> +
>  	err = -ENOMEM;
>  	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
>  		return err;
> -	/* We may have up to nb_available_cpus cpu_groups. */
> -	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
> -				   GFP_KERNEL);
> +	cpu_groups = kcalloc(nb_cpu_group, sizeof(cpu_groups), GFP_KERNEL);
>  	if (!cpu_groups)
>  		goto out_free_cpus;
> +
> +	for (i = 0; i < nb_cpu_group; ++i)
> +		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL))
> +			goto out_free_cpu_groups;
> +
>  	page_buf = (char *)__get_free_page(GFP_KERNEL);
>  	if (!page_buf)
>  		goto out_free_cpu_groups;
>  
>  	err = 0;
> -	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
> +	/* second run to populate/copy the cpumask */
> +	nb_cpu_group = find_cpu_groups(cpu_groups);
>  
>  	/*
>  	 * Of course the last CPU cannot be powered down and cpu_down() should
> @@ -212,6 +220,8 @@ static int hotplug_tests(void)
>  
>  	free_page((unsigned long)page_buf);
>  out_free_cpu_groups:
> +	for (i = 0; i < nb_cpu_group; ++i)
> +		free_cpumask_var(cpu_groups[i]);
>  	kfree(cpu_groups);
>  out_free_cpus:
>  	free_cpumask_var(offlined_cpus);

Hi Sudeep,

thanks for the patch. I reckon that adding two functions, say,
alloc_cpu_groups() and free_cpu_groups() would make the code
more readable instead of relying on find_cpu_groups() to first
count then copy; it is for readability rather than correctness.

Thanks,
Lorenzo

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
@ 2018-07-18 16:49   ` Lorenzo Pieralisi
  0 siblings, 0 replies; 14+ messages in thread
From: Lorenzo Pieralisi @ 2018-07-18 16:49 UTC (permalink / raw)
  To: linux-arm-kernel

On Wed, Jul 18, 2018 at 12:25:32PM +0100, Sudeep Holla wrote:
> Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
> information when hotplugging out CPU") updates the cpu topology when
> the CPU is hotplugged out. However the PSCI checker code uses the
> topology_core_cpumask pointers for some of the cpu hotplug testing.
> Since the pointer to the core_cpumask of the first CPU in the group
> is used, which when that CPU itself is hotpugged out is just set to
> itself, the testing terminates after that particular CPU is tested out.
> But the intention of this tests is to cover all the CPU in the group.
> 
> In order to support that, we need to stash the topology_core_cpumask
> before the start of the test and use that value instead of pointer to
> a cpumask which will be updated on CPU hotplug.
> 
> Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
> 	information when hotplugging out CPU")
> Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
> Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
> ---
>  drivers/firmware/psci_checker.c | 30 ++++++++++++++++++++----------
>  1 file changed, 20 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
> index bb1c068bff19..e330c6cb45f5 100644
> --- a/drivers/firmware/psci_checker.c
> +++ b/drivers/firmware/psci_checker.c
> @@ -77,21 +77,23 @@ static int psci_ops_check(void)
>  	return 0;
>  }
>  
> -static int find_cpu_groups(const struct cpumask *cpus,
> -			   const struct cpumask **cpu_groups)
> +static int find_cpu_groups(cpumask_var_t *cpu_groups)
>  {
>  	unsigned int nb = 0;
>  	cpumask_var_t tmp;
>  
>  	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
>  		return -ENOMEM;
> -	cpumask_copy(tmp, cpus);
> +	cpumask_copy(tmp, cpu_online_mask);
>  
>  	while (!cpumask_empty(tmp)) {
>  		const struct cpumask *cpu_group =
>  			topology_core_cpumask(cpumask_any(tmp));
>  
> -		cpu_groups[nb++] = cpu_group;
> +		if (cpu_groups && cpu_groups[nb])
> +			cpumask_copy(cpu_groups[nb], cpu_group);
> +
> +		nb++;
>  		cpumask_andnot(tmp, tmp, cpu_group);
>  	}
>  
> @@ -169,25 +171,31 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
>  static int hotplug_tests(void)
>  {
>  	int err;
> -	cpumask_var_t offlined_cpus;
> +	cpumask_var_t offlined_cpus, *cpu_groups;
>  	int i, nb_cpu_group;
> -	const struct cpumask **cpu_groups;
>  	char *page_buf;
>  
> +	/* first run to just get the number of cpu groups */
> +	nb_cpu_group = find_cpu_groups(NULL);
> +
>  	err = -ENOMEM;
>  	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
>  		return err;
> -	/* We may have up to nb_available_cpus cpu_groups. */
> -	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
> -				   GFP_KERNEL);
> +	cpu_groups = kcalloc(nb_cpu_group, sizeof(cpu_groups), GFP_KERNEL);
>  	if (!cpu_groups)
>  		goto out_free_cpus;
> +
> +	for (i = 0; i < nb_cpu_group; ++i)
> +		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL))
> +			goto out_free_cpu_groups;
> +
>  	page_buf = (char *)__get_free_page(GFP_KERNEL);
>  	if (!page_buf)
>  		goto out_free_cpu_groups;
>  
>  	err = 0;
> -	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
> +	/* second run to populate/copy the cpumask */
> +	nb_cpu_group = find_cpu_groups(cpu_groups);
>  
>  	/*
>  	 * Of course the last CPU cannot be powered down and cpu_down() should
> @@ -212,6 +220,8 @@ static int hotplug_tests(void)
>  
>  	free_page((unsigned long)page_buf);
>  out_free_cpu_groups:
> +	for (i = 0; i < nb_cpu_group; ++i)
> +		free_cpumask_var(cpu_groups[i]);
>  	kfree(cpu_groups);
>  out_free_cpus:
>  	free_cpumask_var(offlined_cpus);

Hi Sudeep,

thanks for the patch. I reckon that adding two functions, say,
alloc_cpu_groups() and free_cpu_groups() would make the code
more readable instead of relying on find_cpu_groups() to first
count then copy; it is for readability rather than correctness.

Thanks,
Lorenzo

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
  2018-07-18 16:49   ` Lorenzo Pieralisi
@ 2018-07-19  9:50     ` Sudeep Holla
  -1 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-19  9:50 UTC (permalink / raw)
  To: Lorenzo Pieralisi
  Cc: linux-arm-kernel, Will Deacon, Linux-Renesas, Geert Uytterhoeven,
	Mark Rutland, Sudeep Holla

On Wed, Jul 18, 2018 at 05:49:30PM +0100, Lorenzo Pieralisi wrote:
> On Wed, Jul 18, 2018 at 12:25:32PM +0100, Sudeep Holla wrote:
> > Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
> > information when hotplugging out CPU") updates the cpu topology when
> > the CPU is hotplugged out. However the PSCI checker code uses the
> > topology_core_cpumask pointers for some of the cpu hotplug testing.
> > Since the pointer to the core_cpumask of the first CPU in the group
> > is used, which when that CPU itself is hotpugged out is just set to
> > itself, the testing terminates after that particular CPU is tested out.
> > But the intention of this tests is to cover all the CPU in the group.
> > 
> > In order to support that, we need to stash the topology_core_cpumask
> > before the start of the test and use that value instead of pointer to
> > a cpumask which will be updated on CPU hotplug.
> > 
> > Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
> > 	information when hotplugging out CPU")
> > Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> > Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
> > Cc: Mark Rutland <mark.rutland@arm.com>
> > Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
> > Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
> > ---
> >  drivers/firmware/psci_checker.c | 30 ++++++++++++++++++++----------
> >  1 file changed, 20 insertions(+), 10 deletions(-)
> > 
> > diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
> > index bb1c068bff19..e330c6cb45f5 100644
> > --- a/drivers/firmware/psci_checker.c
> > +++ b/drivers/firmware/psci_checker.c
> > @@ -77,21 +77,23 @@ static int psci_ops_check(void)
> >  	return 0;
> >  }
> >  
> > -static int find_cpu_groups(const struct cpumask *cpus,
> > -			   const struct cpumask **cpu_groups)
> > +static int find_cpu_groups(cpumask_var_t *cpu_groups)
> >  {
> >  	unsigned int nb = 0;
> >  	cpumask_var_t tmp;
> >  
> >  	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
> >  		return -ENOMEM;
> > -	cpumask_copy(tmp, cpus);
> > +	cpumask_copy(tmp, cpu_online_mask);
> >  
> >  	while (!cpumask_empty(tmp)) {
> >  		const struct cpumask *cpu_group =
> >  			topology_core_cpumask(cpumask_any(tmp));
> >  
> > -		cpu_groups[nb++] = cpu_group;
> > +		if (cpu_groups && cpu_groups[nb])
> > +			cpumask_copy(cpu_groups[nb], cpu_group);
> > +
> > +		nb++;
> >  		cpumask_andnot(tmp, tmp, cpu_group);
> >  	}
> >  
> > @@ -169,25 +171,31 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
> >  static int hotplug_tests(void)
> >  {
> >  	int err;
> > -	cpumask_var_t offlined_cpus;
> > +	cpumask_var_t offlined_cpus, *cpu_groups;
> >  	int i, nb_cpu_group;
> > -	const struct cpumask **cpu_groups;
> >  	char *page_buf;
> >  
> > +	/* first run to just get the number of cpu groups */
> > +	nb_cpu_group = find_cpu_groups(NULL);
> > +
> >  	err = -ENOMEM;
> >  	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
> >  		return err;
> > -	/* We may have up to nb_available_cpus cpu_groups. */
> > -	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
> > -				   GFP_KERNEL);
> > +	cpu_groups = kcalloc(nb_cpu_group, sizeof(cpu_groups), GFP_KERNEL);
> >  	if (!cpu_groups)
> >  		goto out_free_cpus;
> > +
> > +	for (i = 0; i < nb_cpu_group; ++i)
> > +		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL))
> > +			goto out_free_cpu_groups;
> > +
> >  	page_buf = (char *)__get_free_page(GFP_KERNEL);
> >  	if (!page_buf)
> >  		goto out_free_cpu_groups;
> >  
> >  	err = 0;
> > -	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
> > +	/* second run to populate/copy the cpumask */
> > +	nb_cpu_group = find_cpu_groups(cpu_groups);
> >  
> >  	/*
> >  	 * Of course the last CPU cannot be powered down and cpu_down() should
> > @@ -212,6 +220,8 @@ static int hotplug_tests(void)
> >  
> >  	free_page((unsigned long)page_buf);
> >  out_free_cpu_groups:
> > +	for (i = 0; i < nb_cpu_group; ++i)
> > +		free_cpumask_var(cpu_groups[i]);
> >  	kfree(cpu_groups);
> >  out_free_cpus:
> >  	free_cpumask_var(offlined_cpus);
> 
> Hi Sudeep,
> 
> thanks for the patch. I reckon that adding two functions, say,
> alloc_cpu_groups() and free_cpu_groups() would make the code
> more readable instead of relying on find_cpu_groups() to first
> count then copy; it is for readability rather than correctness.
> 

I agree, I can say I was lazy and started trying to keep delta small.
I will respin.

--
Regards,
Sudeep

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
@ 2018-07-19  9:50     ` Sudeep Holla
  0 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-19  9:50 UTC (permalink / raw)
  To: linux-arm-kernel

On Wed, Jul 18, 2018 at 05:49:30PM +0100, Lorenzo Pieralisi wrote:
> On Wed, Jul 18, 2018 at 12:25:32PM +0100, Sudeep Holla wrote:
> > Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
> > information when hotplugging out CPU") updates the cpu topology when
> > the CPU is hotplugged out. However the PSCI checker code uses the
> > topology_core_cpumask pointers for some of the cpu hotplug testing.
> > Since the pointer to the core_cpumask of the first CPU in the group
> > is used, which when that CPU itself is hotpugged out is just set to
> > itself, the testing terminates after that particular CPU is tested out.
> > But the intention of this tests is to cover all the CPU in the group.
> > 
> > In order to support that, we need to stash the topology_core_cpumask
> > before the start of the test and use that value instead of pointer to
> > a cpumask which will be updated on CPU hotplug.
> > 
> > Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
> > 	information when hotplugging out CPU")
> > Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> > Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
> > Cc: Mark Rutland <mark.rutland@arm.com>
> > Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
> > Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
> > ---
> >  drivers/firmware/psci_checker.c | 30 ++++++++++++++++++++----------
> >  1 file changed, 20 insertions(+), 10 deletions(-)
> > 
> > diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
> > index bb1c068bff19..e330c6cb45f5 100644
> > --- a/drivers/firmware/psci_checker.c
> > +++ b/drivers/firmware/psci_checker.c
> > @@ -77,21 +77,23 @@ static int psci_ops_check(void)
> >  	return 0;
> >  }
> >  
> > -static int find_cpu_groups(const struct cpumask *cpus,
> > -			   const struct cpumask **cpu_groups)
> > +static int find_cpu_groups(cpumask_var_t *cpu_groups)
> >  {
> >  	unsigned int nb = 0;
> >  	cpumask_var_t tmp;
> >  
> >  	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
> >  		return -ENOMEM;
> > -	cpumask_copy(tmp, cpus);
> > +	cpumask_copy(tmp, cpu_online_mask);
> >  
> >  	while (!cpumask_empty(tmp)) {
> >  		const struct cpumask *cpu_group =
> >  			topology_core_cpumask(cpumask_any(tmp));
> >  
> > -		cpu_groups[nb++] = cpu_group;
> > +		if (cpu_groups && cpu_groups[nb])
> > +			cpumask_copy(cpu_groups[nb], cpu_group);
> > +
> > +		nb++;
> >  		cpumask_andnot(tmp, tmp, cpu_group);
> >  	}
> >  
> > @@ -169,25 +171,31 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
> >  static int hotplug_tests(void)
> >  {
> >  	int err;
> > -	cpumask_var_t offlined_cpus;
> > +	cpumask_var_t offlined_cpus, *cpu_groups;
> >  	int i, nb_cpu_group;
> > -	const struct cpumask **cpu_groups;
> >  	char *page_buf;
> >  
> > +	/* first run to just get the number of cpu groups */
> > +	nb_cpu_group = find_cpu_groups(NULL);
> > +
> >  	err = -ENOMEM;
> >  	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
> >  		return err;
> > -	/* We may have up to nb_available_cpus cpu_groups. */
> > -	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
> > -				   GFP_KERNEL);
> > +	cpu_groups = kcalloc(nb_cpu_group, sizeof(cpu_groups), GFP_KERNEL);
> >  	if (!cpu_groups)
> >  		goto out_free_cpus;
> > +
> > +	for (i = 0; i < nb_cpu_group; ++i)
> > +		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL))
> > +			goto out_free_cpu_groups;
> > +
> >  	page_buf = (char *)__get_free_page(GFP_KERNEL);
> >  	if (!page_buf)
> >  		goto out_free_cpu_groups;
> >  
> >  	err = 0;
> > -	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
> > +	/* second run to populate/copy the cpumask */
> > +	nb_cpu_group = find_cpu_groups(cpu_groups);
> >  
> >  	/*
> >  	 * Of course the last CPU cannot be powered down and cpu_down() should
> > @@ -212,6 +220,8 @@ static int hotplug_tests(void)
> >  
> >  	free_page((unsigned long)page_buf);
> >  out_free_cpu_groups:
> > +	for (i = 0; i < nb_cpu_group; ++i)
> > +		free_cpumask_var(cpu_groups[i]);
> >  	kfree(cpu_groups);
> >  out_free_cpus:
> >  	free_cpumask_var(offlined_cpus);
> 
> Hi Sudeep,
> 
> thanks for the patch. I reckon that adding two functions, say,
> alloc_cpu_groups() and free_cpu_groups() would make the code
> more readable instead of relying on find_cpu_groups() to first
> count then copy; it is for readability rather than correctness.
> 

I agree, I can say I was lazy and started trying to keep delta small.
I will respin.

--
Regards,
Sudeep

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH v2] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
  2018-07-18 11:25 ` Sudeep Holla
@ 2018-07-19 13:35   ` Sudeep Holla
  -1 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-19 13:35 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Sudeep Holla, Will Deacon, Linux-Renesas, Geert Uytterhoeven,
	Mark Rutland, Lorenzo Pieralisi

Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
information when hotplugging out CPU") updates the cpu topology when
the CPU is hotplugged out. However the PSCI checker code uses the
topology_core_cpumask pointers for some of the cpu hotplug testing.
Since the pointer to the core_cpumask of the first CPU in the group
is used, which when that CPU itself is hotplugged out is just set to
itself, the testing terminates after that particular CPU is tested out.
But the intention of these tests is to cover all the CPUs in the group.

In order to support that, we need to stash the topology_core_cpumask
before the start of the test and use that value instead of a pointer to
a cpumask which will be updated on CPU hotplug.

Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
	information when hotplugging out CPU")
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
---
 drivers/firmware/psci_checker.c | 53 ++++++++++++++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 11 deletions(-)

v1->v2:
	- Move allocation and freeing of the cpumasks to separate
	  routines as suggested by Lorenzo
	- Reduced the allocation to number of groups instead of number
	  of cpus in the system by making 2 pass

diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index bb1c068bff19..7e6f66b588fd 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,21 +77,23 @@ static int psci_ops_check(void)
 	return 0;
 }

-static int find_cpu_groups(const struct cpumask *cpus,
-			   const struct cpumask **cpu_groups)
+static int find_cpu_groups(cpumask_var_t *cpu_groups)
 {
 	unsigned int nb = 0;
 	cpumask_var_t tmp;

 	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_copy(tmp, cpus);
+	cpumask_copy(tmp, cpu_online_mask);

 	while (!cpumask_empty(tmp)) {
 		const struct cpumask *cpu_group =
 			topology_core_cpumask(cpumask_any(tmp));

-		cpu_groups[nb++] = cpu_group;
+		if (cpu_groups && cpu_groups[nb])
+			cpumask_copy(cpu_groups[nb], cpu_group);
+
+		nb++;
 		cpumask_andnot(tmp, tmp, cpu_group);
 	}

@@ -166,20 +168,48 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
 	return err;
 }

+static void free_cpu_groups(int num, cpumask_var_t *cpu_groups)
+{
+	int i;
+
+	for (i = 0; i < num; ++i)
+		free_cpumask_var(cpu_groups[i]);
+	kfree(cpu_groups);
+}
+
+static cpumask_var_t *alloc_cpu_groups(int num)
+{
+	int i;
+	cpumask_var_t *cpu_groups;
+
+	cpu_groups = kcalloc(num, sizeof(cpu_groups), GFP_KERNEL);
+	if (!cpu_groups)
+		return NULL;
+
+	for (i = 0; i < num; ++i)
+		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL)) {
+			free_cpu_groups(num, cpu_groups);
+			return NULL;
+		}
+
+	return cpu_groups;
+}
+
 static int hotplug_tests(void)
 {
 	int err;
-	cpumask_var_t offlined_cpus;
+	cpumask_var_t offlined_cpus, *cpu_groups;
 	int i, nb_cpu_group;
-	const struct cpumask **cpu_groups;
 	char *page_buf;

+	/* first run to just get the number of cpu groups */
+	nb_cpu_group = find_cpu_groups(NULL);
+
 	err = -ENOMEM;
 	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
 		return err;
-	/* We may have up to nb_available_cpus cpu_groups. */
-	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
-				   GFP_KERNEL);
+
+	cpu_groups = alloc_cpu_groups(nb_cpu_group);
 	if (!cpu_groups)
 		goto out_free_cpus;
 	page_buf = (char *)__get_free_page(GFP_KERNEL);
@@ -187,7 +217,8 @@ static int hotplug_tests(void)
 		goto out_free_cpu_groups;

 	err = 0;
-	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
+	/* second run to populate/copy the cpumask */
+	nb_cpu_group = find_cpu_groups(cpu_groups);

 	/*
 	 * Of course the last CPU cannot be powered down and cpu_down() should
@@ -212,7 +243,7 @@ static int hotplug_tests(void)

 	free_page((unsigned long)page_buf);
 out_free_cpu_groups:
-	kfree(cpu_groups);
+	free_cpu_groups(nb_cpu_group, cpu_groups);
 out_free_cpus:
 	free_cpumask_var(offlined_cpus);
 	return err;
--
2.7.4

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v2] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
@ 2018-07-19 13:35   ` Sudeep Holla
  0 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-19 13:35 UTC (permalink / raw)
  To: linux-arm-kernel

Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
information when hotplugging out CPU") updates the cpu topology when
the CPU is hotplugged out. However the PSCI checker code uses the
topology_core_cpumask pointers for some of the cpu hotplug testing.
Since the pointer to the core_cpumask of the first CPU in the group
is used, which when that CPU itself is hotplugged out is just set to
itself, the testing terminates after that particular CPU is tested out.
But the intention of these tests is to cover all the CPUs in the group.

In order to support that, we need to stash the topology_core_cpumask
before the start of the test and use that value instead of pointer to
a cpumask which will be updated on CPU hotplug.

Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
	information when hotplugging out CPU")
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
---
 drivers/firmware/psci_checker.c | 53 ++++++++++++++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 11 deletions(-)

v1->v2:
	- Move allocation and freeing of the cpumasks to separate
	  routines as suggested by Lorenzo
	- Reduced the allocation to number of groups instead of number
	  of cpus in the system by making 2 passes

diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index bb1c068bff19..7e6f66b588fd 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,21 +77,23 @@ static int psci_ops_check(void)
 	return 0;
 }

-static int find_cpu_groups(const struct cpumask *cpus,
-			   const struct cpumask **cpu_groups)
+static int find_cpu_groups(cpumask_var_t *cpu_groups)
 {
 	unsigned int nb = 0;
 	cpumask_var_t tmp;

 	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_copy(tmp, cpus);
+	cpumask_copy(tmp, cpu_online_mask);

 	while (!cpumask_empty(tmp)) {
 		const struct cpumask *cpu_group =
 			topology_core_cpumask(cpumask_any(tmp));

-		cpu_groups[nb++] = cpu_group;
+		if (cpu_groups && cpu_groups[nb])
+			cpumask_copy(cpu_groups[nb], cpu_group);
+
+		nb++;
 		cpumask_andnot(tmp, tmp, cpu_group);
 	}

@@ -166,20 +168,48 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
 	return err;
 }

+static void free_cpu_groups(int num, cpumask_var_t *cpu_groups)
+{
+	int i;
+
+	for (i = 0; i < num; ++i)
+		free_cpumask_var(cpu_groups[i]);
+	kfree(cpu_groups);
+}
+
+static cpumask_var_t *alloc_cpu_groups(int num)
+{
+	int i;
+	cpumask_var_t *cpu_groups;
+
+	cpu_groups = kcalloc(num, sizeof(cpu_groups), GFP_KERNEL);
+	if (!cpu_groups)
+		return NULL;
+
+	for (i = 0; i < num; ++i)
+		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL)) {
+			free_cpu_groups(num, cpu_groups);
+			return NULL;
+		}
+
+	return cpu_groups;
+}
+
 static int hotplug_tests(void)
 {
 	int err;
-	cpumask_var_t offlined_cpus;
+	cpumask_var_t offlined_cpus, *cpu_groups;
 	int i, nb_cpu_group;
-	const struct cpumask **cpu_groups;
 	char *page_buf;

+	/* first run to just get the number of cpu groups */
+	nb_cpu_group = find_cpu_groups(NULL);
+
 	err = -ENOMEM;
 	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
 		return err;
-	/* We may have up to nb_available_cpus cpu_groups. */
-	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
-				   GFP_KERNEL);
+
+	cpu_groups = alloc_cpu_groups(nb_cpu_group);
 	if (!cpu_groups)
 		goto out_free_cpus;
 	page_buf = (char *)__get_free_page(GFP_KERNEL);
@@ -187,7 +217,8 @@ static int hotplug_tests(void)
 		goto out_free_cpu_groups;

 	err = 0;
-	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
+	/* second run to populate/copy the cpumask */
+	nb_cpu_group = find_cpu_groups(cpu_groups);

 	/*
 	 * Of course the last CPU cannot be powered down and cpu_down() should
@@ -212,7 +243,7 @@ static int hotplug_tests(void)

 	free_page((unsigned long)page_buf);
 out_free_cpu_groups:
-	kfree(cpu_groups);
+	free_cpu_groups(nb_cpu_group, cpu_groups);
 out_free_cpus:
 	free_cpumask_var(offlined_cpus);
 	return err;
--
2.7.4

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH v2] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
  2018-07-19 13:35   ` Sudeep Holla
@ 2018-07-19 14:20     ` Lorenzo Pieralisi
  -1 siblings, 0 replies; 14+ messages in thread
From: Lorenzo Pieralisi @ 2018-07-19 14:20 UTC (permalink / raw)
  To: Sudeep Holla
  Cc: linux-arm-kernel, Will Deacon, Linux-Renesas, Geert Uytterhoeven,
	Mark Rutland

On Thu, Jul 19, 2018 at 02:35:49PM +0100, Sudeep Holla wrote:
> Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
> information when hotplugging out CPU") updates the cpu topology when
> the CPU is hotplugged out. However the PSCI checker code uses the
> topology_core_cpumask pointers for some of the cpu hotplug testing.
> Since the pointer to the core_cpumask of the first CPU in the group
> is used, which when that CPU itself is hotpugged out is just set to
> itself, the testing terminates after that particular CPU is tested out.
> But the intention of this tests is to cover all the CPU in the group.
> 
> In order to support that, we need to stash the topology_core_cpumask
> before the start of the test and use that value instead of pointer to
> a cpumask which will be updated on CPU hotplug.
> 
> Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
> 	information when hotplugging out CPU")
> Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
> Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
> ---
>  drivers/firmware/psci_checker.c | 53 ++++++++++++++++++++++++++++++++---------
>  1 file changed, 42 insertions(+), 11 deletions(-)
> 
> v1->v2:
> 	- Move allocation and freeing of the cpumasks to separate
> 	  routines as suggested by Lorenzo
> 	- Reduced the allocation to number of groups instead of number
> 	  of cpus in the system by making 2 pass
> 
> diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
> index bb1c068bff19..7e6f66b588fd 100644
> --- a/drivers/firmware/psci_checker.c
> +++ b/drivers/firmware/psci_checker.c
> @@ -77,21 +77,23 @@ static int psci_ops_check(void)
>  	return 0;
>  }
> 
> -static int find_cpu_groups(const struct cpumask *cpus,
> -			   const struct cpumask **cpu_groups)
> +static int find_cpu_groups(cpumask_var_t *cpu_groups)
>  {
>  	unsigned int nb = 0;
>  	cpumask_var_t tmp;
> 
>  	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
>  		return -ENOMEM;
> -	cpumask_copy(tmp, cpus);
> +	cpumask_copy(tmp, cpu_online_mask);
> 
>  	while (!cpumask_empty(tmp)) {
>  		const struct cpumask *cpu_group =
>  			topology_core_cpumask(cpumask_any(tmp));
> 
> -		cpu_groups[nb++] = cpu_group;
> +		if (cpu_groups && cpu_groups[nb])
> +			cpumask_copy(cpu_groups[nb], cpu_group);
> +
> +		nb++;
>  		cpumask_andnot(tmp, tmp, cpu_group);
>  	}
> 
> @@ -166,20 +168,48 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
>  	return err;
>  }
> 
> +static void free_cpu_groups(int num, cpumask_var_t *cpu_groups)
> +{
> +	int i;
> +
> +	for (i = 0; i < num; ++i)
> +		free_cpumask_var(cpu_groups[i]);
> +	kfree(cpu_groups);
> +}
> +
> +static cpumask_var_t *alloc_cpu_groups(int num)
> +{
> +	int i;
> +	cpumask_var_t *cpu_groups;
> +
> +	cpu_groups = kcalloc(num, sizeof(cpu_groups), GFP_KERNEL);
> +	if (!cpu_groups)
> +		return NULL;
> +
> +	for (i = 0; i < num; ++i)
> +		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL)) {
> +			free_cpu_groups(num, cpu_groups);
> +			return NULL;
> +		}
> +
> +	return cpu_groups;
> +}

Sorry for being a PITA - I meant we could remove find_cpu_groups()
entirely and embed it in alloc_cpu_groups(), that takes a cpumask_t
pointer and return the number of groups, again, to make it more
readable but that's just my opinion.

Regardless:

Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>

>  static int hotplug_tests(void)
>  {
>  	int err;
> -	cpumask_var_t offlined_cpus;
> +	cpumask_var_t offlined_cpus, *cpu_groups;
>  	int i, nb_cpu_group;
> -	const struct cpumask **cpu_groups;
>  	char *page_buf;
> 
> +	/* first run to just get the number of cpu groups */
> +	nb_cpu_group = find_cpu_groups(NULL);
> +
>  	err = -ENOMEM;
>  	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
>  		return err;
> -	/* We may have up to nb_available_cpus cpu_groups. */
> -	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
> -				   GFP_KERNEL);
> +
> +	cpu_groups = alloc_cpu_groups(nb_cpu_group);
>  	if (!cpu_groups)
>  		goto out_free_cpus;
>  	page_buf = (char *)__get_free_page(GFP_KERNEL);
> @@ -187,7 +217,8 @@ static int hotplug_tests(void)
>  		goto out_free_cpu_groups;
> 
>  	err = 0;
> -	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
> +	/* second run to populate/copy the cpumask */
> +	nb_cpu_group = find_cpu_groups(cpu_groups);
> 
>  	/*
>  	 * Of course the last CPU cannot be powered down and cpu_down() should
> @@ -212,7 +243,7 @@ static int hotplug_tests(void)
> 
>  	free_page((unsigned long)page_buf);
>  out_free_cpu_groups:
> -	kfree(cpu_groups);
> +	free_cpu_groups(nb_cpu_group, cpu_groups);
>  out_free_cpus:
>  	free_cpumask_var(offlined_cpus);
>  	return err;
> --
> 2.7.4
> 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH v2] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
@ 2018-07-19 14:20     ` Lorenzo Pieralisi
  0 siblings, 0 replies; 14+ messages in thread
From: Lorenzo Pieralisi @ 2018-07-19 14:20 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jul 19, 2018 at 02:35:49PM +0100, Sudeep Holla wrote:
> Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
> information when hotplugging out CPU") updates the cpu topology when
> the CPU is hotplugged out. However the PSCI checker code uses the
> topology_core_cpumask pointers for some of the cpu hotplug testing.
> Since the pointer to the core_cpumask of the first CPU in the group
> is used, which when that CPU itself is hotpugged out is just set to
> itself, the testing terminates after that particular CPU is tested out.
> But the intention of this tests is to cover all the CPU in the group.
> 
> In order to support that, we need to stash the topology_core_cpumask
> before the start of the test and use that value instead of pointer to
> a cpumask which will be updated on CPU hotplug.
> 
> Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
> 	information when hotplugging out CPU")
> Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
> Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
> ---
>  drivers/firmware/psci_checker.c | 53 ++++++++++++++++++++++++++++++++---------
>  1 file changed, 42 insertions(+), 11 deletions(-)
> 
> v1->v2:
> 	- Move allocation and freeing of the cpumasks to separate
> 	  routines as suggested by Lorenzo
> 	- Reduced the allocation to number of groups instead of number
> 	  of cpus in the system by making 2 pass
> 
> diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
> index bb1c068bff19..7e6f66b588fd 100644
> --- a/drivers/firmware/psci_checker.c
> +++ b/drivers/firmware/psci_checker.c
> @@ -77,21 +77,23 @@ static int psci_ops_check(void)
>  	return 0;
>  }
> 
> -static int find_cpu_groups(const struct cpumask *cpus,
> -			   const struct cpumask **cpu_groups)
> +static int find_cpu_groups(cpumask_var_t *cpu_groups)
>  {
>  	unsigned int nb = 0;
>  	cpumask_var_t tmp;
> 
>  	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
>  		return -ENOMEM;
> -	cpumask_copy(tmp, cpus);
> +	cpumask_copy(tmp, cpu_online_mask);
> 
>  	while (!cpumask_empty(tmp)) {
>  		const struct cpumask *cpu_group =
>  			topology_core_cpumask(cpumask_any(tmp));
> 
> -		cpu_groups[nb++] = cpu_group;
> +		if (cpu_groups && cpu_groups[nb])
> +			cpumask_copy(cpu_groups[nb], cpu_group);
> +
> +		nb++;
>  		cpumask_andnot(tmp, tmp, cpu_group);
>  	}
> 
> @@ -166,20 +168,48 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
>  	return err;
>  }
> 
> +static void free_cpu_groups(int num, cpumask_var_t *cpu_groups)
> +{
> +	int i;
> +
> +	for (i = 0; i < num; ++i)
> +		free_cpumask_var(cpu_groups[i]);
> +	kfree(cpu_groups);
> +}
> +
> +static cpumask_var_t *alloc_cpu_groups(int num)
> +{
> +	int i;
> +	cpumask_var_t *cpu_groups;
> +
> +	cpu_groups = kcalloc(num, sizeof(cpu_groups), GFP_KERNEL);
> +	if (!cpu_groups)
> +		return NULL;
> +
> +	for (i = 0; i < num; ++i)
> +		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL)) {
> +			free_cpu_groups(num, cpu_groups);
> +			return NULL;
> +		}
> +
> +	return cpu_groups;
> +}

Sorry for being a PITA - I meant we could remove find_cpu_groups()
entirely and embed it in alloc_cpu_groups(), that takes a cpumask_t
pointer and return the number of groups, again, to make it more
readable but that's just my opinion.

Regardless:

Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>

>  static int hotplug_tests(void)
>  {
>  	int err;
> -	cpumask_var_t offlined_cpus;
> +	cpumask_var_t offlined_cpus, *cpu_groups;
>  	int i, nb_cpu_group;
> -	const struct cpumask **cpu_groups;
>  	char *page_buf;
> 
> +	/* first run to just get the number of cpu groups */
> +	nb_cpu_group = find_cpu_groups(NULL);
> +
>  	err = -ENOMEM;
>  	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
>  		return err;
> -	/* We may have up to nb_available_cpus cpu_groups. */
> -	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
> -				   GFP_KERNEL);
> +
> +	cpu_groups = alloc_cpu_groups(nb_cpu_group);
>  	if (!cpu_groups)
>  		goto out_free_cpus;
>  	page_buf = (char *)__get_free_page(GFP_KERNEL);
> @@ -187,7 +217,8 @@ static int hotplug_tests(void)
>  		goto out_free_cpu_groups;
> 
>  	err = 0;
> -	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
> +	/* second run to populate/copy the cpumask */
> +	nb_cpu_group = find_cpu_groups(cpu_groups);
> 
>  	/*
>  	 * Of course the last CPU cannot be powered down and cpu_down() should
> @@ -212,7 +243,7 @@ static int hotplug_tests(void)
> 
>  	free_page((unsigned long)page_buf);
>  out_free_cpu_groups:
> -	kfree(cpu_groups);
> +	free_cpu_groups(nb_cpu_group, cpu_groups);
>  out_free_cpus:
>  	free_cpumask_var(offlined_cpus);
>  	return err;
> --
> 2.7.4
> 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v2] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
  2018-07-19 14:20     ` Lorenzo Pieralisi
@ 2018-07-19 15:04       ` Sudeep Holla
  -1 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-19 15:04 UTC (permalink / raw)
  To: Lorenzo Pieralisi
  Cc: Sudeep Holla, linux-arm-kernel, Will Deacon, Linux-Renesas,
	Geert Uytterhoeven, Mark Rutland



On 19/07/18 15:20, Lorenzo Pieralisi wrote:
> On Thu, Jul 19, 2018 at 02:35:49PM +0100, Sudeep Holla wrote:

[...]

>> +static cpumask_var_t *alloc_cpu_groups(int num)
>> +{
>> +	int i;
>> +	cpumask_var_t *cpu_groups;
>> +
>> +	cpu_groups = kcalloc(num, sizeof(cpu_groups), GFP_KERNEL);
>> +	if (!cpu_groups)
>> +		return NULL;
>> +
>> +	for (i = 0; i < num; ++i)
>> +		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL)) {
>> +			free_cpu_groups(num, cpu_groups);
>> +			return NULL;
>> +		}
>> +
>> +	return cpu_groups;
>> +}
> 
> Sorry for being a PITA - I meant we could remove find_cpu_groups()


Sorry that's exactly what I understood when I read it, but ...
got distracted with something else and when I returned back to it,
implemented something else.

> entirely and embed it in alloc_cpu_groups(), that takes a cpumask_t
> pointer and return the number of groups, again, to make it more
> readable but that's just my opinion.
> 

Sorry for not showing that much love to this, not paying too much
attention as it's test code :).

-- 
Regards,
Sudeep

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH v2] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
@ 2018-07-19 15:04       ` Sudeep Holla
  0 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-19 15:04 UTC (permalink / raw)
  To: linux-arm-kernel



On 19/07/18 15:20, Lorenzo Pieralisi wrote:
> On Thu, Jul 19, 2018 at 02:35:49PM +0100, Sudeep Holla wrote:

[...]

>> +static cpumask_var_t *alloc_cpu_groups(int num)
>> +{
>> +	int i;
>> +	cpumask_var_t *cpu_groups;
>> +
>> +	cpu_groups = kcalloc(num, sizeof(cpu_groups), GFP_KERNEL);
>> +	if (!cpu_groups)
>> +		return NULL;
>> +
>> +	for (i = 0; i < num; ++i)
>> +		if (!alloc_cpumask_var(&cpu_groups[i], GFP_KERNEL)) {
>> +			free_cpu_groups(num, cpu_groups);
>> +			return NULL;
>> +		}
>> +
>> +	return cpu_groups;
>> +}
> 
> Sorry for being a PITA - I meant we could remove find_cpu_groups()


Sorry that's exactly what I understood when I read it, but ...
got distracted with something else and when I returned back to it,
implemented something else.

> entirely and embed it in alloc_cpu_groups(), that takes a cpumask_t
> pointer and return the number of groups, again, to make it more
> readable but that's just my opinion.
> 

Sorry for not showing that much love to this, not paying too much
attention as it's test code :).

-- 
Regards,
Sudeep

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH v3] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
  2018-07-18 11:25 ` Sudeep Holla
@ 2018-07-19 16:00   ` Sudeep Holla
  -1 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-19 16:00 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: Sudeep Holla, Will Deacon, Linux-Renesas, Geert Uytterhoeven,
	Mark Rutland, Lorenzo Pieralisi

Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
information when hotplugging out CPU") updates the cpu topology when
the CPU is hotplugged out. However the PSCI checker code uses the
topology_core_cpumask pointers for some of the cpu hotplug testing.
Since the pointer to the core_cpumask of the first CPU in the group
is used, which when that CPU itself is hotplugged out is just set to
itself, the testing terminates after that particular CPU is tested out.
But the intention of these tests is to cover all the CPUs in the group.

In order to support that, we need to stash the topology_core_cpumask
before the start of the test and use that value instead of pointer to
a cpumask which will be updated on CPU hotplug.

Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
	information when hotplugging out CPU")
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
---
 drivers/firmware/psci_checker.c | 83 ++++++++++++++++++++++++-----------------
 1 file changed, 49 insertions(+), 34 deletions(-)

v2->v3:
	- Got rid of find_cpu_groups as suggested by Lorenzo

diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index bb1c068bff19..346943657962 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,28 +77,6 @@ static int psci_ops_check(void)
 	return 0;
 }

-static int find_cpu_groups(const struct cpumask *cpus,
-			   const struct cpumask **cpu_groups)
-{
-	unsigned int nb = 0;
-	cpumask_var_t tmp;
-
-	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
-		return -ENOMEM;
-	cpumask_copy(tmp, cpus);
-
-	while (!cpumask_empty(tmp)) {
-		const struct cpumask *cpu_group =
-			topology_core_cpumask(cpumask_any(tmp));
-
-		cpu_groups[nb++] = cpu_group;
-		cpumask_andnot(tmp, tmp, cpu_group);
-	}
-
-	free_cpumask_var(tmp);
-	return nb;
-}
-
 /*
  * offlined_cpus is a temporary array but passing it as an argument avoids
  * multiple allocations.
@@ -166,29 +144,66 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
 	return err;
 }

+static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
+{
+	int i;
+	cpumask_var_t *cpu_groups = *pcpu_groups;
+
+	for (i = 0; i < num; ++i)
+		free_cpumask_var(cpu_groups[i]);
+	kfree(cpu_groups);
+}
+
+static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
+{
+	int num_groups = 0;
+	cpumask_var_t tmp, *cpu_groups;
+
+	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
+			     GFP_KERNEL);
+	if (!cpu_groups)
+		return -ENOMEM;
+
+	cpumask_copy(tmp, cpu_online_mask);
+
+	while (!cpumask_empty(tmp)) {
+		const struct cpumask *cpu_group =
+			topology_core_cpumask(cpumask_any(tmp));
+
+		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+			free_cpu_groups(num_groups, &cpu_groups);
+			return -ENOMEM;
+		}
+		cpumask_copy(cpu_groups[num_groups++], cpu_group);
+		cpumask_andnot(tmp, tmp, cpu_group);
+	}
+
+	free_cpumask_var(tmp);
+	*pcpu_groups = cpu_groups;
+
+	return num_groups;
+}
+
 static int hotplug_tests(void)
 {
-	int err;
-	cpumask_var_t offlined_cpus;
-	int i, nb_cpu_group;
-	const struct cpumask **cpu_groups;
+	int i, nb_cpu_group, err = -ENOMEM;
+	cpumask_var_t offlined_cpus, *cpu_groups;
 	char *page_buf;

-	err = -ENOMEM;
 	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
 		return err;
-	/* We may have up to nb_available_cpus cpu_groups. */
-	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
-				   GFP_KERNEL);
-	if (!cpu_groups)
+
+	nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
+	if (nb_cpu_group < 0)
 		goto out_free_cpus;
 	page_buf = (char *)__get_free_page(GFP_KERNEL);
 	if (!page_buf)
 		goto out_free_cpu_groups;

 	err = 0;
-	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
-
 	/*
 	 * Of course the last CPU cannot be powered down and cpu_down() should
 	 * refuse doing that.
@@ -212,7 +227,7 @@ static int hotplug_tests(void)

 	free_page((unsigned long)page_buf);
 out_free_cpu_groups:
-	kfree(cpu_groups);
+	free_cpu_groups(nb_cpu_group, &cpu_groups);
 out_free_cpus:
 	free_cpumask_var(offlined_cpus);
 	return err;
--
2.7.4

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v3] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests
@ 2018-07-19 16:00   ` Sudeep Holla
  0 siblings, 0 replies; 14+ messages in thread
From: Sudeep Holla @ 2018-07-19 16:00 UTC (permalink / raw)
  To: linux-arm-kernel

Commit 7f9545aa1a91 ("arm64: smp: remove cpu and numa topology
information when hotplugging out CPU") updates the cpu topology when
the CPU is hotplugged out. However the PSCI checker code uses the
topology_core_cpumask pointers for some of the cpu hotplug testing.
Since the pointer to the core_cpumask of the first CPU in the group
is used, which when that CPU itself is hotplugged out is just set to
itself, the testing terminates after that particular CPU is tested out.
But the intention of these tests is to cover all the CPUs in the group.

In order to support that, we need to stash the topology_core_cpumask
before the start of the test and use that value instead of pointer to
a cpumask which will be updated on CPU hotplug.

Fixes: 7f9545aa1a91a9a4 ("arm64: smp: remove cpu and numa topology
	information when hotplugging out CPU")
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
---
 drivers/firmware/psci_checker.c | 83 ++++++++++++++++++++++++-----------------
 1 file changed, 49 insertions(+), 34 deletions(-)

v2->v3:
	- Got rid of find_cpu_groups as suggested by Lorenzo

diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index bb1c068bff19..346943657962 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,28 +77,6 @@ static int psci_ops_check(void)
 	return 0;
 }

-static int find_cpu_groups(const struct cpumask *cpus,
-			   const struct cpumask **cpu_groups)
-{
-	unsigned int nb = 0;
-	cpumask_var_t tmp;
-
-	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
-		return -ENOMEM;
-	cpumask_copy(tmp, cpus);
-
-	while (!cpumask_empty(tmp)) {
-		const struct cpumask *cpu_group =
-			topology_core_cpumask(cpumask_any(tmp));
-
-		cpu_groups[nb++] = cpu_group;
-		cpumask_andnot(tmp, tmp, cpu_group);
-	}
-
-	free_cpumask_var(tmp);
-	return nb;
-}
-
 /*
  * offlined_cpus is a temporary array but passing it as an argument avoids
  * multiple allocations.
@@ -166,29 +144,66 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
 	return err;
 }

+static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
+{
+	int i;
+	cpumask_var_t *cpu_groups = *pcpu_groups;
+
+	for (i = 0; i < num; ++i)
+		free_cpumask_var(cpu_groups[i]);
+	kfree(cpu_groups);
+}
+
+static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
+{
+	int num_groups = 0;
+	cpumask_var_t tmp, *cpu_groups;
+
+	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
+			     GFP_KERNEL);
+	if (!cpu_groups)
+		return -ENOMEM;
+
+	cpumask_copy(tmp, cpu_online_mask);
+
+	while (!cpumask_empty(tmp)) {
+		const struct cpumask *cpu_group =
+			topology_core_cpumask(cpumask_any(tmp));
+
+		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+			free_cpu_groups(num_groups, &cpu_groups);
+			return -ENOMEM;
+		}
+		cpumask_copy(cpu_groups[num_groups++], cpu_group);
+		cpumask_andnot(tmp, tmp, cpu_group);
+	}
+
+	free_cpumask_var(tmp);
+	*pcpu_groups = cpu_groups;
+
+	return num_groups;
+}
+
 static int hotplug_tests(void)
 {
-	int err;
-	cpumask_var_t offlined_cpus;
-	int i, nb_cpu_group;
-	const struct cpumask **cpu_groups;
+	int i, nb_cpu_group, err = -ENOMEM;
+	cpumask_var_t offlined_cpus, *cpu_groups;
 	char *page_buf;

-	err = -ENOMEM;
 	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
 		return err;
-	/* We may have up to nb_available_cpus cpu_groups. */
-	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
-				   GFP_KERNEL);
-	if (!cpu_groups)
+
+	nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
+	if (nb_cpu_group < 0)
 		goto out_free_cpus;
 	page_buf = (char *)__get_free_page(GFP_KERNEL);
 	if (!page_buf)
 		goto out_free_cpu_groups;

 	err = 0;
-	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
-
 	/*
 	 * Of course the last CPU cannot be powered down and cpu_down() should
 	 * refuse doing that.
@@ -212,7 +227,7 @@ static int hotplug_tests(void)

 	free_page((unsigned long)page_buf);
 out_free_cpu_groups:
-	kfree(cpu_groups);
+	free_cpu_groups(nb_cpu_group, &cpu_groups);
 out_free_cpus:
 	free_cpumask_var(offlined_cpus);
 	return err;
--
2.7.4

^ permalink raw reply related	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2018-07-19 16:44 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-07-18 11:25 [PATCH] drivers/firmware: psci_checker: stash and use topology_core_cpumask for hotplug tests Sudeep Holla
2018-07-18 11:25 ` Sudeep Holla
2018-07-18 16:49 ` Lorenzo Pieralisi
2018-07-18 16:49   ` Lorenzo Pieralisi
2018-07-19  9:50   ` Sudeep Holla
2018-07-19  9:50     ` Sudeep Holla
2018-07-19 13:35 ` [PATCH v2] " Sudeep Holla
2018-07-19 13:35   ` Sudeep Holla
2018-07-19 14:20   ` Lorenzo Pieralisi
2018-07-19 14:20     ` Lorenzo Pieralisi
2018-07-19 15:04     ` Sudeep Holla
2018-07-19 15:04       ` Sudeep Holla
2018-07-19 16:00 ` [PATCH v3] " Sudeep Holla
2018-07-19 16:00   ` Sudeep Holla

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.