linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
From: Nathan Fontenot <nfont@linux.vnet.ibm.com>
To: Michael Bringmann <mwb@linux.vnet.ibm.com>,
	linuxppc-dev@lists.ozlabs.org
Cc: John Allen <jallen@linux.vnet.ibm.com>,
	Tyrel Datwyler <tyreld@linux.vnet.ibm.com>,
	Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
Subject: Re: [PATCH v07 1/9] hotplug/cpu: Conditionally acquire/release DRC index
Date: Mon, 23 Jul 2018 12:42:14 -0500	[thread overview]
Message-ID: <a7e92a8f-2bb9-95e7-78ca-b7440f80c7f7@linux.vnet.ibm.com> (raw)
In-Reply-To: <a7e202c1-177f-db1b-912f-8773a551feec@linux.vnet.ibm.com>

On 07/13/2018 03:17 PM, Michael Bringmann wrote:
> powerpc/cpu: Modify dlpar_cpu_add and dlpar_cpu_remove to allow the
> skipping of DRC index acquire or release operations during the CPU
> add or remove operations.  This is intended to support subsequent
> changes to provide a 'CPU readd' operation.
> 
> Signed-off-by: Michael Bringmann <mwb@linux.vnet.ibm.com>
> ---
> Changes in patch:
>    -- Move new validity check added to pseries_smp_notifier
>       to another patch
>    -- Revise one of checks for 'acquire_drc' in dlpar_cpu_add.
>    -- Revise one of checks for 'release_drc' in dlpar_cpu_remove.
> ---
>   arch/powerpc/platforms/pseries/hotplug-cpu.c |   71 +++++++++++++++-----------
>   1 file changed, 40 insertions(+), 31 deletions(-)
> 
> diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
> index 6ef77ca..7ede3b0 100644
> --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
> +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
> @@ -432,7 +432,7 @@ static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
>   	return found;
>   }
> 
> -static ssize_t dlpar_cpu_add(u32 drc_index)
> +static ssize_t dlpar_cpu_add(u32 drc_index, bool acquire_drc)
>   {
>   	struct device_node *dn, *parent;
>   	int rc, saved_rc;
> @@ -457,19 +457,22 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
>   		return -EINVAL;
>   	}
> 
> -	rc = dlpar_acquire_drc(drc_index);
> -	if (rc) {
> -		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
> -			rc, drc_index);
> -		of_node_put(parent);
> -		return -EINVAL;
> +	if (acquire_drc) {
> +		rc = dlpar_acquire_drc(drc_index);
> +		if (rc) {
> +			pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
> +				rc, drc_index);
> +			of_node_put(parent);
> +			return -EINVAL;
> +		}
>   	}
> 
>   	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
>   	if (!dn) {
>   		pr_warn("Failed call to configure-connector, drc index: %x\n",
>   			drc_index);
> -		dlpar_release_drc(drc_index);
> +		if (acquire_drc)
> +			dlpar_release_drc(drc_index);
>   		of_node_put(parent);
>   		return -EINVAL;
>   	}
> @@ -484,9 +487,11 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
>   		pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
>   			dn->name, rc, drc_index);
> 
> -		rc = dlpar_release_drc(drc_index);
> -		if (!rc)
> -			dlpar_free_cc_nodes(dn);
> +		if (acquire_drc) {
> +			rc = dlpar_release_drc(drc_index);
> +			if (!rc)
> +				dlpar_free_cc_nodes(dn);
> +		}
> 
>   		return saved_rc;
>   	}
> @@ -498,7 +503,7 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
>   			dn->name, rc, drc_index);
> 
>   		rc = dlpar_detach_node(dn);
> -		if (!rc)
> +		if (!rc && acquire_drc)
>   			dlpar_release_drc(drc_index);
> 
>   		return saved_rc;
> @@ -566,7 +571,8 @@ static int dlpar_offline_cpu(struct device_node *dn)
> 
>   }
> 
> -static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
> +static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index,
> +				bool release_drc)
>   {
>   	int rc;
> 
> @@ -579,12 +585,14 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
>   		return -EINVAL;
>   	}
> 
> -	rc = dlpar_release_drc(drc_index);
> -	if (rc) {
> -		pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
> -			drc_index, dn->name, rc);
> -		dlpar_online_cpu(dn);
> -		return rc;
> +	if (release_drc) {
> +		rc = dlpar_release_drc(drc_index);
> +		if (rc) {
> +			pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
> +				drc_index, dn->name, rc);
> +			dlpar_online_cpu(dn);
> +			return rc;
> +		}
>   	}
> 
>   	rc = dlpar_detach_node(dn);
> @@ -593,8 +601,9 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
> 
>   		pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);
> 
> -		rc = dlpar_acquire_drc(drc_index);
> -		if (!rc)
> +		if (release_drc)
> +			rc = dlpar_acquire_drc(drc_index);
> +		if (!release_drc || !rc)
>   			dlpar_online_cpu(dn);

This is likely wrong. At this point you're inside an if (rc) block, so rc is
already non-zero. If release_drc is false, this checks a stale rc value.

-Nathan

> 
>   		return saved_rc;
> @@ -622,7 +631,7 @@ static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
>   	return dn;
>   }
> 
> -static int dlpar_cpu_remove_by_index(u32 drc_index)
> +static int dlpar_cpu_remove_by_index(u32 drc_index, bool release_drc)
>   {
>   	struct device_node *dn;
>   	int rc;
> @@ -634,7 +643,7 @@ static int dlpar_cpu_remove_by_index(u32 drc_index)
>   		return -ENODEV;
>   	}
> 
> -	rc = dlpar_cpu_remove(dn, drc_index);
> +	rc = dlpar_cpu_remove(dn, drc_index, release_drc);
>   	of_node_put(dn);
>   	return rc;
>   }
> @@ -699,7 +708,7 @@ static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
>   	}
> 
>   	for (i = 0; i < cpus_to_remove; i++) {
> -		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
> +		rc = dlpar_cpu_remove_by_index(cpu_drcs[i], true);
>   		if (rc)
>   			break;
> 
> @@ -710,7 +719,7 @@ static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
>   		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");
> 
>   		for (i = 0; i < cpus_removed; i++)
> -			dlpar_cpu_add(cpu_drcs[i]);
> +			dlpar_cpu_add(cpu_drcs[i], true);
> 
>   		rc = -EINVAL;
>   	} else {
> @@ -780,7 +789,7 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
>   	}
> 
>   	for (i = 0; i < cpus_to_add; i++) {
> -		rc = dlpar_cpu_add(cpu_drcs[i]);
> +		rc = dlpar_cpu_add(cpu_drcs[i], true);
>   		if (rc)
>   			break;
> 
> @@ -791,7 +800,7 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
>   		pr_warn("CPU hot-add failed, removing any added CPUs\n");
> 
>   		for (i = 0; i < cpus_added; i++)
> -			dlpar_cpu_remove_by_index(cpu_drcs[i]);
> +			dlpar_cpu_remove_by_index(cpu_drcs[i], true);
> 
>   		rc = -EINVAL;
>   	} else {
> @@ -817,7 +826,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
>   		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
>   			rc = dlpar_cpu_remove_by_count(count);
>   		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
> -			rc = dlpar_cpu_remove_by_index(drc_index);
> +			rc = dlpar_cpu_remove_by_index(drc_index, true);
>   		else
>   			rc = -EINVAL;
>   		break;
> @@ -825,7 +834,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
>   		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
>   			rc = dlpar_cpu_add_by_count(count);
>   		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
> -			rc = dlpar_cpu_add(drc_index);
> +			rc = dlpar_cpu_add(drc_index, true);
>   		else
>   			rc = -EINVAL;
>   		break;
> @@ -850,7 +859,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
>   	if (rc)
>   		return -EINVAL;
> 
> -	rc = dlpar_cpu_add(drc_index);
> +	rc = dlpar_cpu_add(drc_index, true);
> 
>   	return rc ? rc : count;
>   }
> @@ -871,7 +880,7 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
>   		return -EINVAL;
>   	}
> 
> -	rc = dlpar_cpu_remove(dn, drc_index);
> +	rc = dlpar_cpu_remove(dn, drc_index, true);
>   	of_node_put(dn);
> 
>   	return rc ? rc : count;
> 

  reply	other threads:[~2018-07-23 17:42 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-07-13 20:16 [PATCH v07 0/9] powerpc/hotplug: Update affinity for migrated CPUs Michael Bringmann
2018-07-13 20:17 ` [PATCH v07 1/9] hotplug/cpu: Conditionally acquire/release DRC index Michael Bringmann
2018-07-23 17:42   ` Nathan Fontenot [this message]
2018-07-13 20:18 ` [PATCH v07 2/9] hotplug/cpu: Add operation queuing function Michael Bringmann
2018-07-23 15:54   ` John Allen
2018-07-25 15:49     ` Michael Bringmann
2018-07-27  5:57       ` Michael Ellerman
2018-07-23 17:51   ` Nathan Fontenot
2018-07-25 15:57     ` Michael Bringmann
2018-07-27  6:09       ` Michael Ellerman
2018-07-13 20:18 ` [PATCH v07 3/9] hotplug/cpu: Provide CPU readd operation Michael Bringmann
2018-07-13 20:18 ` [PATCH v07 4/9] mobility/numa: Ensure numa update does not overlap Michael Bringmann
2018-07-13 20:18 ` [PATCH v07 5/9] numa: Disable/enable arch_update_cpu_topology Michael Bringmann
2018-07-13 20:18 ` [PATCH v07 6/9] pmt/numa: Disable arch_update_cpu_topology during CPU readd Michael Bringmann
2018-07-24 20:38   ` Nathan Fontenot
2018-07-13 20:18 ` [PATCH v07 7/9] powerpc/rtas: Allow disabling rtas_event_scan Michael Bringmann
2018-07-13 20:18 ` [PATCH v07 8/9] hotplug/rtas: No rtas_event_scan during PMT update Michael Bringmann
2018-07-13 20:18 ` [PATCH v07 9/9] hotplug/pmt: Update topology after PMT Michael Bringmann

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=a7e92a8f-2bb9-95e7-78ca-b7440f80c7f7@linux.vnet.ibm.com \
    --to=nfont@linux.vnet.ibm.com \
    --cc=jallen@linux.vnet.ibm.com \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=mwb@linux.vnet.ibm.com \
    --cc=tlfalcon@linux.vnet.ibm.com \
    --cc=tyreld@linux.vnet.ibm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).