[2/2] perf/x86/intel/uncore: With > 8 nodes, get pci bus die id from NUMA info

Message ID 20210108153549.108989-3-steve.wahl@hpe.com
State Accepted
Commit 9a7832ce3d920426a36cdd78eda4b3568d4d09e3
Series
  • perf/x86/intel/uncore: Derive die id from NUMA info with more than 8 nodes

Commit Message

Steve Wahl Jan. 8, 2021, 3:35 p.m. UTC
The registers used to determine which die a pci bus belongs to don't
contain enough information to uniquely specify more than 8 dies, so
when more than 8 dies are present, use NUMA information instead.

Continue to use the previous method for 8 or fewer nodes, because it
works there and also covers the case of NUMA being disabled.

Signed-off-by: Steve Wahl <steve.wahl@hpe.com>
---
 arch/x86/events/intel/uncore_snbep.c | 93 +++++++++++++++++++---------
 1 file changed, 65 insertions(+), 28 deletions(-)
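
For anyone who wants to poke at the same association from userspace: the
lookup the >8-node path performs (PCI bus -> NUMA node -> a CPU on that
node -> that CPU's die) can be approximated through sysfs.  The program
below is a rough standalone sketch for illustration only, not part of the
patch; error handling is minimal, and note that the sysfs die_id is the
platform's die identifier, which may not match the kernel's internal
logical die id used in the patch.

/*
 * Userspace illustration only (not part of the patch): resolve a PCI
 * device to a die the same way the >8-node fallback does, by going
 * through the device's NUMA node and a CPU that belongs to that node.
 */
#include <stdio.h>

static int read_first_int(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(int argc, char **argv)
{
	char path[256];
	int node, cpu, die;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pci-bdf, e.g. 0000:64:00.0>\n", argv[0]);
		return 1;
	}

	/* NUMA node of the PCI device, as reported via BIOS/ACPI tables. */
	snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/numa_node", argv[1]);
	node = read_first_int(path);
	if (node < 0) {
		fprintf(stderr, "no NUMA node reported for %s\n", argv[1]);
		return 1;
	}

	/* First CPU listed for that node. */
	snprintf(path, sizeof(path), "/sys/devices/system/node/node%d/cpulist", node);
	cpu = read_first_int(path);
	if (cpu < 0) {
		fprintf(stderr, "no CPUs on node %d\n", node);
		return 1;
	}

	/* That CPU's die (platform die id; -1 if the file is missing). */
	snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
	die = read_first_int(path);

	printf("%s: node %d, cpu %d, die_id %d\n", argv[1], node, cpu, die);
	return 0;
}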

Comments

Peter Zijlstra Jan. 11, 2021, 1 p.m. UTC | #1
On Fri, Jan 08, 2021 at 09:35:49AM -0600, Steve Wahl wrote:


> +		/*
> +		 * The nodeid and idmap registers only contain enough
> +		 * information to handle 8 nodes.  On systems with more
> +		 * than 8 nodes, we need to rely on NUMA information,
> +		 * filled in from BIOS supplied information, to determine
> +		 * the topology.
> +		 */

Egads.. do we really have to trust BIOS data? BIOS crud tends to be
bonghits qualitee :/

> +		if (nr_node_ids <= 8) {
> +			/* get the Node ID of the local register */
> +			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
> +			if (err)
> +				break;
> +			nodeid = config & NODE_ID_MASK;
> +			/* get the Node ID mapping */
> +			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
> +			if (err)
> +				break;
>  
> +			segment = pci_domain_nr(ubox_dev->bus);
> +			raw_spin_lock(&pci2phy_map_lock);
> +			map = __find_pci2phy_map(segment);
> +			if (!map) {
> +				raw_spin_unlock(&pci2phy_map_lock);
> +				err = -ENOMEM;
> +				break;
> +			}
> +
> +			/*
> +			 * every three bits in the Node ID mapping register maps
> +			 * to a particular node.
> +			 */
> +			for (i = 0; i < 8; i++) {
> +				if (nodeid == ((config >> (3 * i)) & 0x7)) {
> +					if (topology_max_die_per_package() > 1)
> +						die_id = i;
> +					else
> +						die_id = topology_phys_to_logical_pkg(i);
> +					map->pbus_to_dieid[bus] = die_id;
> +					break;
> +				}
> +			}
>  			raw_spin_unlock(&pci2phy_map_lock);
> +		} else {
> +			int node = pcibus_to_node(ubox_dev->bus);
> +			int cpu;
> +
> +			segment = pci_domain_nr(ubox_dev->bus);
> +			raw_spin_lock(&pci2phy_map_lock);
> +			map = __find_pci2phy_map(segment);
> +			if (!map) {
> +				raw_spin_unlock(&pci2phy_map_lock);
> +				err = -ENOMEM;
> +				break;
> +			}
> +			die_id = -1;
> +			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
> +				struct cpuinfo_x86 *c = &cpu_data(cpu);
> +
> +				if (c->initialized && cpu_to_node(cpu) == node) {
> +					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
> +					break;
> +				}
> +			}
> +			raw_spin_unlock(&pci2phy_map_lock);
> +
> +			if (WARN_ON_ONCE(die_id == -1)) {
> +				err = -EINVAL;
>  				break;
>  			}

This seems to assume a single die per node; is that fundamentally
correct?

Did you consider malicious BIOS data? I think we're good, but I didn't
look too hard.

>  		}
>  	}
>  
>  	if (!err) {
> -- 
> 2.26.2
>
Steve Wahl Jan. 11, 2021, 3:45 p.m. UTC | #2
On Mon, Jan 11, 2021 at 02:00:33PM +0100, Peter Zijlstra wrote:
> On Fri, Jan 08, 2021 at 09:35:49AM -0600, Steve Wahl wrote:
> 
> 
> > +		/*
> > +		 * The nodeid and idmap registers only contain enough
> > +		 * information to handle 8 nodes.  On systems with more
> > +		 * than 8 nodes, we need to rely on NUMA information,
> > +		 * filled in from BIOS supplied information, to determine
> > +		 * the topology.
> > +		 */
> 
> Egads.. do we really have to trust BIOS data? BIOS crud tends to be
> bonghits qualitee :/

I work too close to BIOS people (virtually, at least for the moment)
to safely make disparaging remarks. :-) While the origin is the BIOS,
I'm using pieces that were already being pulled from the BIOS tables
for NUMA purposes.  

> > +		if (nr_node_ids <= 8) {
> > +			/* get the Node ID of the local register */
> > +			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
> > +			if (err)
> > +				break;
> > +			nodeid = config & NODE_ID_MASK;
> > +			/* get the Node ID mapping */
> > +			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
> > +			if (err)
> > +				break;
> >  
> > +			segment = pci_domain_nr(ubox_dev->bus);
> > +			raw_spin_lock(&pci2phy_map_lock);
> > +			map = __find_pci2phy_map(segment);
> > +			if (!map) {
> > +				raw_spin_unlock(&pci2phy_map_lock);
> > +				err = -ENOMEM;
> > +				break;
> > +			}
> > +
> > +			/*
> > +			 * every three bits in the Node ID mapping register maps
> > +			 * to a particular node.
> > +			 */
> > +			for (i = 0; i < 8; i++) {
> > +				if (nodeid == ((config >> (3 * i)) & 0x7)) {
> > +					if (topology_max_die_per_package() > 1)
> > +						die_id = i;
> > +					else
> > +						die_id = topology_phys_to_logical_pkg(i);
> > +					map->pbus_to_dieid[bus] = die_id;
> > +					break;
> > +				}
> > +			}
> >  			raw_spin_unlock(&pci2phy_map_lock);
> > +		} else {
> > +			int node = pcibus_to_node(ubox_dev->bus);
> > +			int cpu;
> > +
> > +			segment = pci_domain_nr(ubox_dev->bus);
> > +			raw_spin_lock(&pci2phy_map_lock);
> > +			map = __find_pci2phy_map(segment);
> > +			if (!map) {
> > +				raw_spin_unlock(&pci2phy_map_lock);
> > +				err = -ENOMEM;
> > +				break;
> > +			}
> > +			die_id = -1;
> > +			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
> > +				struct cpuinfo_x86 *c = &cpu_data(cpu);
> > +
> > +				if (c->initialized && cpu_to_node(cpu) == node) {
> > +					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
> > +					break;
> > +				}
> > +			}
> > +			raw_spin_unlock(&pci2phy_map_lock);
> > +
> > +			if (WARN_ON_ONCE(die_id == -1)) {
> > +				err = -EINVAL;
> >  				break;
> >  			}
> 
> This seems to assume a single die per node; is that fundamentally
> correct?

It should work for one or more nodes per die; i.e. sub-NUMA clustering
should work.  If there are any systems with fewer nodes than dies
(more than one die in a NUMA node) it will likely fail.  It's not
clear to me whether nodes < dies is a possibility or not; however,
note that this situation would be broken with or without my changes.
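
To make that constraint concrete, here's a throwaway userspace sketch
(made-up SNC-style topology table, not kernel code) of the invariant the
NUMA path relies on: the node -> die lookup is only well defined if no
NUMA node contains CPUs from more than one die.

/*
 * Throwaway illustration, not kernel code: check that each NUMA node
 * maps to exactly one die, which is what the per-bus CPU scan relies on.
 * The topology table below is made up (two nodes per die, SNC-style).
 */
#include <stdio.h>

struct cpu_topo {
	int cpu;
	int node;
	int die;
};

static const struct cpu_topo topo[] = {
	{ 0, 0, 0 }, { 1, 1, 0 },	/* die 0 presented as nodes 0 and 1 */
	{ 2, 2, 1 }, { 3, 3, 1 },	/* die 1 presented as nodes 2 and 3 */
};

#define MAX_NODES 8

int main(void)
{
	int node_to_die[MAX_NODES];
	int i, ok = 1;

	for (i = 0; i < MAX_NODES; i++)
		node_to_die[i] = -1;

	for (i = 0; i < (int)(sizeof(topo) / sizeof(topo[0])); i++) {
		int n = topo[i].node;

		if (node_to_die[n] == -1) {
			node_to_die[n] = topo[i].die;
		} else if (node_to_die[n] != topo[i].die) {
			/* A node spanning two dies makes the lookup ambiguous. */
			printf("node %d spans dies %d and %d\n",
			       n, node_to_die[n], topo[i].die);
			ok = 0;
		}
	}

	printf(ok ? "node -> die lookup is well defined\n"
		  : "node -> die lookup is ambiguous\n");
	return 0;
}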

> Did you consider malicious BIOS data? I think we're good, but I didn't
> look too hard.

I did not consider malicious BIOS data.  Having given it some quick
thought, I believe the worst that could happen is that the counters get
associated with the wrong die, and only under circumstances where the
previous code would have aborted mapping the counters to dies entirely
(which it does when there are more than 8 dies).

Thank you for taking the time to look at this!

--> Steve
Peter Zijlstra Jan. 12, 2021, 3:07 p.m. UTC | #3
On Mon, Jan 11, 2021 at 09:45:16AM -0600, Steve Wahl wrote:
> On Mon, Jan 11, 2021 at 02:00:33PM +0100, Peter Zijlstra wrote:
> > On Fri, Jan 08, 2021 at 09:35:49AM -0600, Steve Wahl wrote:
> > 
> > 
> > > +		/*
> > > +		 * The nodeid and idmap registers only contain enough
> > > +		 * information to handle 8 nodes.  On systems with more
> > > +		 * than 8 nodes, we need to rely on NUMA information,
> > > +		 * filled in from BIOS supplied information, to determine
> > > +		 * the topology.
> > > +		 */
> > 
> > Egads.. do we really have to trust BIOS data? BIOS crud tends to be
> > bonghits qualitee :/
> 
> I work too close to BIOS people (virtually, at least for the moment)
> to safely make disparaging remarks. :-) While the origin is the BIOS,
> I'm using pieces that were already being pulled from the BIOS tables
> for NUMA purposes.

:-) It's just that we've had too much 'fun' with PCI node bindings in
the past.
Steve Wahl Jan. 12, 2021, 7:42 p.m. UTC | #4
On Tue, Jan 12, 2021 at 04:07:15PM +0100, Peter Zijlstra wrote:
> On Mon, Jan 11, 2021 at 09:45:16AM -0600, Steve Wahl wrote:
> > On Mon, Jan 11, 2021 at 02:00:33PM +0100, Peter Zijlstra wrote:
> > > On Fri, Jan 08, 2021 at 09:35:49AM -0600, Steve Wahl wrote:
> > > 
> > > 
> > > > +		/*
> > > > +		 * The nodeid and idmap registers only contain enough
> > > > +		 * information to handle 8 nodes.  On systems with more
> > > > +		 * than 8 nodes, we need to rely on NUMA information,
> > > > +		 * filled in from BIOS supplied information, to determine
> > > > +		 * the topology.
> > > > +		 */
> > > 
> > > Egads.. do we really have to trust BIOS data? BIOS crud tends to be
> > > bonghits qualitee :/
> > 
> > I work too close to BIOS people (virtually, at least for the moment)
> > to safely make disparaging remarks. :-) While the origin is the BIOS,
> > I'm using pieces that were already being pulled from the BIOS tables
> > for NUMA purposes.
> 
> :-) It's just that we've had too much 'fun' with PCI node bindings in
> the past.

I wasn't aware of that, but I understand.  Fortunately, this patch
shouldn't touch cases that aren't already broken (>8 nodes); working
cases continue to use the existing methods.

Thanks!

--> Steve Wahl

Patch

diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 2d7014dc46f6..b79951d0707c 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1370,40 +1370,77 @@  static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
 		if (!ubox_dev)
 			break;
 		bus = ubox_dev->bus->number;
-		/* get the Node ID of the local register */
-		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
-		if (err)
-			break;
-		nodeid = config & NODE_ID_MASK;
-		/* get the Node ID mapping */
-		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
-		if (err)
-			break;
+		/*
+		 * The nodeid and idmap registers only contain enough
+		 * information to handle 8 nodes.  On systems with more
+		 * than 8 nodes, we need to rely on NUMA information,
+		 * filled in from BIOS supplied information, to determine
+		 * the topology.
+		 */
+		if (nr_node_ids <= 8) {
+			/* get the Node ID of the local register */
+			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
+			if (err)
+				break;
+			nodeid = config & NODE_ID_MASK;
+			/* get the Node ID mapping */
+			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
+			if (err)
+				break;
 
-		segment = pci_domain_nr(ubox_dev->bus);
-		raw_spin_lock(&pci2phy_map_lock);
-		map = __find_pci2phy_map(segment);
-		if (!map) {
+			segment = pci_domain_nr(ubox_dev->bus);
+			raw_spin_lock(&pci2phy_map_lock);
+			map = __find_pci2phy_map(segment);
+			if (!map) {
+				raw_spin_unlock(&pci2phy_map_lock);
+				err = -ENOMEM;
+				break;
+			}
+
+			/*
+			 * every three bits in the Node ID mapping register maps
+			 * to a particular node.
+			 */
+			for (i = 0; i < 8; i++) {
+				if (nodeid == ((config >> (3 * i)) & 0x7)) {
+					if (topology_max_die_per_package() > 1)
+						die_id = i;
+					else
+						die_id = topology_phys_to_logical_pkg(i);
+					map->pbus_to_dieid[bus] = die_id;
+					break;
+				}
+			}
 			raw_spin_unlock(&pci2phy_map_lock);
-			err = -ENOMEM;
-			break;
-		}
+		} else {
+			int node = pcibus_to_node(ubox_dev->bus);
+			int cpu;
+
+			segment = pci_domain_nr(ubox_dev->bus);
+			raw_spin_lock(&pci2phy_map_lock);
+			map = __find_pci2phy_map(segment);
+			if (!map) {
+				raw_spin_unlock(&pci2phy_map_lock);
+				err = -ENOMEM;
+				break;
+			}
 
-		/*
-		 * every three bits in the Node ID mapping register maps
-		 * to a particular node.
-		 */
-		for (i = 0; i < 8; i++) {
-			if (nodeid == ((config >> (3 * i)) & 0x7)) {
-				if (topology_max_die_per_package() > 1)
-					die_id = i;
-				else
-					die_id = topology_phys_to_logical_pkg(i);
-				map->pbus_to_dieid[bus] = die_id;
+			die_id = -1;
+			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
+				struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+				if (c->initialized && cpu_to_node(cpu) == node) {
+					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
+					break;
+				}
+			}
+			raw_spin_unlock(&pci2phy_map_lock);
+
+			if (WARN_ON_ONCE(die_id == -1)) {
+				err = -EINVAL;
 				break;
 			}
 		}
-		raw_spin_unlock(&pci2phy_map_lock);
 	}
 
 	if (!err) {