linux-rdma.vger.kernel.org archive mirror
* [PATCH 0/5] cpumask, sched/topology: NUMA-aware CPU spreading interface
@ 2022-08-16 18:07 Valentin Schneider
  2022-08-16 18:07 ` [PATCH 1/5] bitops: Introduce find_next_andnot_bit() Valentin Schneider
                   ` (4 more replies)
  0 siblings, 5 replies; 10+ messages in thread
From: Valentin Schneider @ 2022-08-16 18:07 UTC (permalink / raw)
  To: netdev, linux-rdma, linux-kernel
  Cc: Saeed Mahameed, Leon Romanovsky, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Paolo Abeni, Yury Norov, Andy Shevchenko,
	Rasmus Villemoes, Ingo Molnar, Peter Zijlstra, Vincent Guittot,
	Dietmar Eggemann, Steven Rostedt, Mel Gorman, Greg Kroah-Hartman,
	Barry Song, Heiko Carstens, Tony Luck, Jonathan Cameron,
	Gal Pressman, Tariq Toukan

Hi folks,

Tariq pointed out in [1] that drivers allocating IRQ vectors would benefit
from having smarter NUMA-awareness (cpumask_local_spread() doesn't quite cut
it).

The proposed interface involved an array of CPUs and a temporary cpumask, and,
being my difficult self, what I'm proposing here is an interface that doesn't
require any temporary storage other than some stack variables (at the cost of
one wild macro).

Patch 5/5 is just there to showcase how the thing would be used. If this doesn't
get hated on, I'll let Tariq pick this up and push it with his networking driver
changes (with actual changelogs).
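
For reference, usage would end up looking something like the below - a
sketch with made-up names ('nvecs', 'vec_cpu[]'), the actual mlx5e change
being in patch 5/5:

        int cpu, i = 0;

        rcu_read_lock();
        for_each_numa_hop_cpu(cpu, node) {
                vec_cpu[i] = cpu; /* CPUs come out in increasing NUMA distance */
                if (++i == nvecs)
                        goto done; /* 'break' won't exit the double loop */
        }
done:
        rcu_read_unlock();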

[1]: https://lore.kernel.org/all/20220728191203.4055-1-tariqt@nvidia.com/

Cheers,
Valentin

Valentin Schneider (5):
  bitops: Introduce find_next_andnot_bit()
  cpumask: Introduce for_each_cpu_andnot()
  sched/topology: Introduce sched_numa_hop_mask()
  sched/topology: Introduce for_each_numa_hop_cpu()
  SHOWCASE: net/mlx5e: Leverage for_each_numa_hop_cpu()

 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 13 ++++-
 include/linux/cpumask.h                      | 32 ++++++++++++++
 include/linux/find.h                         | 44 ++++++++++++++++---
 include/linux/topology.h                     | 46 ++++++++++++++++++++
 kernel/sched/topology.c                      | 28 ++++++++++++
 lib/cpumask.c                                | 19 ++++++++
 lib/find_bit.c                               | 16 ++++---
 7 files changed, 185 insertions(+), 13 deletions(-)

--
2.31.1


* [PATCH 1/5] bitops: Introduce find_next_andnot_bit()
  2022-08-16 18:07 [PATCH 0/5] cpumask, sched/topology: NUMA-aware CPU spreading interface Valentin Schneider
@ 2022-08-16 18:07 ` Valentin Schneider
  2022-08-16 22:13   ` Yury Norov
  2022-08-16 18:07 ` [PATCH 2/5] cpumask: Introduce for_each_cpu_andnot() Valentin Schneider
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 10+ messages in thread
From: Valentin Schneider @ 2022-08-16 18:07 UTC (permalink / raw)
  To: netdev, linux-rdma, linux-kernel
  Cc: Saeed Mahameed, Leon Romanovsky, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Paolo Abeni, Yury Norov, Andy Shevchenko,
	Rasmus Villemoes, Ingo Molnar, Peter Zijlstra, Vincent Guittot,
	Dietmar Eggemann, Steven Rostedt, Mel Gorman, Greg Kroah-Hartman,
	Barry Song, Heiko Carstens, Tony Luck, Jonathan Cameron,
	Gal Pressman, Tariq Toukan

In preparation for introducing for_each_cpu_andnot(), add a variant of
find_next_bit() that negates the bits in @addr2 when ANDing them with the
bits in @addr1.
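
To illustrate the intended semantics with made-up single-word values:

        unsigned long addr1 = 0x0110UL; /* bits 4 and 8 set */
        unsigned long addr2 = 0x0010UL; /* bit 4 set */

        /* Searches addr1 & ~addr2 == 0x0100, so this returns 8 */
        find_next_andnot_bit(&addr1, &addr2, BITS_PER_LONG, 0);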

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 include/linux/find.h | 44 ++++++++++++++++++++++++++++++++++++++------
 lib/find_bit.c       | 16 +++++++++++-----
 2 files changed, 49 insertions(+), 11 deletions(-)

diff --git a/include/linux/find.h b/include/linux/find.h
index 424ef67d4a42..454cde69b30b 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -10,7 +10,8 @@
 
 extern unsigned long _find_next_bit(const unsigned long *addr1,
 		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert, unsigned long le);
+		unsigned long start, unsigned long invert, unsigned long le,
+		bool negate);
 extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
 extern unsigned long _find_first_and_bit(const unsigned long *addr1,
 					 const unsigned long *addr2, unsigned long size);
@@ -41,7 +42,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
 		return val ? __ffs(val) : size;
 	}
 
-	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
+	return _find_next_bit(addr, NULL, size, offset, 0UL, 0, 0);
 }
 #endif
 
@@ -71,7 +72,38 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
 		return val ? __ffs(val) : size;
 	}
 
-	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
+	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0, 0);
+}
+#endif
+
+#ifndef find_next_andnot_bit
+/**
+ * find_next_andnot_bit - find the next set bit in one memory region
+ *                        but not in the other
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_andnot_bit(const unsigned long *addr1,
+		const unsigned long *addr2, unsigned long size,
+		unsigned long offset)
+{
+	if (small_const_nbits(size)) {
+		unsigned long val;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
+		return val ? __ffs(val) : size;
+	}
+
+	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0, 1);
 }
 #endif
 
@@ -99,7 +131,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
 		return val == ~0UL ? size : ffz(val);
 	}
 
-	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
+	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0, 0);
 }
 #endif
 
@@ -247,7 +279,7 @@ unsigned long find_next_zero_bit_le(const void *addr, unsigned
 		return val == ~0UL ? size : ffz(val);
 	}
 
-	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
+	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1, 0);
 }
 #endif
 
@@ -266,7 +298,7 @@ unsigned long find_next_bit_le(const void *addr, unsigned
 		return val ? __ffs(val) : size;
 	}
 
-	return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
+	return _find_next_bit(addr, NULL, size, offset, 0UL, 1, 0);
 }
 #endif
 
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 1b8e4b2a9cba..6e5f42c621a9 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -21,17 +21,19 @@
 
 #if !defined(find_next_bit) || !defined(find_next_zero_bit) ||			\
 	!defined(find_next_bit_le) || !defined(find_next_zero_bit_le) ||	\
-	!defined(find_next_and_bit)
+	!defined(find_next_and_bit) || !defined(find_next_andnot_bit)
 /*
  * This is a common helper function for find_next_bit, find_next_zero_bit, and
  * find_next_and_bit. The differences are:
  *  - The "invert" argument, which is XORed with each fetched word before
  *    searching it for one bits.
- *  - The optional "addr2", which is anded with "addr1" if present.
+ *  - The optional "addr2", negated if "negate" and ANDed with "addr1" if
+ *    present.
  */
 unsigned long _find_next_bit(const unsigned long *addr1,
 		const unsigned long *addr2, unsigned long nbits,
-		unsigned long start, unsigned long invert, unsigned long le)
+		unsigned long start, unsigned long invert, unsigned long le,
+		bool negate)
 {
 	unsigned long tmp, mask;
 
@@ -40,7 +42,9 @@ unsigned long _find_next_bit(const unsigned long *addr1,
 
 	tmp = addr1[start / BITS_PER_LONG];
 	if (addr2)
-		tmp &= addr2[start / BITS_PER_LONG];
+		tmp &= negate ?
+		       ~addr2[start / BITS_PER_LONG] :
+			addr2[start / BITS_PER_LONG];
 	tmp ^= invert;
 
 	/* Handle 1st word. */
@@ -59,7 +63,9 @@ unsigned long _find_next_bit(const unsigned long *addr1,
 
 		tmp = addr1[start / BITS_PER_LONG];
 		if (addr2)
-			tmp &= addr2[start / BITS_PER_LONG];
+			tmp &= negate ?
+			       ~addr2[start / BITS_PER_LONG] :
+				addr2[start / BITS_PER_LONG];
 		tmp ^= invert;
 	}
 
-- 
2.31.1


* [PATCH 2/5] cpumask: Introduce for_each_cpu_andnot()
  2022-08-16 18:07 [PATCH 0/5] cpumask, sched/topology: NUMA-aware CPU spreading interface Valentin Schneider
  2022-08-16 18:07 ` [PATCH 1/5] bitops: Introduce find_next_andnot_bit() Valentin Schneider
@ 2022-08-16 18:07 ` Valentin Schneider
  2022-08-16 22:24   ` Yury Norov
  2022-08-16 18:07 ` [PATCH 3/5] sched/topology: Introduce sched_numa_hop_mask() Valentin Schneider
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 10+ messages in thread
From: Valentin Schneider @ 2022-08-16 18:07 UTC (permalink / raw)
  To: netdev, linux-rdma, linux-kernel
  Cc: Saeed Mahameed, Leon Romanovsky, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Paolo Abeni, Yury Norov, Andy Shevchenko,
	Rasmus Villemoes, Ingo Molnar, Peter Zijlstra, Vincent Guittot,
	Dietmar Eggemann, Steven Rostedt, Mel Gorman, Greg Kroah-Hartman,
	Barry Song, Heiko Carstens, Tony Luck, Jonathan Cameron,
	Gal Pressman, Tariq Toukan

for_each_cpu_and() is very convenient as it saves having to allocate a
temporary cpumask to store the result of cpumask_and(). The same issue
applies to cpumask_andnot() which doesn't actually need temporary storage
for iteration purposes.

Following what has been done for for_each_cpu_and(), introduce
for_each_cpu_andnot().
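
As an illustration ('isolated' and do_something() being hypothetical
placeholders):

        unsigned int cpu;

        /* Visit online CPUs not in 'isolated', no scratch cpumask needed */
        for_each_cpu_andnot(cpu, cpu_online_mask, isolated)
                do_something(cpu);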

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 include/linux/cpumask.h | 32 ++++++++++++++++++++++++++++++++
 lib/cpumask.c           | 19 +++++++++++++++++++
 2 files changed, 51 insertions(+)

diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index fe29ac7cc469..a8b2ca160e57 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -157,6 +157,13 @@ static inline unsigned int cpumask_next_and(int n,
 	return n+1;
 }
 
+static inline unsigned int cpumask_next_andnot(int n,
+					    const struct cpumask *srcp,
+					    const struct cpumask *andp)
+{
+	return n+1;
+}
+
 static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
 					     int start, bool wrap)
 {
@@ -194,6 +201,8 @@ static inline int cpumask_any_distribute(const struct cpumask *srcp)
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
 #define for_each_cpu_and(cpu, mask1, mask2)	\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
+#define for_each_cpu_andnot(cpu, mask1, mask2)	\
+	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
 #else
 /**
  * cpumask_first - get the first cpu in a cpumask
@@ -259,6 +268,9 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 }
 
 int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int __pure cpumask_next_andnot(int n,
+			       const struct cpumask *src1p,
+			       const struct cpumask *src2p);
 int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 unsigned int cpumask_local_spread(unsigned int i, int node);
 int cpumask_any_and_distribute(const struct cpumask *src1p,
@@ -324,6 +336,26 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
 	for ((cpu) = -1;						\
 		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
 		(cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_andnot - iterate over every cpu in one mask but not in another
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places.  It is equivalent to:
+ *	struct cpumask tmp;
+ *	cpumask_andnot(&tmp, &mask1, &mask2);
+ *	for_each_cpu(cpu, &tmp)
+ *		...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_andnot(cpu, mask1, mask2)				\
+	for ((cpu) = -1;						\
+		(cpu) = cpumask_next_andnot((cpu), (mask1), (mask2)),	\
+		(cpu) < nr_cpu_ids;)
+
 #endif /* SMP */
 
 #define CPU_BITS_NONE						\
diff --git a/lib/cpumask.c b/lib/cpumask.c
index a971a82d2f43..6896ff4a08fd 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -42,6 +42,25 @@ int cpumask_next_and(int n, const struct cpumask *src1p,
 }
 EXPORT_SYMBOL(cpumask_next_and);
 
+/**
+ * cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
+ */
+int cpumask_next_andnot(int n, const struct cpumask *src1p,
+		     const struct cpumask *src2p)
+{
+	/* -1 is a legal arg here. */
+	if (n != -1)
+		cpumask_check(n);
+	return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+		nr_cpumask_bits, n + 1);
+}
+EXPORT_SYMBOL(cpumask_next_andnot);
+
 /**
  * cpumask_any_but - return a "random" in a cpumask, but not this one.
  * @mask: the cpumask to search
-- 
2.31.1


* [PATCH 3/5] sched/topology: Introduce sched_numa_hop_mask()
  2022-08-16 18:07 [PATCH 0/5] cpumask, sched/topology: NUMA-aware CPU spreading interface Valentin Schneider
  2022-08-16 18:07 ` [PATCH 1/5] bitops: Introduce find_next_andnot_bit() Valentin Schneider
  2022-08-16 18:07 ` [PATCH 2/5] cpumask: Introduce for_each_cpu_andnot() Valentin Schneider
@ 2022-08-16 18:07 ` Valentin Schneider
  2022-08-16 18:07 ` [PATCH 4/5] sched/topology: Introduce for_each_numa_hop_cpu() Valentin Schneider
  2022-08-16 18:07 ` [PATCH 5/5] SHOWCASE: net/mlx5e: Leverage for_each_numa_hop_cpu() Valentin Schneider
  4 siblings, 0 replies; 10+ messages in thread
From: Valentin Schneider @ 2022-08-16 18:07 UTC (permalink / raw)
  To: netdev, linux-rdma, linux-kernel
  Cc: Saeed Mahameed, Leon Romanovsky, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Paolo Abeni, Yury Norov, Andy Shevchenko,
	Rasmus Villemoes, Ingo Molnar, Peter Zijlstra, Vincent Guittot,
	Dietmar Eggemann, Steven Rostedt, Mel Gorman, Greg Kroah-Hartman,
	Barry Song, Heiko Carstens, Tony Luck, Jonathan Cameron,
	Gal Pressman, Tariq Toukan

Tariq has pointed out that drivers allocating IRQ vectors would benefit
from having smarter NUMA-awareness - cpumask_local_spread() only knows
about the local node and everything outside is in the same bucket.

sched_domains_numa_masks is pretty much what we want to hand out (a cpumask
of CPUs reachable within a given distance budget), so introduce
sched_numa_hop_mask() to export those cpumasks.
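
Usage would look something like the below sketch - note the NULL / ERR_PTR
checking the interface requires:

        const struct cpumask *mask;

        rcu_read_lock();
        mask = sched_numa_hop_mask(node, 1); /* CPUs at most one hop away */
        if (!IS_ERR_OR_NULL(mask))
                pr_info("node%d, <=1 hop: %*pbl\n", node, cpumask_pr_args(mask));
        rcu_read_unlock();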

Link: http://lore.kernel.org/r/20220728191203.4055-1-tariqt@nvidia.com
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 include/linux/topology.h |  9 +++++++++
 kernel/sched/topology.c  | 28 ++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/include/linux/topology.h b/include/linux/topology.h
index 4564faafd0e1..13b82b83e547 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -245,5 +245,14 @@ static inline const struct cpumask *cpu_cpu_mask(int cpu)
 	return cpumask_of_node(cpu_to_node(cpu));
 }
 
+#ifdef CONFIG_NUMA
+extern const struct cpumask *sched_numa_hop_mask(int node, int hops);
+#else
+static inline const struct cpumask *sched_numa_hop_mask(int node, int hops)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+#endif	/* CONFIG_NUMA */
+
 
 #endif /* _LINUX_TOPOLOGY_H */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8739c2a5a54e..f0236a0ae65c 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2067,6 +2067,34 @@ int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
 	return found;
 }
 
+/**
+ * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away.
+ * @node: The node to count hops from.
+ * @hops: Include CPUs up to that many hops away. 0 means local node.
+ *
+ * Requires rcu_read_lock() to be held. Returned cpumask is only valid within
+ * that read-side section; copy it if required beyond that.
+ *
+ * Note that not all hops are equal in size; see sched_init_numa() for how
+ * distances and masks are handled.
+ *
+ * Also note that this is a reflection of sched_domains_numa_masks, which may change
+ * during the lifetime of the system (offline nodes are taken out of the masks).
+ */
+const struct cpumask *sched_numa_hop_mask(int node, int hops)
+{
+	struct cpumask ***masks = rcu_dereference(sched_domains_numa_masks);
+
+	if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
+		return ERR_PTR(-EINVAL);
+
+	if (!masks)
+		return NULL;
+
+	return masks[hops][node];
+}
+EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
+
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)
-- 
2.31.1


* [PATCH 4/5] sched/topology: Introduce for_each_numa_hop_cpu()
  2022-08-16 18:07 [PATCH 0/5] cpumask, sched/topology: NUMA-aware CPU spreading interface Valentin Schneider
                   ` (2 preceding siblings ...)
  2022-08-16 18:07 ` [PATCH 3/5] sched/topology: Introduce sched_numa_hop_mask() Valentin Schneider
@ 2022-08-16 18:07 ` Valentin Schneider
  2022-08-16 18:07 ` [PATCH 5/5] SHOWCASE: net/mlx5e: Leverage for_each_numa_hop_cpu() Valentin Schneider
  4 siblings, 0 replies; 10+ messages in thread
From: Valentin Schneider @ 2022-08-16 18:07 UTC (permalink / raw)
  To: netdev, linux-rdma, linux-kernel
  Cc: Saeed Mahameed, Leon Romanovsky, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Paolo Abeni, Yury Norov, Andy Shevchenko,
	Rasmus Villemoes, Ingo Molnar, Peter Zijlstra, Vincent Guittot,
	Dietmar Eggemann, Steven Rostedt, Mel Gorman, Greg Kroah-Hartman,
	Barry Song, Heiko Carstens, Tony Luck, Jonathan Cameron,
	Gal Pressman, Tariq Toukan

The recently introduced sched_numa_hop_mask() exposes cpumasks of CPUs
reachable within a given distance budget, but this means each successive
cpumask is a superset of the previous one.

Code wanting to allocate one item per CPU (e.g. IRQs) at increasing
distances would thus need to allocate a temporary cpumask to note which
CPUs have already been visited. This can be prevented by leveraging
for_each_cpu_andnot() - package all that logic into one ugl^D fancy macro.
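
Spelled out, the macro is morally equivalent to the following sketch:

        const struct cpumask *curr, *prev = cpu_none_mask;
        int hops = 0, cpu;

        rcu_read_lock();
        for (curr = sched_numa_hop_mask(node, 0);
             !IS_ERR_OR_NULL(curr);
             prev = curr, curr = sched_numa_hop_mask(node, ++hops)) {
                for_each_cpu_andnot(cpu, curr, prev)
                        /* @cpu is exactly @hops hops away from @node */;
        }
        rcu_read_unlock();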

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 include/linux/topology.h | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/include/linux/topology.h b/include/linux/topology.h
index 13b82b83e547..6c671dc3252c 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -254,5 +254,42 @@ static inline const struct cpumask *sched_numa_hop_mask(int node, int hops)
 }
 #endif	/* CONFIG_NUMA */
 
+/**
+ * for_each_numa_hop_cpu - iterate over CPUs by increasing NUMA distance,
+ *                         starting from a given node.
+ * @cpu: the iteration variable.
+ * @node: the NUMA node to start the search from.
+ *
+ * Requires rcu_read_lock() to be held.
+ * Careful: this is a double loop, 'break' won't work as expected.
+ *
+ *
+ * Implementation notes:
+ *
+ * Providing it is valid, the mask returned by
+ *   sched_numa_hop_mask(node, hops+1)
+ * is a superset of the one returned by
+ *   sched_numa_hop_mask(node, hops)
+ * which may not be that useful for drivers that try to spread things out and
+ * want to visit a CPU not more than once.
+ *
+ * To accommodate that, we use for_each_cpu_andnot() to iterate over the CPUs
+ * of sched_numa_hop_mask(node, hops+1) with the CPUs of
+ * sched_numa_hop_mask(node, hops) removed, IOW we only iterate over CPUs
+ * a given distance away (rather than *up to* a given distance).
+ *
+ * hops=0 forces us to play silly games: we pass cpu_none_mask to
+ * for_each_cpu_andnot(), which turns it into for_each_cpu().
+ */
+#define for_each_numa_hop_cpu(cpu, node)				       \
+	for (struct { const struct cpumask *curr, *prev; int hops; } __v =     \
+		     { sched_numa_hop_mask(node, 0), NULL, 0 };		       \
+	     !IS_ERR_OR_NULL(__v.curr);					       \
+	     __v.hops++,                                                       \
+	     __v.prev = __v.curr,					       \
+	     __v.curr = sched_numa_hop_mask(node, __v.hops))                   \
+		for_each_cpu_andnot(cpu,				       \
+				    __v.curr,				       \
+				    __v.hops ? __v.prev : cpu_none_mask)
 
 #endif /* _LINUX_TOPOLOGY_H */
-- 
2.31.1


* [PATCH 5/5] SHOWCASE: net/mlx5e: Leverage for_each_numa_hop_cpu()
  2022-08-16 18:07 [PATCH 0/5] cpumask, sched/topology: NUMA-aware CPU spreading interface Valentin Schneider
                   ` (3 preceding siblings ...)
  2022-08-16 18:07 ` [PATCH 4/5] sched/topology: Introduce for_each_numa_hop_cpu() Valentin Schneider
@ 2022-08-16 18:07 ` Valentin Schneider
  4 siblings, 0 replies; 10+ messages in thread
From: Valentin Schneider @ 2022-08-16 18:07 UTC (permalink / raw)
  To: netdev, linux-rdma, linux-kernel
  Cc: Saeed Mahameed, Leon Romanovsky, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Paolo Abeni, Yury Norov, Andy Shevchenko,
	Rasmus Villemoes, Ingo Molnar, Peter Zijlstra, Vincent Guittot,
	Dietmar Eggemann, Steven Rostedt, Mel Gorman, Greg Kroah-Hartman,
	Barry Song, Heiko Carstens, Tony Luck, Jonathan Cameron,
	Gal Pressman, Tariq Toukan

Not-signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 229728c80233..0a5432903edd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -812,6 +812,7 @@ static int comp_irqs_request(struct mlx5_core_dev *dev)
 	int ncomp_eqs = table->num_comp_eqs;
 	u16 *cpus;
 	int ret;
+	int cpu;
 	int i;
 
 	ncomp_eqs = table->num_comp_eqs;
@@ -830,8 +831,16 @@ static int comp_irqs_request(struct mlx5_core_dev *dev)
 		ret = -ENOMEM;
 		goto free_irqs;
 	}
-	for (i = 0; i < ncomp_eqs; i++)
-		cpus[i] = cpumask_local_spread(i, dev->priv.numa_node);
+
+	i = 0;
+	rcu_read_lock();
+	for_each_numa_hop_cpu(cpu, dev->priv.numa_node) {
+		cpus[i] = cpu;
+		if (++i == ncomp_eqs)
+			goto spread_done;
+	}
+spread_done:
+	rcu_read_unlock();
 	ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
 	kfree(cpus);
 	if (ret < 0)
-- 
2.31.1


* Re: [PATCH 1/5] bitops: Introduce find_next_andnot_bit()
  2022-08-16 18:07 ` [PATCH 1/5] bitops: Introduce find_next_andnot_bit() Valentin Schneider
@ 2022-08-16 22:13   ` Yury Norov
  2022-08-17 10:09     ` Valentin Schneider
  0 siblings, 1 reply; 10+ messages in thread
From: Yury Norov @ 2022-08-16 22:13 UTC (permalink / raw)
  To: Valentin Schneider
  Cc: netdev, linux-rdma, linux-kernel, Saeed Mahameed,
	Leon Romanovsky, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Andy Shevchenko, Rasmus Villemoes, Ingo Molnar,
	Peter Zijlstra, Vincent Guittot, Dietmar Eggemann,
	Steven Rostedt, Mel Gorman, Greg Kroah-Hartman, Barry Song,
	Heiko Carstens, Tony Luck, Jonathan Cameron, Gal Pressman,
	Tariq Toukan

On Tue, Aug 16, 2022 at 07:07:23PM +0100, Valentin Schneider wrote:
> In preparation for introducing for_each_cpu_andnot(), add a variant of
> find_next_bit() that negates the bits in @addr2 when ANDing them with the
> bits in @addr1.
> 
> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
> ---
>  include/linux/find.h | 44 ++++++++++++++++++++++++++++++++++++++------
>  lib/find_bit.c       | 16 +++++++++++-----
>  2 files changed, 49 insertions(+), 11 deletions(-)
> 
> diff --git a/include/linux/find.h b/include/linux/find.h
> index 424ef67d4a42..454cde69b30b 100644
> --- a/include/linux/find.h
> +++ b/include/linux/find.h
> @@ -10,7 +10,8 @@
>  
>  extern unsigned long _find_next_bit(const unsigned long *addr1,
>  		const unsigned long *addr2, unsigned long nbits,
> -		unsigned long start, unsigned long invert, unsigned long le);
> +		unsigned long start, unsigned long invert, unsigned long le,
> +		bool negate);
>  extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
>  extern unsigned long _find_first_and_bit(const unsigned long *addr1,
>  					 const unsigned long *addr2, unsigned long size);
> @@ -41,7 +42,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
>  		return val ? __ffs(val) : size;
>  	}
>  
> -	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
> +	return _find_next_bit(addr, NULL, size, offset, 0UL, 0, 0);
>  }
>  #endif
>  
> @@ -71,7 +72,38 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
>  		return val ? __ffs(val) : size;
>  	}
>  
> -	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
> +	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0, 0);
> +}
> +#endif
> +
> +#ifndef find_next_andnot_bit
> +/**
> + * find_next_andnot_bit - find the next set bit in one memory region
> + *                        but not in the other
> + * @addr1: The first address to base the search on
> + * @addr2: The second address to base the search on
> + * @size: The bitmap size in bits
> + * @offset: The bitnumber to start searching at
> + *
> + * Returns the bit number for the next set bit.
> + * If no bits are set, returns @size.
> + */
> +static inline
> +unsigned long find_next_andnot_bit(const unsigned long *addr1,
> +		const unsigned long *addr2, unsigned long size,
> +		unsigned long offset)
> +{
> +	if (small_const_nbits(size)) {
> +		unsigned long val;
> +
> +		if (unlikely(offset >= size))
> +			return size;
> +
> +		val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
> +		return val ? __ffs(val) : size;
> +	}
> +
> +	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0, 1);
>  }
>  #endif
>  
> @@ -99,7 +131,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
>  		return val == ~0UL ? size : ffz(val);
>  	}
>  
> -	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
> +	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0, 0);
>  }
>  #endif
>  
> @@ -247,7 +279,7 @@ unsigned long find_next_zero_bit_le(const void *addr, unsigned
>  		return val == ~0UL ? size : ffz(val);
>  	}
>  
> -	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
> +	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1, 0);
>  }
>  #endif
>  
> @@ -266,7 +298,7 @@ unsigned long find_next_bit_le(const void *addr, unsigned
>  		return val ? __ffs(val) : size;
>  	}
>  
> -	return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
> +	return _find_next_bit(addr, NULL, size, offset, 0UL, 1, 0);
>  }
>  #endif
>  
> diff --git a/lib/find_bit.c b/lib/find_bit.c
> index 1b8e4b2a9cba..6e5f42c621a9 100644
> --- a/lib/find_bit.c
> +++ b/lib/find_bit.c
> @@ -21,17 +21,19 @@
>  
>  #if !defined(find_next_bit) || !defined(find_next_zero_bit) ||			\
>  	!defined(find_next_bit_le) || !defined(find_next_zero_bit_le) ||	\
> -	!defined(find_next_and_bit)
> +	!defined(find_next_and_bit) || !defined(find_next_andnot_bit)
>  /*
>   * This is a common helper function for find_next_bit, find_next_zero_bit, and
>   * find_next_and_bit. The differences are:
>   *  - The "invert" argument, which is XORed with each fetched word before
>   *    searching it for one bits.
> - *  - The optional "addr2", which is anded with "addr1" if present.
> + *  - The optional "addr2", negated if "negate" and ANDed with "addr1" if
> + *    present.
>   */
>  unsigned long _find_next_bit(const unsigned long *addr1,
>  		const unsigned long *addr2, unsigned long nbits,
> -		unsigned long start, unsigned long invert, unsigned long le)
> +		unsigned long start, unsigned long invert, unsigned long le,
> +		bool negate)
>  {
>  	unsigned long tmp, mask;
>  
> @@ -40,7 +42,9 @@ unsigned long _find_next_bit(const unsigned long *addr1,
>  
>  	tmp = addr1[start / BITS_PER_LONG];
>  	if (addr2)
> -		tmp &= addr2[start / BITS_PER_LONG];
> +		tmp &= negate ?
> +		       ~addr2[start / BITS_PER_LONG] :
> +			addr2[start / BITS_PER_LONG];
>  	tmp ^= invert;
>  
>  	/* Handle 1st word. */
> @@ -59,7 +63,9 @@ unsigned long _find_next_bit(const unsigned long *addr1,
>  
>  		tmp = addr1[start / BITS_PER_LONG];
>  		if (addr2)
> -			tmp &= addr2[start / BITS_PER_LONG];
> +			tmp &= negate ?
> +			       ~addr2[start / BITS_PER_LONG] :
> +				addr2[start / BITS_PER_LONG];
>  		tmp ^= invert;
>  	}

So it flips addr2 bits twice - first with the new 'negate', and then
with the existing 'invert'. There is no such combination in the
existing code, but the pattern looks ugly, particularly because we use
different inverting approaches. Because of that, and because the XOR
trick generates better code, I'd suggest something like this:

        tmp = addr1[start / BITS_PER_LONG] ^ invert1;
        if (addr2)
                tmp &= addr2[start / BITS_PER_LONG] ^ invert2;
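
Presumably find_next_andnot_bit() would then pass something like
invert1 = 0UL, invert2 = ~0UL (a sketch of one possible wiring, not
spelled out above):

        /* search addr1 & ~addr2: don't flip addr1, flip addr2 */
        return _find_next_bit(addr1, addr2, size, offset, 0UL, ~0UL, 0);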

* Re: [PATCH 2/5] cpumask: Introduce for_each_cpu_andnot()
  2022-08-16 18:07 ` [PATCH 2/5] cpumask: Introduce for_each_cpu_andnot() Valentin Schneider
@ 2022-08-16 22:24   ` Yury Norov
  2022-08-17 10:09     ` Valentin Schneider
  0 siblings, 1 reply; 10+ messages in thread
From: Yury Norov @ 2022-08-16 22:24 UTC (permalink / raw)
  To: Valentin Schneider
  Cc: netdev, linux-rdma, linux-kernel, Saeed Mahameed,
	Leon Romanovsky, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Andy Shevchenko, Rasmus Villemoes, Ingo Molnar,
	Peter Zijlstra, Vincent Guittot, Dietmar Eggemann,
	Steven Rostedt, Mel Gorman, Greg Kroah-Hartman, Barry Song,
	Heiko Carstens, Tony Luck, Jonathan Cameron, Gal Pressman,
	Tariq Toukan

On Tue, Aug 16, 2022 at 07:07:24PM +0100, Valentin Schneider wrote:
> for_each_cpu_and() is very convenient as it saves having to allocate a
> temporary cpumask to store the result of cpumask_and(). The same issue
> applies to cpumask_andnot() which doesn't actually need temporary storage
> for iteration purposes.
> 
> Following what has been done for for_each_cpu_and(), introduce
> for_each_cpu_andnot().
> 
> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
> ---
>  include/linux/cpumask.h | 32 ++++++++++++++++++++++++++++++++
>  lib/cpumask.c           | 19 +++++++++++++++++++
>  2 files changed, 51 insertions(+)
> 
> diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
> index fe29ac7cc469..a8b2ca160e57 100644
> --- a/include/linux/cpumask.h
> +++ b/include/linux/cpumask.h
> @@ -157,6 +157,13 @@ static inline unsigned int cpumask_next_and(int n,
>  	return n+1;
>  }
>  
> +static inline unsigned int cpumask_next_andnot(int n,
> +					    const struct cpumask *srcp,
> +					    const struct cpumask *andp)
> +{
> +	return n+1;
> +}
> +

It looks like the patch is not based on top of 6.0, where UP cpumask
operations were fixed.  Can you please rebase?

Thanks,
Yury

>  static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
>  					     int start, bool wrap)
>  {
> @@ -194,6 +201,8 @@ static inline int cpumask_any_distribute(const struct cpumask *srcp)
>  	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
>  #define for_each_cpu_and(cpu, mask1, mask2)	\
>  	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
> +#define for_each_cpu_andnot(cpu, mask1, mask2)	\
> +	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
>  #else
>  /**
>   * cpumask_first - get the first cpu in a cpumask
> @@ -259,6 +268,9 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
>  }
>  
>  int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
> +int __pure cpumask_next_andnot(int n,
> +			       const struct cpumask *src1p,
> +			       const struct cpumask *src2p);
>  int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
>  unsigned int cpumask_local_spread(unsigned int i, int node);
>  int cpumask_any_and_distribute(const struct cpumask *src1p,
> @@ -324,6 +336,26 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
>  	for ((cpu) = -1;						\
>  		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
>  		(cpu) < nr_cpu_ids;)
> +
> +/**
> + * for_each_cpu_andnot - iterate over every cpu in one mask but not in another
> + * @cpu: the (optionally unsigned) integer iterator
> + * @mask1: the first cpumask pointer
> + * @mask2: the second cpumask pointer
> + *
> + * This saves a temporary CPU mask in many places.  It is equivalent to:
> + *	struct cpumask tmp;
> + *	cpumask_andnot(&tmp, &mask1, &mask2);
> + *	for_each_cpu(cpu, &tmp)
> + *		...
> + *
> + * After the loop, cpu is >= nr_cpu_ids.
> + */
> +#define for_each_cpu_andnot(cpu, mask1, mask2)				\
> +	for ((cpu) = -1;						\
> +		(cpu) = cpumask_next_andnot((cpu), (mask1), (mask2)),	\
> +		(cpu) < nr_cpu_ids;)
> +
>  #endif /* SMP */
>  
>  #define CPU_BITS_NONE						\
> diff --git a/lib/cpumask.c b/lib/cpumask.c
> index a971a82d2f43..6896ff4a08fd 100644
> --- a/lib/cpumask.c
> +++ b/lib/cpumask.c
> @@ -42,6 +42,25 @@ int cpumask_next_and(int n, const struct cpumask *src1p,
>  }
>  EXPORT_SYMBOL(cpumask_next_and);
>  
> +/**
> + * cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
> + * @n: the cpu prior to the place to search (ie. return will be > @n)
> + * @src1p: the first cpumask pointer
> + * @src2p: the second cpumask pointer
> + *
> + * Returns >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
> + */
> +int cpumask_next_andnot(int n, const struct cpumask *src1p,
> +		     const struct cpumask *src2p)
> +{
> +	/* -1 is a legal arg here. */
> +	if (n != -1)
> +		cpumask_check(n);
> +	return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
> +		nr_cpumask_bits, n + 1);
> +}
> +EXPORT_SYMBOL(cpumask_next_andnot);
> +
>  /**
>   * cpumask_any_but - return a "random" in a cpumask, but not this one.
>   * @mask: the cpumask to search
> -- 
> 2.31.1

* Re: [PATCH 2/5] cpumask: Introduce for_each_cpu_andnot()
  2022-08-16 22:24   ` Yury Norov
@ 2022-08-17 10:09     ` Valentin Schneider
  0 siblings, 0 replies; 10+ messages in thread
From: Valentin Schneider @ 2022-08-17 10:09 UTC (permalink / raw)
  To: Yury Norov
  Cc: netdev, linux-rdma, linux-kernel, Saeed Mahameed,
	Leon Romanovsky, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Andy Shevchenko, Rasmus Villemoes, Ingo Molnar,
	Peter Zijlstra, Vincent Guittot, Dietmar Eggemann,
	Steven Rostedt, Mel Gorman, Greg Kroah-Hartman, Barry Song,
	Heiko Carstens, Tony Luck, Jonathan Cameron, Gal Pressman,
	Tariq Toukan

On 16/08/22 15:24, Yury Norov wrote:
> On Tue, Aug 16, 2022 at 07:07:24PM +0100, Valentin Schneider wrote:
>> for_each_cpu_and() is very convenient as it saves having to allocate a
>> temporary cpumask to store the result of cpumask_and(). The same issue
>> applies to cpumask_andnot() which doesn't actually need temporary storage
>> for iteration purposes.
>>
>> Following what has been done for for_each_cpu_and(), introduce
>> for_each_cpu_andnot().
>>
>> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
>> ---
>>  include/linux/cpumask.h | 32 ++++++++++++++++++++++++++++++++
>>  lib/cpumask.c           | 19 +++++++++++++++++++
>>  2 files changed, 51 insertions(+)
>>
>> diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
>> index fe29ac7cc469..a8b2ca160e57 100644
>> --- a/include/linux/cpumask.h
>> +++ b/include/linux/cpumask.h
>> @@ -157,6 +157,13 @@ static inline unsigned int cpumask_next_and(int n,
>>      return n+1;
>>  }
>>
>> +static inline unsigned int cpumask_next_andnot(int n,
>> +					    const struct cpumask *srcp,
>> +					    const struct cpumask *andp)
>> +{
>> +	return n+1;
>> +}
>> +
>
> It looks like the patch is not based on top of 6.0, where UP cpumask
> operations were fixed.  Can you please rebase?
>

Right, this is based on tip/sched/core, I'll rebase it. Sorry about that!


* Re: [PATCH 1/5] bitops: Introduce find_next_andnot_bit()
  2022-08-16 22:13   ` Yury Norov
@ 2022-08-17 10:09     ` Valentin Schneider
  0 siblings, 0 replies; 10+ messages in thread
From: Valentin Schneider @ 2022-08-17 10:09 UTC (permalink / raw)
  To: Yury Norov
  Cc: netdev, linux-rdma, linux-kernel, Saeed Mahameed,
	Leon Romanovsky, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Andy Shevchenko, Rasmus Villemoes, Ingo Molnar,
	Peter Zijlstra, Vincent Guittot, Dietmar Eggemann,
	Steven Rostedt, Mel Gorman, Greg Kroah-Hartman, Barry Song,
	Heiko Carstens, Tony Luck, Jonathan Cameron, Gal Pressman,
	Tariq Toukan

On 16/08/22 15:13, Yury Norov wrote:
> On Tue, Aug 16, 2022 at 07:07:23PM +0100, Valentin Schneider wrote:
>> @@ -59,7 +63,9 @@ unsigned long _find_next_bit(const unsigned long *addr1,
>>
>>              tmp = addr1[start / BITS_PER_LONG];
>>              if (addr2)
>> -			tmp &= addr2[start / BITS_PER_LONG];
>> +			tmp &= negate ?
>> +			       ~addr2[start / BITS_PER_LONG] :
>> +				addr2[start / BITS_PER_LONG];
>>              tmp ^= invert;
>>      }
>
> So it flips addr2 bits twice - first with new 'negate', and second
> with the existing 'invert'. There is no such combination in the
> existing code, but the pattern looks ugly, particularly because we use
> different inverting approaches. Because of that, and because XOR trick
> generates better code, I'd suggest something like this:
>
>         tmp = addr1[start / BITS_PER_LONG] ^ invert1;
>         if (addr2)
>                 tmp &= addr2[start / BITS_PER_LONG] ^ invert2;

That does look much better, and also gets rid of the ternary. Thanks!

