linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3 1/2] irq/matrix: Split out the CPU finding code into a helper
@ 2018-09-08 17:58 Dou Liyang
  2018-09-08 17:58 ` [PATCH v3 2/2] irq/matrix: Spread managed interrupts on allocation Dou Liyang
  2018-09-18 16:36 ` [tip:x86/apic] irq/matrix: Split out the CPU selection code into a helper tip-bot for Dou Liyang
  0 siblings, 2 replies; 6+ messages in thread
From: Dou Liyang @ 2018-09-08 17:58 UTC (permalink / raw)
  To: linux-kernel, x86; +Cc: tglx, mingo, hpa, douly.fnst

From: Dou Liyang <douly.fnst@cn.fujitsu.com>

Linux finds the CPU which has the lowest vector allocation count to spread
out the non managed interrupts across the possible target CPUs.

This common CPU finding code will also be used in the managed case,
so split it out into a helper in preparation.

Signed-off-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
---
Changelog v2 --> v3

 - Make the matrix_find_best_cpu() simple and obvious suggested by tglx
 - Remove the indentation totally suggested by tglx

 kernel/irq/matrix.c | 65 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 27 deletions(-)

diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5092494bf261..67768bbe736e 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
 	return area;
 }
 
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+					const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, maxavl = 0;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->available <= maxavl)
+			continue;
+
+		best_cpu = cpu;
+		maxavl = cm->available;
+	}
+	return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:		Matrix pointer
@@ -322,37 +343,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 		     bool reserved, unsigned int *mapped_cpu)
 {
-	unsigned int cpu, best_cpu, maxavl = 0;
+	unsigned int cpu, bit;
 	struct cpumap *cm;
-	unsigned int bit;
 
-	best_cpu = UINT_MAX;
-	for_each_cpu(cpu, msk) {
-		cm = per_cpu_ptr(m->maps, cpu);
-
-		if (!cm->online || cm->available <= maxavl)
-			continue;
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
 
-		best_cpu = cpu;
-		maxavl = cm->available;
-	}
+	cm = per_cpu_ptr(m->maps, cpu);
+	bit = matrix_alloc_area(m, cm, 1, false);
+	if (bit >= m->alloc_end)
+		return -ENOSPC;
+	cm->allocated++;
+	cm->available--;
+	m->total_allocated++;
+	m->global_available--;
+	if (reserved)
+		m->global_reserved--;
+	*mapped_cpu = cpu;
+	trace_irq_matrix_alloc(bit, cpu, m, cm);
+	return bit;
 
-	if (maxavl) {
-		cm = per_cpu_ptr(m->maps, best_cpu);
-		bit = matrix_alloc_area(m, cm, 1, false);
-		if (bit < m->alloc_end) {
-			cm->allocated++;
-			cm->available--;
-			m->total_allocated++;
-			m->global_available--;
-			if (reserved)
-				m->global_reserved--;
-			*mapped_cpu = best_cpu;
-			trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-			return bit;
-		}
-	}
-	return -ENOSPC;
 }
 
 /**
-- 
2.14.3



^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v3 2/2] irq/matrix: Spread managed interrupts on allocation
  2018-09-08 17:58 [PATCH v3 1/2] irq/matrix: Split out the CPU finding code into a helper Dou Liyang
@ 2018-09-08 17:58 ` Dou Liyang
  2018-09-17 15:32   ` Thomas Gleixner
  2018-09-18 16:37   ` [tip:x86/apic] " tip-bot for Dou Liyang
  2018-09-18 16:36 ` [tip:x86/apic] irq/matrix: Split out the CPU selection code into a helper tip-bot for Dou Liyang
  1 sibling, 2 replies; 6+ messages in thread
From: Dou Liyang @ 2018-09-08 17:58 UTC (permalink / raw)
  To: linux-kernel, x86; +Cc: tglx, mingo, hpa, douly.fnst

From: Dou Liyang <douly.fnst@cn.fujitsu.com>

Linux has spread out the non managed interrupt across the possible
target CPUs to avoid vector space exhaustion.

But, the same situation may happen on the managed interrupts.

Spread managed interrupt on allocation as well.

Note: also change the return value for the empty search mask case
from EINVAL to ENOSPC.

Signed-off-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
---
Changelog v2 --> v3

 - Mention the changes in the changelog suggested by tglx
 - Use the new matrix_find_best_cpu() helper

 arch/x86/kernel/apic/vector.c |  8 +++-----
 include/linux/irq.h           |  3 ++-
 kernel/irq/matrix.c           | 14 +++++++++++---
 3 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 9f148e3d45b4..b7fc290b4b98 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -313,14 +313,12 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	int vector, cpu;
 
-	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
-	cpu = cpumask_first(vector_searchmask);
-	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
+	cpumask_and(vector_searchmask, dest, affmsk);
+
 	/* set_affinity might call here for nothing */
 	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
 		return 0;
-	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask, &cpu);
 	trace_vector_alloc_managed(irqd->irq, vector, vector);
 	if (vector < 0)
 		return vector;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12a9957..c9bffda04a45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+				unsigned int *mapped_cpu);
 void irq_matrix_reserve(struct irq_matrix *m);
 void irq_matrix_remove_reserved(struct irq_matrix *m);
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 67768bbe736e..34f97c4f10d7 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -260,11 +260,18 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
  * @m:		Matrix pointer
  * @cpu:	On which CPU the interrupt should be allocated
  */
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+				unsigned int *mapped_cpu)
 {
-	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-	unsigned int bit, end = m->alloc_end;
+	unsigned int bit, cpu, end = m->alloc_end;
+	struct cpumap *cm;
+
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
 
+	cm = per_cpu_ptr(m->maps, cpu);
+	end = m->alloc_end;
 	/* Get managed bit which are not allocated */
 	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
 	bit = find_first_bit(m->scratch_map, end);
@@ -273,6 +280,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
 	set_bit(bit, cm->alloc_map);
 	cm->allocated++;
 	m->total_allocated++;
+	*mapped_cpu = cpu;
 	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
 	return bit;
 }
-- 
2.14.3



^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 2/2] irq/matrix: Spread managed interrupts on allocation
  2018-09-08 17:58 ` [PATCH v3 2/2] irq/matrix: Spread managed interrupts on allocation Dou Liyang
@ 2018-09-17 15:32   ` Thomas Gleixner
  2018-09-18 16:02     ` Dou Liyang
  2018-09-18 16:37   ` [tip:x86/apic] " tip-bot for Dou Liyang
  1 sibling, 1 reply; 6+ messages in thread
From: Thomas Gleixner @ 2018-09-17 15:32 UTC (permalink / raw)
  To: Dou Liyang; +Cc: linux-kernel, x86, mingo, hpa, douly.fnst

On Sun, 9 Sep 2018, Dou Liyang wrote:

> From: Dou Liyang <douly.fnst@cn.fujitsu.com>
> 
> Linux has spread out the non managed interrupt across the possible
> target CPUs to avoid vector space exhaustion.
> 
> But, the same situation may happen on the managed interrupts.

Second thougts on this.

Spreading the managed interrupts out at vector allocation time does not
prevent vector exhaustion at all, because contrary to regular interrupts
managed interrupts have a guaranteed allocation. IOW when the managed
interrupt is initialized (that's way before the actual vector allocation
happens) a vector is reserved on each CPU which is in the associated
interrupt mask.

This is an essential property of managed interrupts because the kernel
guarantees that they can be moved to any CPU in the supplied mask during
CPU hot unplug and consequently shut down when the last CPU in the mask
goes offline.

So for that special case of pre/post vectors the supplied mask is all CPUs
and the guaranteed reservation will claim a vector on each CPU. What makes
it look unbalanced is that when the interrupts are actually requested, all
end up on CPU0 as that's the first CPU in the mask.

So doing the spreading does not prevent vector exhaustion; it merely spreads
the active interrupts more evenly over the CPUs in the mask.

I think it's still worthwhile to do that, but the changelog needs a major
overhaul as right now it's outright misleading. I'll just amend it with
something along the above lines, unless someone disagrees.

That said, it might also be interesting to allow user space affinity
settings on managed interrupts. Not meant for the pre/post vector case,
which just needs to be made non managed. It's meant for the case where a
device has less queues than CPUs, where changing affinity within the spread
range of CPUs could be allowed. Not sure though. Delegating this to the
folks who actually use that in their drivers.

Thanks,

	tglx

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 2/2] irq/matrix: Spread managed interrupts on allocation
  2018-09-17 15:32   ` Thomas Gleixner
@ 2018-09-18 16:02     ` Dou Liyang
  0 siblings, 0 replies; 6+ messages in thread
From: Dou Liyang @ 2018-09-18 16:02 UTC (permalink / raw)
  To: Thomas Gleixner; +Cc: linux-kernel, x86, mingo, hpa, douly.fnst

Dear Thomas,

On 2018/9/17 23:32, Thomas Gleixner wrote:
[...]
> I think it's still worthwhile to do that, but the changelog needs a major
> overhaul as right now it's outright misleading. I'll just amend it with
> something along the above lines, unless someone disagrees.
> 

Yeah, yes, right, I was wrong: it can't prevent vector exhaustion, it just
makes things look balanced, as you said. Thank you so much.

> That said, it might also be interesting to allow user space affinity
> settings on managed interrupts. Not meant for the pre/post vector case,
> which just needs to be made non managed. It's meant for the case where a

Yes, I am cooking according to the direction you are proposing.
Recently, I changed my PC and just completed the configuration of the
environment. ;-)

Thanks,
	dou


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [tip:x86/apic] irq/matrix: Split out the CPU selection code into a helper
  2018-09-08 17:58 [PATCH v3 1/2] irq/matrix: Split out the CPU finding code into a helper Dou Liyang
  2018-09-08 17:58 ` [PATCH v3 2/2] irq/matrix: Spread managed interrupts on allocation Dou Liyang
@ 2018-09-18 16:36 ` tip-bot for Dou Liyang
  1 sibling, 0 replies; 6+ messages in thread
From: tip-bot for Dou Liyang @ 2018-09-18 16:36 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: mingo, hpa, linux-kernel, tglx, douly.fnst

Commit-ID:  8ffe4e61c06a48324cfd97f1199bb9838acce2f2
Gitweb:     https://git.kernel.org/tip/8ffe4e61c06a48324cfd97f1199bb9838acce2f2
Author:     Dou Liyang <douly.fnst@cn.fujitsu.com>
AuthorDate: Sun, 9 Sep 2018 01:58:37 +0800
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Tue, 18 Sep 2018 18:27:24 +0200

irq/matrix: Split out the CPU selection code into a helper

Linux finds the CPU which has the lowest vector allocation count to spread
out the non managed interrupts across the possible target CPUs, but does
not do so for managed interrupts.

Split out the CPU selection code into a helper function for reuse. No
functional change.

Signed-off-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180908175838.14450-1-dou_liyang@163.com

---
 kernel/irq/matrix.c | 65 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 27 deletions(-)

diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5092494bf261..67768bbe736e 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
 	return area;
 }
 
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+					const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, maxavl = 0;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->available <= maxavl)
+			continue;
+
+		best_cpu = cpu;
+		maxavl = cm->available;
+	}
+	return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:		Matrix pointer
@@ -322,37 +343,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 		     bool reserved, unsigned int *mapped_cpu)
 {
-	unsigned int cpu, best_cpu, maxavl = 0;
+	unsigned int cpu, bit;
 	struct cpumap *cm;
-	unsigned int bit;
 
-	best_cpu = UINT_MAX;
-	for_each_cpu(cpu, msk) {
-		cm = per_cpu_ptr(m->maps, cpu);
-
-		if (!cm->online || cm->available <= maxavl)
-			continue;
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
 
-		best_cpu = cpu;
-		maxavl = cm->available;
-	}
+	cm = per_cpu_ptr(m->maps, cpu);
+	bit = matrix_alloc_area(m, cm, 1, false);
+	if (bit >= m->alloc_end)
+		return -ENOSPC;
+	cm->allocated++;
+	cm->available--;
+	m->total_allocated++;
+	m->global_available--;
+	if (reserved)
+		m->global_reserved--;
+	*mapped_cpu = cpu;
+	trace_irq_matrix_alloc(bit, cpu, m, cm);
+	return bit;
 
-	if (maxavl) {
-		cm = per_cpu_ptr(m->maps, best_cpu);
-		bit = matrix_alloc_area(m, cm, 1, false);
-		if (bit < m->alloc_end) {
-			cm->allocated++;
-			cm->available--;
-			m->total_allocated++;
-			m->global_available--;
-			if (reserved)
-				m->global_reserved--;
-			*mapped_cpu = best_cpu;
-			trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-			return bit;
-		}
-	}
-	return -ENOSPC;
 }
 
 /**

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [tip:x86/apic] irq/matrix: Spread managed interrupts on allocation
  2018-09-08 17:58 ` [PATCH v3 2/2] irq/matrix: Spread managed interrupts on allocation Dou Liyang
  2018-09-17 15:32   ` Thomas Gleixner
@ 2018-09-18 16:37   ` tip-bot for Dou Liyang
  1 sibling, 0 replies; 6+ messages in thread
From: tip-bot for Dou Liyang @ 2018-09-18 16:37 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: linux-kernel, tglx, hpa, douly.fnst, mingo

Commit-ID:  76f99ae5b54d48430d1f0c5512a84da0ff9761e0
Gitweb:     https://git.kernel.org/tip/76f99ae5b54d48430d1f0c5512a84da0ff9761e0
Author:     Dou Liyang <douly.fnst@cn.fujitsu.com>
AuthorDate: Sun, 9 Sep 2018 01:58:38 +0800
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Tue, 18 Sep 2018 18:27:24 +0200

irq/matrix: Spread managed interrupts on allocation

Linux spreads out the non managed interrupt across the possible target CPUs
to avoid vector space exhaustion.

Managed interrupts are treated differently, as for them the vectors are
reserved (with guarantee) when the interrupt descriptors are initialized.

When the interrupt is requested a real vector is assigned. The assignment
logic uses the first CPU in the affinity mask for assignment. If the
interrupt has more than one CPU in the affinity mask, which happens when a
multi queue device has less queues than CPUs, then doing the same search as
for non managed interrupts makes sense as it puts the interrupt on the
least interrupt plagued CPU. For single CPU affine vectors that's obviously
a NOOP.

Restructure the matrix allocation code so it does the 'best CPU' search, add
the sanity check for an empty affinity mask and adapt the call site in the
x86 vector management code.

[ tglx: Added the empty mask check to the core and improved change log ]

Signed-off-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180908175838.14450-2-dou_liyang@163.com

---
 arch/x86/kernel/apic/vector.c |  9 ++++-----
 include/linux/irq.h           |  3 ++-
 kernel/irq/matrix.c           | 17 ++++++++++++++---
 3 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7654febd5102..652e7ffa9b9d 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	int vector, cpu;
 
-	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
-	cpu = cpumask_first(vector_searchmask);
-	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
+	cpumask_and(vector_searchmask, dest, affmsk);
+
 	/* set_affinity might call here for nothing */
 	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
 		return 0;
-	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
+					  &cpu);
 	trace_vector_alloc_managed(irqd->irq, vector, vector);
 	if (vector < 0)
 		return vector;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12a9957..c9bffda04a45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+				unsigned int *mapped_cpu);
 void irq_matrix_reserve(struct irq_matrix *m);
 void irq_matrix_remove_reserved(struct irq_matrix *m);
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 67768bbe736e..6e6d467f3dec 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -260,11 +260,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
  * @m:		Matrix pointer
  * @cpu:	On which CPU the interrupt should be allocated
  */
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+			     unsigned int *mapped_cpu)
 {
-	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-	unsigned int bit, end = m->alloc_end;
+	unsigned int bit, cpu, end = m->alloc_end;
+	struct cpumap *cm;
+
+	if (cpumask_empty(msk))
+		return -EINVAL;
 
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
+
+	cm = per_cpu_ptr(m->maps, cpu);
+	end = m->alloc_end;
 	/* Get managed bit which are not allocated */
 	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
 	bit = find_first_bit(m->scratch_map, end);
@@ -273,6 +283,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
 	set_bit(bit, cm->alloc_map);
 	cm->allocated++;
 	m->total_allocated++;
+	*mapped_cpu = cpu;
 	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
 	return bit;
 }

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2018-09-18 16:37 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-09-08 17:58 [PATCH v3 1/2] irq/matrix: Split out the CPU finding code into a helper Dou Liyang
2018-09-08 17:58 ` [PATCH v3 2/2] irq/matrix: Spread managed interrupts on allocation Dou Liyang
2018-09-17 15:32   ` Thomas Gleixner
2018-09-18 16:02     ` Dou Liyang
2018-09-18 16:37   ` [tip:x86/apic] " tip-bot for Dou Liyang
2018-09-18 16:36 ` [tip:x86/apic] irq/matrix: Split out the CPU selection code into a helper tip-bot for Dou Liyang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).