From: Stefan Hajnoczi <stefanha@redhat.com>
To: linux-kernel@vger.kernel.org
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
	linux-pci@vger.kernel.org, Thomas Gleixner <tglx@linutronix.de>,
	Bjorn Helgaas <bhelgaas@google.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>
Subject: [RFC 2/2] genirq/matrix: take NUMA into account for managed IRQs
Date: Wed, 17 Jun 2020 10:37:25 +0100
Message-ID: <20200617093725.1725569-3-stefanha@redhat.com>
In-Reply-To: <20200617093725.1725569-1-stefanha@redhat.com>

Prefer CPUs on the IRQ's NUMA node over other CPUs when selecting the
target for a managed IRQ. If no CPU on that node is available, fall back
to considering all CPUs in the mask. This keeps managed IRQs on the same
NUMA node as their device whenever possible.
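
The selection is effectively a two-pass search: first restricted to the
device's node, then over the whole mask. Here is a minimal userspace
sketch of that logic (find_best_cpu, struct cpu_state and the sample
data are hypothetical illustrations; the real code is
matrix_find_best_cpu_managed() in the hunk below):

  #include <limits.h>
  #include <stdio.h>

  #define NUMA_NO_NODE	(-1)

  struct cpu_state {
  	int node;			/* NUMA node this CPU belongs to */
  	int online;			/* non-zero if the CPU is online */
  	unsigned int managed_allocated;	/* managed IRQs already on this CPU */
  };

  /* Pick the online CPU with the fewest managed IRQs, optionally node-bound */
  static unsigned int find_best_cpu(const struct cpu_state *cpus,
  				  unsigned int ncpus, int node)
  {
  	unsigned int cpu, best_cpu = UINT_MAX, allocated = UINT_MAX;

  	for (cpu = 0; cpu < ncpus; cpu++) {
  		/* Node-bound pass: skip CPUs outside the requested node */
  		if (node != NUMA_NO_NODE && cpus[cpu].node != node)
  			continue;
  		if (!cpus[cpu].online || cpus[cpu].managed_allocated > allocated)
  			continue;
  		best_cpu = cpu;
  		allocated = cpus[cpu].managed_allocated;
  	}
  	return best_cpu;
  }

  int main(void)
  {
  	struct cpu_state cpus[] = {
  		{ .node = 0, .online = 1, .managed_allocated = 3 },
  		{ .node = 1, .online = 1, .managed_allocated = 0 },
  	};
  	/* Device sits on node 0: node-local CPU 0 wins despite higher load */
  	unsigned int cpu = find_best_cpu(cpus, 2, 0);

  	if (cpu == UINT_MAX)	/* no online CPU on the node: widen the search */
  		cpu = find_best_cpu(cpus, 2, NUMA_NO_NODE);

  	printf("selected CPU %u\n", cpu);	/* prints "selected CPU 0" */
  	return 0;
  }

If CPU 0 were offline, the first pass would return UINT_MAX and the
NUMA_NO_NODE retry would pick CPU 1, which is the same fallback this
patch adds to irq_matrix_alloc_managed().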

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/linux/irq.h           |  2 +-
 arch/x86/kernel/apic/vector.c |  3 ++-
 kernel/irq/matrix.c           | 16 ++++++++++++----
 3 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8d5bc2c237d7..bdc3faa3c280 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1202,7 +1202,7 @@ void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool repla
 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
 int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
-				unsigned int *mapped_cpu);
+			     int node, unsigned int *mapped_cpu);
 void irq_matrix_reserve(struct irq_matrix *m);
 void irq_matrix_remove_reserved(struct irq_matrix *m);
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 67768e54438b..8eb10b0d981d 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -309,6 +309,7 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
 {
 	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
+	int node = irq_data_get_node(irqd);
 	int vector, cpu;
 
 	cpumask_and(vector_searchmask, dest, affmsk);
@@ -317,7 +318,7 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
 	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
 		return 0;
 	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
-					  &cpu);
+					  node, &cpu);
 	trace_vector_alloc_managed(irqd->irq, vector, vector);
 	if (vector < 0)
 		return vector;
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 30cc217b8631..ee35b6172b64 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -148,7 +148,8 @@ static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
 
 /* Find the best CPU which has the lowest number of managed IRQs allocated */
 static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
-						const struct cpumask *msk)
+						 const struct cpumask *msk,
+						 int node)
 {
 	unsigned int cpu, best_cpu, allocated = UINT_MAX;
 	struct cpumap *cm;
@@ -156,6 +157,9 @@ static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
 	best_cpu = UINT_MAX;
 
 	for_each_cpu(cpu, msk) {
+		if (node != NUMA_NO_NODE && cpu_to_node(cpu) != node)
+			continue;
+
 		cm = per_cpu_ptr(m->maps, cpu);
 
 		if (!cm->online || cm->managed_allocated > allocated)
@@ -280,10 +284,12 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
 /**
  * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
  * @m:		Matrix pointer
- * @cpu:	On which CPU the interrupt should be allocated
+ * @msk:	The mask of CPUs on which the interrupt can be allocated
+ * @node:	The preferred NUMA node
+ * @mapped_cpu:	The resulting CPU on which the interrupt should be allocated
  */
 int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
-			     unsigned int *mapped_cpu)
+			     int node, unsigned int *mapped_cpu)
 {
 	unsigned int bit, cpu, end = m->alloc_end;
 	struct cpumap *cm;
@@ -291,7 +297,9 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
 	if (cpumask_empty(msk))
 		return -EINVAL;
 
-	cpu = matrix_find_best_cpu_managed(m, msk);
+	cpu = matrix_find_best_cpu_managed(m, msk, node);
+	if (cpu == UINT_MAX)
+		cpu = matrix_find_best_cpu_managed(m, msk, NUMA_NO_NODE);
 	if (cpu == UINT_MAX)
 		return -ENOSPC;
 
-- 
2.26.2

