From: Christoph Hellwig <hch@lst.de>
To: tglx@linutronix.de
Cc: axboe@kernel.dk, linux-block@vger.kernel.org,
	linux-pci@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH 3/7] genirq/affinity: Handle pre/post vectors in irq_create_affinity_masks()
Date: Tue,  8 Nov 2016 17:15:03 -0800
Message-ID: <1478654107-7384-4-git-send-email-hch@lst.de>
In-Reply-To: <1478654107-7384-1-git-send-email-hch@lst.de>

Only calculate the affinity for the main I/O vectors, and skip the
pre or post vectors specified by struct irq_affinity.

Also remove the irq_affinity cpumask argument, which no caller has ever used.
If we ever need it in the future, it can be passed through struct
irq_affinity.
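
As a rough illustration of the new calling convention (a hypothetical sketch,
not part of this patch; "nvecs" stands for whatever total vector count the
driver requested, and the pre_vectors/post_vectors fields come from the
struct irq_affinity introduced earlier in this series), a driver that
reserves one vector for a non-queue interrupt ahead of its I/O vectors would
describe that to the core like this:

	struct irq_affinity affd = {
		.pre_vectors	= 1,	/* e.g. an admin/config interrupt */
		.post_vectors	= 0,
	};
	struct cpumask *masks;

	masks = irq_create_affinity_masks(nvecs, &affd);
	if (masks) {
		/*
		 * masks[0] is set to cpu_possible_mask; only the remaining
		 * nvecs - 1 entries are spread across the online nodes.
		 */
	}

Only the main I/O vectors are spread across nodes and CPUs; the reserved
pre/post entries simply get cpu_possible_mask, matching the behaviour added
below.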

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
---
 drivers/pci/msi.c         |  6 ++++--
 include/linux/interrupt.h |  4 ++--
 kernel/irq/affinity.c     | 46 +++++++++++++++++++++++++---------------------
 3 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index dad2da7..f4a108b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -553,12 +553,13 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
 static struct msi_desc *
 msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
 {
+	static const struct irq_affinity default_affd;
 	struct cpumask *masks = NULL;
 	struct msi_desc *entry;
 	u16 control;
 
 	if (affinity) {
-		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+		masks = irq_create_affinity_masks(nvec, &default_affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}
@@ -692,12 +693,13 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 			      struct msix_entry *entries, int nvec,
 			      bool affinity)
 {
+	static const struct irq_affinity default_affd;
 	struct cpumask *curmsk, *masks = NULL;
 	struct msi_desc *entry;
 	int ret, i;
 
 	if (affinity) {
-		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+		masks = irq_create_affinity_masks(nvec, &default_affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9081f23b..53144e7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -290,7 +290,7 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
+struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
 int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
 
 #else /* CONFIG_SMP */
@@ -325,7 +325,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 }
 
 static inline struct cpumask *
-irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
 {
 	return NULL;
 }
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 8d92597..17360bd 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -51,16 +51,16 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 
 /**
  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
- * @affinity:		The affinity mask to spread. If NULL cpu_online_mask
- *			is used
- * @nvecs:		The number of vectors
+ * @nvecs:	The total number of vectors
+ * @affd:	Description of the affinity requirements
  *
  * Returns the masks pointer or NULL if allocation failed.
  */
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
-					  int nvec)
+struct cpumask *
+irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0;
+	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
 	cpumask_var_t nmsk;
@@ -68,46 +68,46 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return NULL;
 
-	masks = kzalloc(nvec * sizeof(*masks), GFP_KERNEL);
+	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		goto out;
 
+	/* Fill out vectors at the beginning that don't need affinity */
+	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+		cpumask_copy(masks + curvec, cpu_possible_mask);
+
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	/* If the supplied affinity mask is NULL, use cpu online mask */
-	if (!affinity)
-		affinity = cpu_online_mask;
-
-	nodes = get_nodes_in_cpumask(affinity, &nodemsk);
+	nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
 
 	/*
 	 * If the number of nodes in the mask is less than or equal the
 	 * number of vectors we just spread the vectors across the nodes.
 	 */
-	if (nvec <= nodes) {
+	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec, cpumask_of_node(n));
-			if (++curvec == nvec)
+			if (++curvec == affv)
 				break;
 		}
-		goto outonl;
+		goto done;
 	}
 
 	/* Spread the vectors per node */
-	vecs_per_node = nvec / nodes;
+	vecs_per_node = affv / nodes;
 	/* Account for rounding errors */
-	extra_vecs = nvec - (nodes * vecs_per_node);
+	extra_vecs = affv - (nodes * vecs_per_node);
 
 	for_each_node_mask(n, nodemsk) {
 		int ncpus, v, vecs_to_assign = vecs_per_node;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, affinity, cpumask_of_node(n));
+		cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
 
-		for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) {
+		for (v = 0; curvec < affv && v < vecs_to_assign; curvec++, v++) {
 			cpus_per_vec = ncpus / vecs_to_assign;
 
 			/* Account for extra vectors to compensate rounding errors */
@@ -119,12 +119,16 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
-		if (curvec >= nvec)
+		if (curvec >= affv)
 			break;
 	}
 
-outonl:
+done:
 	put_online_cpus();
+
+	/* Fill out vectors at the end that don't need affinity */
+	for (; curvec < nvecs; curvec++)
+		cpumask_copy(masks + curvec, cpu_possible_mask);
 out:
 	free_cpumask_var(nmsk);
 	return masks;
-- 
2.1.4

Thread overview: 33+ messages
2016-11-09  1:15 support for partial irq affinity assignment V3 Christoph Hellwig
2016-11-09  1:15 ` [PATCH 1/7] genirq/affinity: Introduce struct irq_affinity Christoph Hellwig
2016-11-09  7:48   ` [tip:irq/core] " tip-bot for Christoph Hellwig
2016-11-09  1:15 ` [PATCH 2/7] genirq/affinity: Handle pre/post vectors in irq_calc_affinity_vectors() Christoph Hellwig
2016-11-09  7:04   ` Hannes Reinecke
2016-11-09  7:49   ` [tip:irq/core] " tip-bot for Christoph Hellwig
2016-11-09  1:15 ` Christoph Hellwig [this message]
2016-11-09  7:05   ` [PATCH 3/7] genirq/affinity: Handle pre/post vectors in irq_create_affinity_masks() Hannes Reinecke
2016-11-09  7:49   ` [tip:irq/core] " tip-bot for Christoph Hellwig
2016-11-09  1:15 ` [PATCH 4/7] PCI/MSI: Propagate IRQ affinity description through the MSI code Christoph Hellwig
2016-11-09  7:50   ` [tip:irq/core] " tip-bot for Christoph Hellwig
2016-11-09  1:15 ` [PATCH 5/7] pci/msi: Provide pci_alloc_irq_vectors_affinity() Christoph Hellwig
2016-11-09  1:38   ` Christoph Hellwig
2016-11-09  7:50   ` [tip:irq/core] PCI/MSI: " tip-bot for Christoph Hellwig
2016-11-09  1:15 ` [PATCH 6/7] PCI: Remove the irq_affinity mask from struct pci_dev Christoph Hellwig
2016-11-09  7:51   ` [tip:irq/core] " tip-bot for Christoph Hellwig
2016-11-09  1:15 ` [PATCH 7/7] blk-mq: add a first_vec argument to blk_mq_pci_map_queues Christoph Hellwig
2016-11-09  4:38 ` support for partial irq affinity assignment V3 Jens Axboe
2016-11-09  7:51   ` Thomas Gleixner
2016-11-09 15:10     ` Christoph Hellwig
2016-11-09 15:09       ` Thomas Gleixner
  -- strict thread matches above, loose matches on Subject: below --
2016-11-07 18:47 support for partial irq affinity assignment Christoph Hellwig
2016-11-07 18:47 ` [PATCH 3/7] genirq/affinity: Handle pre/post vectors in irq_create_affinity_masks() Christoph Hellwig
2016-11-08  8:15   ` Hannes Reinecke
2016-11-08 14:55     ` Christoph Hellwig
2016-11-08 14:59       ` Hannes Reinecke
2016-11-08 15:00         ` Christoph Hellwig
2016-11-08 16:27           ` Thomas Gleixner
2016-11-08 16:33             ` Christoph Hellwig
2016-11-08 21:20   ` Bjorn Helgaas
