From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Bjorn Helgaas <helgaas@kernel.org>, Marc Zyngier <maz@kernel.org>,
	Alex Williamson <alex.williamson@redhat.com>,
	Kevin Tian <kevin.tian@intel.com>,
	Jason Gunthorpe <jgg@nvidia.com>, Megha Dey <megha.dey@intel.com>,
	Ashok Raj <ashok.raj@intel.com>,
	linux-pci@vger.kernel.org, Cedric Le Goater <clg@kaod.org>,
	xen-devel@lists.xenproject.org, Juergen Gross <jgross@suse.com>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Niklas Schnelle <schnelle@linux.ibm.com>,
	linux-s390@vger.kernel.org, Heiko Carstens <hca@linux.ibm.com>,
	Christian Borntraeger <borntraeger@de.ibm.com>,
	Logan Gunthorpe <logang@deltatee.com>,
	Jon Mason <jdmason@kudzu.us>, Dave Jiang <dave.jiang@intel.com>,
	Allen Hubbe <allenbh@gmail.com>,
	linux-ntb@googlegroups.com
Subject: [patch V2 07/31] PCI/MSI: Protect MSI operations
Date: Mon,  6 Dec 2021 23:51:13 +0100 (CET)
Message-ID: <20211206210747.982292705@linutronix.de>
In-Reply-To: <20211206210600.123171746@linutronix.de>

To prepare for dynamic extension of MSI-X vectors, protect the MSI
operations for MSI and MSI-X with the MSI descriptor lock. This requires
moving the invocation of irq_create_affinity_masks() out of the descriptor
lock section to avoid reverse lock ordering vs. the CPU hotplug lock, as
some callers of the PCI/MSI allocation interfaces already hold it.
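
As a rough illustration (not part of the patch; error handling and the
actual descriptor setup elided), the resulting pattern in
msi_capability_init() and the new msix_setup_interrupts() is:

	struct irq_affinity_desc *masks = NULL;

	/*
	 * Keep irq_create_affinity_masks() outside the descriptor lock:
	 * some callers of the PCI/MSI allocation interfaces already hold
	 * the CPU hotplug lock, so the descriptor mutex must never be
	 * taken before it.
	 */
	if (affd)
		masks = irq_create_affinity_masks(nvec, affd);

	msi_lock_descs(&dev->dev);
	/* allocate MSI descriptors and set up the interrupts ... */
	msi_unlock_descs(&dev->dev);
	kfree(masks);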

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 drivers/pci/msi/irqdomain.c |    4 -
 drivers/pci/msi/msi.c       |  120 ++++++++++++++++++++++++++------------------
 2 files changed, 73 insertions(+), 51 deletions(-)

--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -14,7 +14,7 @@ int pci_msi_setup_msi_irqs(struct pci_de
 
 	domain = dev_get_msi_domain(&dev->dev);
 	if (domain && irq_domain_is_hierarchy(domain))
-		return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
+		return msi_domain_alloc_irqs_descs_locked(domain, &dev->dev, nvec);
 
 	return pci_msi_legacy_setup_msi_irqs(dev, nvec, type);
 }
@@ -25,7 +25,7 @@ void pci_msi_teardown_msi_irqs(struct pc
 
 	domain = dev_get_msi_domain(&dev->dev);
 	if (domain && irq_domain_is_hierarchy(domain))
-		msi_domain_free_irqs(domain, &dev->dev);
+		msi_domain_free_irqs_descs_locked(domain, &dev->dev);
 	else
 		pci_msi_legacy_teardown_msi_irqs(dev);
 }
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -322,11 +322,13 @@ static void __pci_restore_msix_state(str
 
 	write_msg = arch_restore_msi_irqs(dev);
 
+	msi_lock_descs(&dev->dev);
 	for_each_pci_msi_entry(entry, dev) {
 		if (write_msg)
 			__pci_write_msi_msg(entry, &entry->msg);
 		pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
 	}
+	msi_unlock_descs(&dev->dev);
 
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
@@ -339,20 +341,16 @@ void pci_restore_msi_state(struct pci_de
 EXPORT_SYMBOL_GPL(pci_restore_msi_state);
 
 static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
+msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
 {
-	struct irq_affinity_desc *masks = NULL;
 	struct msi_desc *entry;
 	unsigned long prop;
 	u16 control;
 
-	if (affd)
-		masks = irq_create_affinity_masks(nvec, affd);
-
 	/* MSI Entry Initialization */
 	entry = alloc_msi_entry(&dev->dev, nvec, masks);
 	if (!entry)
-		goto out;
+		return NULL;
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
 	/* Lies, damned lies, and MSIs */
@@ -379,8 +377,7 @@ msi_setup_entry(struct pci_dev *dev, int
 	if (entry->pci.msi_attrib.is_64)
 		prop |= MSI_PROP_64BIT;
 	msi_device_set_properties(&dev->dev, prop);
-out:
-	kfree(masks);
+
 	return entry;
 }
 
@@ -416,14 +413,21 @@ static int msi_verify_entries(struct pci
 static int msi_capability_init(struct pci_dev *dev, int nvec,
 			       struct irq_affinity *affd)
 {
+	struct irq_affinity_desc *masks = NULL;
 	struct msi_desc *entry;
 	int ret;
 
 	pci_msi_set_enable(dev, 0);	/* Disable MSI during set up */
 
-	entry = msi_setup_entry(dev, nvec, affd);
-	if (!entry)
-		return -ENOMEM;
+	if (affd)
+		masks = irq_create_affinity_masks(nvec, affd);
+
+	msi_lock_descs(&dev->dev);
+	entry = msi_setup_entry(dev, nvec, masks);
+	if (!entry) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
 
 	/* All MSIs are unmasked by default; mask them all */
 	pci_msi_mask(entry, msi_multi_mask(entry));
@@ -446,11 +450,14 @@ static int msi_capability_init(struct pc
 
 	pcibios_free_irq(dev);
 	dev->irq = entry->irq;
-	return 0;
+	goto unlock;
 
 err:
 	pci_msi_unmask(entry, msi_multi_mask(entry));
 	free_msi_irqs(dev);
+unlock:
+	msi_unlock_descs(&dev->dev);
+	kfree(masks);
 	return ret;
 }
 
@@ -477,23 +484,18 @@ static void __iomem *msix_map_region(str
 
 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 			      struct msix_entry *entries, int nvec,
-			      struct irq_affinity *affd)
+			      struct irq_affinity_desc *masks)
 {
-	struct irq_affinity_desc *curmsk, *masks = NULL;
+	int i, vec_count = pci_msix_vec_count(dev);
+	struct irq_affinity_desc *curmsk;
 	struct msi_desc *entry;
 	void __iomem *addr;
-	int ret, i;
-	int vec_count = pci_msix_vec_count(dev);
-
-	if (affd)
-		masks = irq_create_affinity_masks(nvec, affd);
 
 	for (i = 0, curmsk = masks; i < nvec; i++) {
 		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
 		if (!entry) {
 			/* No enough memory. Don't try again */
-			ret = -ENOMEM;
-			goto out;
+			return -ENOMEM;
 		}
 
 		entry->pci.msi_attrib.is_msix	= 1;
@@ -522,10 +524,7 @@ static int msix_setup_entries(struct pci
 			curmsk++;
 	}
 	msi_device_set_properties(&dev->dev, MSI_PROP_PCI_MSIX | MSI_PROP_64BIT);
-	ret = 0;
-out:
-	kfree(masks);
-	return ret;
+	return 0;
 }
 
 static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
@@ -552,6 +551,41 @@ static void msix_mask_all(void __iomem *
 		writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
+static int msix_setup_interrupts(struct pci_dev *dev, void __iomem *base,
+				 struct msix_entry *entries, int nvec,
+				 struct irq_affinity *affd)
+{
+	struct irq_affinity_desc *masks = NULL;
+	int ret;
+
+	if (affd)
+		masks = irq_create_affinity_masks(nvec, affd);
+
+	msi_lock_descs(&dev->dev);
+	ret = msix_setup_entries(dev, base, entries, nvec, masks);
+	if (ret)
+		goto out_free;
+
+	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+	if (ret)
+		goto out_free;
+
+	/* Check if all MSI entries honor device restrictions */
+	ret = msi_verify_entries(dev);
+	if (ret)
+		goto out_free;
+
+	msix_update_entries(dev, entries);
+	goto out_unlock;
+
+out_free:
+	free_msi_irqs(dev);
+out_unlock:
+	msi_unlock_descs(&dev->dev);
+	kfree(masks);
+	return ret;
+}
+
 /**
  * msix_capability_init - configure device's MSI-X capability
  * @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -592,20 +626,9 @@ static int msix_capability_init(struct p
 	/* Ensure that all table entries are masked. */
 	msix_mask_all(base, tsize);
 
-	ret = msix_setup_entries(dev, base, entries, nvec, affd);
+	ret = msix_setup_interrupts(dev, base, entries, nvec, affd);
 	if (ret)
-		goto out_free;
-
-	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
-	if (ret)
-		goto out_free;
-
-	/* Check if all MSI entries honor device restrictions */
-	ret = msi_verify_entries(dev);
-	if (ret)
-		goto out_free;
-
-	msix_update_entries(dev, entries);
+		goto out_disable;
 
 	/* Set MSI-X enabled bits and unmask the function */
 	pci_intx_for_msi(dev, 0);
@@ -615,12 +638,8 @@ static int msix_capability_init(struct p
 	pcibios_free_irq(dev);
 	return 0;
 
-out_free:
-	free_msi_irqs(dev);
-
 out_disable:
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
-
 	return ret;
 }
 
@@ -725,8 +744,10 @@ void pci_disable_msi(struct pci_dev *dev
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
 
+	msi_lock_descs(&dev->dev);
 	pci_msi_shutdown(dev);
 	free_msi_irqs(dev);
+	msi_unlock_descs(&dev->dev);
 }
 EXPORT_SYMBOL(pci_disable_msi);
 
@@ -812,8 +833,10 @@ void pci_disable_msix(struct pci_dev *de
 	if (!pci_msi_enable || !dev || !dev->msix_enabled)
 		return;
 
+	msi_lock_descs(&dev->dev);
 	pci_msix_shutdown(dev);
 	free_msi_irqs(dev);
+	msi_unlock_descs(&dev->dev);
 }
 EXPORT_SYMBOL(pci_disable_msix);
 
@@ -874,7 +897,6 @@ int pci_enable_msi(struct pci_dev *dev)
 
 	if (!rc)
 		rc = __pci_enable_msi_range(dev, 1, 1, NULL);
-
 	return rc < 0 ? rc : 0;
 }
 EXPORT_SYMBOL(pci_enable_msi);
@@ -961,11 +983,7 @@ int pci_alloc_irq_vectors_affinity(struc
 				   struct irq_affinity *affd)
 {
 	struct irq_affinity msi_default_affd = {0};
-	int ret = msi_setup_device_data(&dev->dev);
-	int nvecs = -ENOSPC;
-
-	if (ret)
-		return ret;
+	int ret, nvecs;
 
 	if (flags & PCI_IRQ_AFFINITY) {
 		if (!affd)
@@ -975,6 +993,10 @@ int pci_alloc_irq_vectors_affinity(struc
 			affd = NULL;
 	}
 
+	ret = msi_setup_device_data(&dev->dev);
+	if (ret)
+		return ret;
+
 	if (flags & PCI_IRQ_MSIX) {
 		nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
 						affd, flags);
@@ -1003,7 +1025,7 @@ int pci_alloc_irq_vectors_affinity(struc
 		}
 	}
 
-	return nvecs;
+	return -ENOSPC;
 }
 EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
 


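For context, a hypothetical driver-side caller of the now-serialized
allocation path (device pointer and vector counts made up purely for
illustration) would look like:

	/* Request up to 32 MSI-X vectors with one non-managed vector. */
	struct irq_affinity affd = { .pre_vectors = 1 };
	int nvecs;

	nvecs = pci_alloc_irq_vectors_affinity(pdev, 1, 32,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &affd);
	if (nvecs < 0)
		return nvecs;

Some such callers already hold the CPU hotplug lock, which is why the
affinity-mask allocation is kept outside the new descriptor-lock sections.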