From: Thomas Gleixner <tglx@linutronix.de> To: LKML <linux-kernel@vger.kernel.org> Cc: Bjorn Helgaas <helgaas@kernel.org>, Marc Zyngier <maz@kernel.org>, Alex Williamson <alex.williamson@redhat.com>, Kevin Tian <kevin.tian@intel.com>, Jason Gunthorpe <jgg@nvidia.com>, Megha Dey <megha.dey@intel.com>, Ashok Raj <ashok.raj@intel.com>, linux-pci@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, linux-s390@vger.kernel.org, Heiko Carstens <hca@linux.ibm.com>, Christian Borntraeger <borntraeger@de.ibm.com>, Jon Mason <jdmason@kudzu.us>, Dave Jiang <dave.jiang@intel.com>, Allen Hubbe <allenbh@gmail.com>, linux-ntb@googlegroups.com Subject: [patch 32/32] genirq/msi: Convert storage to xarray Date: Sat, 27 Nov 2021 02:23:17 +0100 (CET) [thread overview] Message-ID: <20211126232736.190744801@linutronix.de> (raw) In-Reply-To: 20211126230957.239391799@linutronix.de The current linked list storage for MSI descriptors is suboptimal in several ways: 1) Looking up an MSI descriptor requires an O(n) list walk in the worst case 2) The upcoming support of runtime expansion of MSI-X vectors would need to do a full list walk to figure out whether a particular index is already associated. 3) Runtime expansion of sparse allocations is even more complex as the current implementation assumes an ordered list (increasing MSI index). Use an xarray which solves all of the above problems nicely. 
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- include/linux/msi.h | 19 ++--- kernel/irq/msi.c | 188 ++++++++++++++++++++++------------------------------ 2 files changed, 90 insertions(+), 117 deletions(-) --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -17,6 +17,7 @@ */ #include <linux/spinlock.h> +#include <linux/xarray.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/bits.h> @@ -122,7 +123,6 @@ struct pci_msi_desc { /** * struct msi_desc - Descriptor structure for MSI based interrupts - * @list: List head for management * @irq: The base interrupt number * @nvec_used: The number of vectors used * @dev: Pointer to the device which uses this descriptor @@ -139,7 +139,6 @@ struct pci_msi_desc { */ struct msi_desc { /* Shared device/bus type independent data */ - struct list_head list; unsigned int irq; unsigned int nvec_used; struct device *dev; @@ -177,20 +176,20 @@ enum msi_desc_filter { * @properties: MSI properties which are interesting to drivers * @num_descs: The number of allocated MSI descriptors for the device * @platform_data: Platform-MSI specific data - * @list: List of MSI descriptors associated to the device - * @mutex: Mutex protecting the MSI list - * @__next: Cached pointer to the next entry for iterators - * @__filter: Cached descriptor filter + * @mutex: Mutex protecting the MSI descriptor store + * @store: Xarray for storing MSI descriptor pointers + * @__iter_idx: Index to search the next entry for iterators + * @__iter_filter: Cached descriptor filter */ struct msi_device_data { raw_spinlock_t lock; unsigned long properties; unsigned int num_descs; struct platform_msi_priv_data *platform_data; - struct list_head list; struct mutex mutex; - struct msi_desc *__next; - enum msi_desc_filter __filter; + struct xarray store; + unsigned long __iter_idx; + enum msi_desc_filter __iter_filter; }; int msi_setup_device_data(struct device *dev); @@ -266,7 +265,7 @@ static inline struct msi_desc *msi_first * @dev: struct 
device pointer - device to iterate * @filter: Filter for descriptor selection * - * See msi_for_each_desc_from()for further information. + * See msi_for_each_desc_from() for further information. */ #define msi_for_each_desc(desc, dev, filter) \ msi_for_each_desc_from(desc, dev, filter, 0) --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -20,7 +20,6 @@ #include "internals.h" static inline int msi_sysfs_create_group(struct device *dev); -#define dev_to_msi_list(dev) (&(dev)->msi.data->list) /** * msi_alloc_desc - Allocate an initialized msi_desc @@ -41,7 +40,6 @@ static struct msi_desc *msi_alloc_desc(s if (!desc) return NULL; - INIT_LIST_HEAD(&desc->list); desc->dev = dev; desc->nvec_used = nvec; if (affinity) { @@ -60,6 +58,19 @@ static void msi_free_desc(struct msi_des kfree(desc); } +static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index) +{ + int ret; + + desc->msi_index = index; + ret = xa_insert(&md->store, index, desc, GFP_KERNEL); + if (!ret) + md->num_descs++; + else + msi_free_desc(desc); + return ret; +} + /** * msi_add_msi_desc - Allocate and initialize a MSI descriptor * @dev: Pointer to the device for which the descriptor is allocated @@ -77,13 +88,9 @@ int msi_add_msi_desc(struct device *dev, if (!desc) return -ENOMEM; - /* Copy the MSI index and type specific data to the new descriptor. */ - desc->msi_index = init_desc->msi_index; + /* Copy type specific data to the new descriptor. 
*/ desc->pci = init_desc->pci; - - list_add_tail(&desc->list, &dev->msi.data->list); - dev->msi.data->num_descs++; - return 0; + return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index); } /** @@ -96,29 +103,41 @@ int msi_add_msi_desc(struct device *dev, */ static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc) { - struct msi_desc *desc, *tmp; - LIST_HEAD(list); - unsigned int i; + struct msi_desc *desc; + unsigned long i; + int ret; lockdep_assert_held(&dev->msi.data->mutex); for (i = 0; i < ndesc; i++) { desc = msi_alloc_desc(dev, 1, NULL); if (!desc) + goto fail_mem; + ret = msi_insert_desc(dev->msi.data, desc, index + i); + if (ret) goto fail; - desc->msi_index = index + i; - list_add_tail(&desc->list, &list); } - list_splice_tail(&list, &dev->msi.data->list); - dev->msi.data->num_descs += ndesc; return 0; +fail_mem: + ret = -ENOMEM; fail: - list_for_each_entry_safe(desc, tmp, &list, list) { - list_del(&desc->list); - msi_free_desc(desc); + msi_free_msi_descs_range(dev, MSI_DESC_NOTASSOCIATED, index, ndesc); + return ret; +} + +static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter) +{ + switch (filter) { + case MSI_DESC_ALL: + return true; + case MSI_DESC_NOTASSOCIATED: + return !desc->irq; + case MSI_DESC_ASSOCIATED: + return !!desc->irq; } - return -ENOMEM; + WARN_ON_ONCE(1); + return false; } /** @@ -132,19 +151,16 @@ void msi_free_msi_descs_range(struct dev unsigned int base_index, unsigned int ndesc) { struct msi_desc *desc; + unsigned long idx; lockdep_assert_held(&dev->msi.data->mutex); - msi_for_each_desc(desc, dev, filter) { - /* - * Stupid for now to handle MSI device domain until the - * storage is switched over to an xarray. 
- */ - if (desc->msi_index < base_index || desc->msi_index >= base_index + ndesc) - continue; - list_del(&desc->list); - msi_free_desc(desc); - dev->msi.data->num_descs--; + xa_for_each_range(&dev->msi.data->store, idx, desc, base_index, base_index + ndesc - 1) { + if (msi_desc_match(desc, filter)) { + xa_erase(&dev->msi.data->store, idx); + msi_free_desc(desc); + dev->msi.data->num_descs--; + } } } @@ -192,7 +208,8 @@ static void msi_device_data_release(stru { struct msi_device_data *md = res; - WARN_ON_ONCE(!list_empty(&md->list)); + WARN_ON_ONCE(!xa_empty(&md->store)); + xa_destroy(&md->store); dev->msi.data = NULL; } @@ -225,7 +242,7 @@ int msi_setup_device_data(struct device } raw_spin_lock_init(&md->lock); - INIT_LIST_HEAD(&md->list); + xa_init(&md->store); mutex_init(&md->mutex); dev->msi.data = md; devres_add(dev, md); @@ -252,38 +269,21 @@ void msi_unlock_descs(struct device *dev { if (WARN_ON_ONCE(!dev->msi.data)) return; - /* Clear the next pointer which was cached by the iterator */ - dev->msi.data->__next = NULL; + /* Invalidate the index wich was cached by the iterator */ + dev->msi.data->__iter_idx = ULONG_MAX; mutex_unlock(&dev->msi.data->mutex); } EXPORT_SYMBOL_GPL(msi_unlock_descs); -static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter) -{ - switch (filter) { - case MSI_DESC_ALL: - return true; - case MSI_DESC_NOTASSOCIATED: - return !desc->irq; - case MSI_DESC_ASSOCIATED: - return !!desc->irq; - } - WARN_ON_ONCE(1); - return false; -} - -static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_filter filter, - unsigned int base_index) +static struct msi_desc *msi_find_desc(struct msi_device_data *md) { struct msi_desc *desc; - list_for_each_entry(desc, dev_to_msi_list(dev), list) { - if (desc->msi_index < base_index) - continue; - if (msi_desc_match(desc, filter)) - return desc; + xa_for_each_start(&md->store, md->__iter_idx, desc, md->__iter_idx) { + if (msi_desc_match(desc, md->__iter_filter)) + 
break; } - return NULL; + return desc; } /** @@ -301,43 +301,25 @@ static struct msi_desc *msi_find_first_d struct msi_desc *__msi_first_desc(struct device *dev, enum msi_desc_filter filter, unsigned int base_index) { - struct msi_desc *desc; + struct msi_device_data *md = dev->msi.data; - if (WARN_ON_ONCE(!dev->msi.data)) + if (WARN_ON_ONCE(!md)) return NULL; - lockdep_assert_held(&dev->msi.data->mutex); + lockdep_assert_held(&md->mutex); - /* Invalidate a previous invocation within the same lock section */ - dev->msi.data->__next = NULL; - - desc = msi_find_first_desc(dev, filter, base_index); - if (desc) { - dev->msi.data->__next = list_next_entry(desc, list); - dev->msi.data->__filter = filter; - } - return desc; + md->__iter_filter = filter; + md->__iter_idx = base_index; + return msi_find_desc(md); } EXPORT_SYMBOL_GPL(__msi_first_desc); -static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter filter, - struct msi_desc *from) -{ - struct msi_desc *desc = from; - - list_for_each_entry_from(desc, dev_to_msi_list(dev), list) { - if (msi_desc_match(desc, filter)) - return desc; - } - return NULL; -} - /** * msi_next_desc - Get the next MSI descriptor of a device * @dev: Device to operate on * * The first invocation of msi_next_desc() has to be preceeded by a - * successful incovation of __msi_first_desc(). Consecutive invocations are + * successful invocation of __msi_first_desc(). Consecutive invocations are * only valid if the previous one was successful. All these operations have * to be done within the same MSI mutex held region. 
* @@ -346,20 +328,18 @@ static struct msi_desc *__msi_next_desc( */ struct msi_desc *msi_next_desc(struct device *dev) { - struct msi_device_data *data = dev->msi.data; - struct msi_desc *desc; + struct msi_device_data *md = dev->msi.data; - if (WARN_ON_ONCE(!data)) + if (WARN_ON_ONCE(!md)) return NULL; - lockdep_assert_held(&data->mutex); + lockdep_assert_held(&md->mutex); - if (!data->__next) + if (md->__iter_idx == ULONG_MAX) return NULL; - desc = __msi_next_desc(dev, data->__filter, data->__next); - dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL; - return desc; + md->__iter_idx++; + return msi_find_desc(md); } EXPORT_SYMBOL_GPL(msi_next_desc); @@ -384,21 +364,18 @@ int __msi_get_virq(struct device *dev, u pcimsi = msi_device_has_property(dev, MSI_PROP_PCI_MSI); msi_lock_descs(dev); - msi_for_each_desc_from(desc, dev, MSI_DESC_ASSOCIATED, index) { - /* PCI-MSI has only one descriptor for multiple interrupts. */ - if (pcimsi) { - if (index < desc->nvec_used) - ret = desc->irq + index; - break; - } - + desc = xa_load(&dev->msi.data->store, pcimsi ? 0 : index); + if (desc && desc->irq) { /* + * PCI-MSI has only one descriptor for multiple interrupts. * PCI-MSIX and platform MSI use a descriptor per * interrupt. 
*/ - if (desc->msi_index == index) { + if (pcimsi) { + if (index < desc->nvec_used) + ret = desc->irq + index; + } else { ret = desc->irq; - break; } } msi_unlock_descs(dev); @@ -779,17 +756,13 @@ int msi_domain_populate_irqs(struct irq_ int ret, virq; msi_lock_descs(dev); - for (virq = virq_base; virq < virq_base + nvec; virq++) { - desc = msi_alloc_desc(dev, 1, NULL); - if (!desc) { - ret = -ENOMEM; - goto fail; - } + ret = msi_add_simple_msi_descs(dev, virq_base, nvec); + if (ret) + goto unlock; - desc->msi_index = virq; + for (virq = virq_base; virq < virq_base + nvec; virq++) { + desc = xa_load(&dev->msi.data->store, virq); desc->irq = virq; - list_add_tail(&desc->list, &dev->msi.data->list); - dev->msi.data->num_descs++; ops->set_desc(arg, desc); ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg); @@ -805,6 +778,7 @@ int msi_domain_populate_irqs(struct irq_ for (--virq; virq >= virq_base; virq--) irq_domain_free_irqs_common(domain, virq, 1); msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, nvec); +unlock: msi_unlock_descs(dev); return ret; }
WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de> To: LKML <linux-kernel@vger.kernel.org> Cc: Bjorn Helgaas <helgaas@kernel.org>, Marc Zyngier <maz@kernel.org>, Alex Williamson <alex.williamson@redhat.com>, Kevin Tian <kevin.tian@intel.com>, Jason Gunthorpe <jgg@nvidia.com>, Megha Dey <megha.dey@intel.com>, Ashok Raj <ashok.raj@intel.com>, linux-pci@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, linux-s390@vger.kernel.org, Heiko Carstens <hca@linux.ibm.com>, Christian Borntraeger <borntraeger@de.ibm.com>, Jon Mason <jdmason@kudzu.us>, Dave Jiang <dave.jiang@intel.com>, Allen Hubbe <allenbh@gmail.com>, linux-ntb@googlegroups.com Subject: [patch 32/32] genirq/msi: Convert storage to xarray Date: Sat, 27 Nov 2021 02:24:18 +0100 (CET) [thread overview] Message-ID: <20211126232736.190744801@linutronix.de> (raw) Message-ID: <20211127012418.MKfl5bE2TlDZdovxdfrkXsQeyDhF1KaYcXmQ16BdvEA@z> (raw) In-Reply-To: 20211126230957.239391799@linutronix.de The current linked list storage for MSI descriptors is suboptimal in several ways: 1) Looking up an MSI descriptor requires an O(n) list walk in the worst case 2) The upcoming support of runtime expansion of MSI-X vectors would need to do a full list walk to figure out whether a particular index is already associated. 3) Runtime expansion of sparse allocations is even more complex as the current implementation assumes an ordered list (increasing MSI index). Use an xarray which solves all of the above problems nicely. 
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- include/linux/msi.h | 19 ++--- kernel/irq/msi.c | 188 ++++++++++++++++++++++------------------------------ 2 files changed, 90 insertions(+), 117 deletions(-) --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -17,6 +17,7 @@ */ #include <linux/spinlock.h> +#include <linux/xarray.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/bits.h> @@ -122,7 +123,6 @@ struct pci_msi_desc { /** * struct msi_desc - Descriptor structure for MSI based interrupts - * @list: List head for management * @irq: The base interrupt number * @nvec_used: The number of vectors used * @dev: Pointer to the device which uses this descriptor @@ -139,7 +139,6 @@ struct pci_msi_desc { */ struct msi_desc { /* Shared device/bus type independent data */ - struct list_head list; unsigned int irq; unsigned int nvec_used; struct device *dev; @@ -177,20 +176,20 @@ enum msi_desc_filter { * @properties: MSI properties which are interesting to drivers * @num_descs: The number of allocated MSI descriptors for the device * @platform_data: Platform-MSI specific data - * @list: List of MSI descriptors associated to the device - * @mutex: Mutex protecting the MSI list - * @__next: Cached pointer to the next entry for iterators - * @__filter: Cached descriptor filter + * @mutex: Mutex protecting the MSI descriptor store + * @store: Xarray for storing MSI descriptor pointers + * @__iter_idx: Index to search the next entry for iterators + * @__iter_filter: Cached descriptor filter */ struct msi_device_data { raw_spinlock_t lock; unsigned long properties; unsigned int num_descs; struct platform_msi_priv_data *platform_data; - struct list_head list; struct mutex mutex; - struct msi_desc *__next; - enum msi_desc_filter __filter; + struct xarray store; + unsigned long __iter_idx; + enum msi_desc_filter __iter_filter; }; int msi_setup_device_data(struct device *dev); @@ -266,7 +265,7 @@ static inline struct msi_desc *msi_first * @dev: struct 
device pointer - device to iterate * @filter: Filter for descriptor selection * - * See msi_for_each_desc_from()for further information. + * See msi_for_each_desc_from() for further information. */ #define msi_for_each_desc(desc, dev, filter) \ msi_for_each_desc_from(desc, dev, filter, 0) --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -20,7 +20,6 @@ #include "internals.h" static inline int msi_sysfs_create_group(struct device *dev); -#define dev_to_msi_list(dev) (&(dev)->msi.data->list) /** * msi_alloc_desc - Allocate an initialized msi_desc @@ -41,7 +40,6 @@ static struct msi_desc *msi_alloc_desc(s if (!desc) return NULL; - INIT_LIST_HEAD(&desc->list); desc->dev = dev; desc->nvec_used = nvec; if (affinity) { @@ -60,6 +58,19 @@ static void msi_free_desc(struct msi_des kfree(desc); } +static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index) +{ + int ret; + + desc->msi_index = index; + ret = xa_insert(&md->store, index, desc, GFP_KERNEL); + if (!ret) + md->num_descs++; + else + msi_free_desc(desc); + return ret; +} + /** * msi_add_msi_desc - Allocate and initialize a MSI descriptor * @dev: Pointer to the device for which the descriptor is allocated @@ -77,13 +88,9 @@ int msi_add_msi_desc(struct device *dev, if (!desc) return -ENOMEM; - /* Copy the MSI index and type specific data to the new descriptor. */ - desc->msi_index = init_desc->msi_index; + /* Copy type specific data to the new descriptor. 
*/ desc->pci = init_desc->pci; - - list_add_tail(&desc->list, &dev->msi.data->list); - dev->msi.data->num_descs++; - return 0; + return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index); } /** @@ -96,29 +103,41 @@ int msi_add_msi_desc(struct device *dev, */ static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc) { - struct msi_desc *desc, *tmp; - LIST_HEAD(list); - unsigned int i; + struct msi_desc *desc; + unsigned long i; + int ret; lockdep_assert_held(&dev->msi.data->mutex); for (i = 0; i < ndesc; i++) { desc = msi_alloc_desc(dev, 1, NULL); if (!desc) + goto fail_mem; + ret = msi_insert_desc(dev->msi.data, desc, index + i); + if (ret) goto fail; - desc->msi_index = index + i; - list_add_tail(&desc->list, &list); } - list_splice_tail(&list, &dev->msi.data->list); - dev->msi.data->num_descs += ndesc; return 0; +fail_mem: + ret = -ENOMEM; fail: - list_for_each_entry_safe(desc, tmp, &list, list) { - list_del(&desc->list); - msi_free_desc(desc); + msi_free_msi_descs_range(dev, MSI_DESC_NOTASSOCIATED, index, ndesc); + return ret; +} + +static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter) +{ + switch (filter) { + case MSI_DESC_ALL: + return true; + case MSI_DESC_NOTASSOCIATED: + return !desc->irq; + case MSI_DESC_ASSOCIATED: + return !!desc->irq; } - return -ENOMEM; + WARN_ON_ONCE(1); + return false; } /** @@ -132,19 +151,16 @@ void msi_free_msi_descs_range(struct dev unsigned int base_index, unsigned int ndesc) { struct msi_desc *desc; + unsigned long idx; lockdep_assert_held(&dev->msi.data->mutex); - msi_for_each_desc(desc, dev, filter) { - /* - * Stupid for now to handle MSI device domain until the - * storage is switched over to an xarray. 
- */ - if (desc->msi_index < base_index || desc->msi_index >= base_index + ndesc) - continue; - list_del(&desc->list); - msi_free_desc(desc); - dev->msi.data->num_descs--; + xa_for_each_range(&dev->msi.data->store, idx, desc, base_index, base_index + ndesc - 1) { + if (msi_desc_match(desc, filter)) { + xa_erase(&dev->msi.data->store, idx); + msi_free_desc(desc); + dev->msi.data->num_descs--; + } } } @@ -192,7 +208,8 @@ static void msi_device_data_release(stru { struct msi_device_data *md = res; - WARN_ON_ONCE(!list_empty(&md->list)); + WARN_ON_ONCE(!xa_empty(&md->store)); + xa_destroy(&md->store); dev->msi.data = NULL; } @@ -225,7 +242,7 @@ int msi_setup_device_data(struct device } raw_spin_lock_init(&md->lock); - INIT_LIST_HEAD(&md->list); + xa_init(&md->store); mutex_init(&md->mutex); dev->msi.data = md; devres_add(dev, md); @@ -252,38 +269,21 @@ void msi_unlock_descs(struct device *dev { if (WARN_ON_ONCE(!dev->msi.data)) return; - /* Clear the next pointer which was cached by the iterator */ - dev->msi.data->__next = NULL; + /* Invalidate the index wich was cached by the iterator */ + dev->msi.data->__iter_idx = ULONG_MAX; mutex_unlock(&dev->msi.data->mutex); } EXPORT_SYMBOL_GPL(msi_unlock_descs); -static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter) -{ - switch (filter) { - case MSI_DESC_ALL: - return true; - case MSI_DESC_NOTASSOCIATED: - return !desc->irq; - case MSI_DESC_ASSOCIATED: - return !!desc->irq; - } - WARN_ON_ONCE(1); - return false; -} - -static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_filter filter, - unsigned int base_index) +static struct msi_desc *msi_find_desc(struct msi_device_data *md) { struct msi_desc *desc; - list_for_each_entry(desc, dev_to_msi_list(dev), list) { - if (desc->msi_index < base_index) - continue; - if (msi_desc_match(desc, filter)) - return desc; + xa_for_each_start(&md->store, md->__iter_idx, desc, md->__iter_idx) { + if (msi_desc_match(desc, md->__iter_filter)) + 
break; } - return NULL; + return desc; } /** @@ -301,43 +301,25 @@ static struct msi_desc *msi_find_first_d struct msi_desc *__msi_first_desc(struct device *dev, enum msi_desc_filter filter, unsigned int base_index) { - struct msi_desc *desc; + struct msi_device_data *md = dev->msi.data; - if (WARN_ON_ONCE(!dev->msi.data)) + if (WARN_ON_ONCE(!md)) return NULL; - lockdep_assert_held(&dev->msi.data->mutex); + lockdep_assert_held(&md->mutex); - /* Invalidate a previous invocation within the same lock section */ - dev->msi.data->__next = NULL; - - desc = msi_find_first_desc(dev, filter, base_index); - if (desc) { - dev->msi.data->__next = list_next_entry(desc, list); - dev->msi.data->__filter = filter; - } - return desc; + md->__iter_filter = filter; + md->__iter_idx = base_index; + return msi_find_desc(md); } EXPORT_SYMBOL_GPL(__msi_first_desc); -static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter filter, - struct msi_desc *from) -{ - struct msi_desc *desc = from; - - list_for_each_entry_from(desc, dev_to_msi_list(dev), list) { - if (msi_desc_match(desc, filter)) - return desc; - } - return NULL; -} - /** * msi_next_desc - Get the next MSI descriptor of a device * @dev: Device to operate on * * The first invocation of msi_next_desc() has to be preceeded by a - * successful incovation of __msi_first_desc(). Consecutive invocations are + * successful invocation of __msi_first_desc(). Consecutive invocations are * only valid if the previous one was successful. All these operations have * to be done within the same MSI mutex held region. 
* @@ -346,20 +328,18 @@ static struct msi_desc *__msi_next_desc( */ struct msi_desc *msi_next_desc(struct device *dev) { - struct msi_device_data *data = dev->msi.data; - struct msi_desc *desc; + struct msi_device_data *md = dev->msi.data; - if (WARN_ON_ONCE(!data)) + if (WARN_ON_ONCE(!md)) return NULL; - lockdep_assert_held(&data->mutex); + lockdep_assert_held(&md->mutex); - if (!data->__next) + if (md->__iter_idx == ULONG_MAX) return NULL; - desc = __msi_next_desc(dev, data->__filter, data->__next); - dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL; - return desc; + md->__iter_idx++; + return msi_find_desc(md); } EXPORT_SYMBOL_GPL(msi_next_desc); @@ -384,21 +364,18 @@ int __msi_get_virq(struct device *dev, u pcimsi = msi_device_has_property(dev, MSI_PROP_PCI_MSI); msi_lock_descs(dev); - msi_for_each_desc_from(desc, dev, MSI_DESC_ASSOCIATED, index) { - /* PCI-MSI has only one descriptor for multiple interrupts. */ - if (pcimsi) { - if (index < desc->nvec_used) - ret = desc->irq + index; - break; - } - + desc = xa_load(&dev->msi.data->store, pcimsi ? 0 : index); + if (desc && desc->irq) { /* + * PCI-MSI has only one descriptor for multiple interrupts. * PCI-MSIX and platform MSI use a descriptor per * interrupt. 
*/ - if (desc->msi_index == index) { + if (pcimsi) { + if (index < desc->nvec_used) + ret = desc->irq + index; + } else { ret = desc->irq; - break; } } msi_unlock_descs(dev); @@ -779,17 +756,13 @@ int msi_domain_populate_irqs(struct irq_ int ret, virq; msi_lock_descs(dev); - for (virq = virq_base; virq < virq_base + nvec; virq++) { - desc = msi_alloc_desc(dev, 1, NULL); - if (!desc) { - ret = -ENOMEM; - goto fail; - } + ret = msi_add_simple_msi_descs(dev, virq_base, nvec); + if (ret) + goto unlock; - desc->msi_index = virq; + for (virq = virq_base; virq < virq_base + nvec; virq++) { + desc = xa_load(&dev->msi.data->store, virq); desc->irq = virq; - list_add_tail(&desc->list, &dev->msi.data->list); - dev->msi.data->num_descs++; ops->set_desc(arg, desc); ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg); @@ -805,6 +778,7 @@ int msi_domain_populate_irqs(struct irq_ for (--virq; virq >= virq_base; virq--) irq_domain_free_irqs_common(domain, virq, 1); msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, nvec); +unlock: msi_unlock_descs(dev); return ret; }
next prev parent reply other threads:[~2021-11-27 1:32 UTC|newest] Thread overview: 184+ messages / expand[flat|nested] mbox.gz Atom feed top 2021-11-27 1:22 [patch 00/32] genirq/msi, PCI/MSI: Spring cleaning - Part 2 Thomas Gleixner 2021-11-27 1:22 ` [patch 01/32] genirq/msi: Move descriptor list to struct msi_device_data Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 12:19 ` Greg Kroah-Hartman 2021-11-27 1:22 ` [patch 02/32] genirq/msi: Add mutex for MSI list protection Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 03/32] genirq/msi: Provide msi_domain_alloc/free_irqs_descs_locked() Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 04/32] genirq/msi: Provide a set of advanced MSI accessors and iterators Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-28 1:00 ` Jason Gunthorpe 2021-11-28 19:22 ` Thomas Gleixner 2021-11-29 9:26 ` Thomas Gleixner 2021-11-29 14:01 ` Jason Gunthorpe 2021-11-29 14:46 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 05/32] genirq/msi: Provide msi_alloc_msi_desc() and a simple allocator Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 06/32] genirq/msi: Provide domain flags to allocate/free MSI descriptors automatically Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 07/32] genirq/msi: Count the allocated MSI descriptors Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 12:19 ` Greg Kroah-Hartman 2021-11-27 19:22 ` Thomas Gleixner 2021-11-27 19:45 ` Thomas Gleixner 2021-11-28 11:07 ` Greg Kroah-Hartman 2021-11-28 19:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 08/32] PCI/MSI: Protect MSI operations Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 09/32] PCI/MSI: Use msi_add_msi_desc() Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 10/32] PCI/MSI: Let core code free MSI descriptors Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` 
[patch 11/32] PCI/MSI: Use msi_on_each_desc() Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 12/32] x86/pci/xen: Use msi_for_each_desc() Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 13/32] xen/pcifront: Rework MSI handling Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 14/32] s390/pci: Rework MSI descriptor walk Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-29 10:31 ` Niklas Schnelle 2021-11-29 13:04 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 15/32] powerpc/4xx/hsta: Rework MSI handling Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 16/32] powerpc/cell/axon_msi: Convert to msi_on_each_desc() Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 17/32] powerpc/pasemi/msi: Convert to msi_on_each_dec() Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 18/32] powerpc/fsl_msi: Use msi_for_each_desc() Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 19/32] powerpc/mpic_u3msi: Use msi_for_each-desc() Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner 2021-11-27 1:22 ` [patch 20/32] PCI: hv: Rework MSI handling Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 21/32] NTB/msi: Convert to msi_on_each_desc() Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-29 18:21 ` Logan Gunthorpe 2021-11-29 20:51 ` Thomas Gleixner 2021-11-29 22:27 ` Logan Gunthorpe 2021-11-29 22:50 ` Dave Jiang 2021-11-29 23:31 ` Jason Gunthorpe 2021-11-29 23:52 ` Logan Gunthorpe 2021-11-30 0:01 ` Jason Gunthorpe 2021-11-30 0:29 ` Thomas Gleixner 2021-11-30 19:21 ` Logan Gunthorpe 2021-11-30 19:48 ` Thomas Gleixner 2021-11-30 20:14 ` Logan Gunthorpe 2021-11-30 20:28 ` Jason Gunthorpe 2021-11-30 21:23 ` Thomas Gleixner 2021-12-01 0:17 ` Jason Gunthorpe 2021-12-01 10:16 ` Thomas Gleixner 2021-12-01 13:00 ` Jason Gunthorpe 2021-12-01 17:35 ` Thomas Gleixner 2021-12-01 18:14 ` Jason 
Gunthorpe 2021-12-01 18:46 ` Logan Gunthorpe 2021-12-01 20:21 ` Thomas Gleixner 2021-12-02 0:01 ` Thomas Gleixner 2021-12-02 13:55 ` Jason Gunthorpe 2021-12-02 14:23 ` Greg Kroah-Hartman 2021-12-02 14:45 ` Jason Gunthorpe 2021-12-02 19:25 ` Thomas Gleixner 2021-12-02 20:00 ` Jason Gunthorpe 2021-12-02 22:31 ` Thomas Gleixner 2021-12-03 0:37 ` Jason Gunthorpe 2021-12-03 15:07 ` Thomas Gleixner 2021-12-03 16:41 ` Jason Gunthorpe 2021-12-04 14:20 ` Thomas Gleixner 2021-12-05 14:16 ` Thomas Gleixner 2021-12-06 14:43 ` Jason Gunthorpe 2021-12-06 15:47 ` Thomas Gleixner 2021-12-06 17:00 ` Jason Gunthorpe 2021-12-06 20:28 ` Thomas Gleixner 2021-12-06 21:06 ` Jason Gunthorpe 2021-12-06 22:21 ` Thomas Gleixner 2021-12-06 14:19 ` Jason Gunthorpe 2021-12-06 15:06 ` Thomas Gleixner 2021-12-09 6:26 ` Tian, Kevin 2021-12-09 9:03 ` Thomas Gleixner 2021-12-09 12:17 ` Tian, Kevin 2021-12-09 15:57 ` Thomas Gleixner 2021-12-10 7:37 ` Tian, Kevin 2021-12-09 5:41 ` Tian, Kevin 2021-12-09 5:47 ` Jason Wang 2021-12-01 16:28 ` Dave Jiang 2021-12-01 18:41 ` Thomas Gleixner 2021-12-01 18:47 ` Dave Jiang 2021-12-01 20:25 ` Thomas Gleixner 2021-12-01 21:21 ` Dave Jiang 2021-12-01 21:44 ` Thomas Gleixner 2021-12-01 21:49 ` Dave Jiang 2021-12-01 22:03 ` Thomas Gleixner 2021-12-01 22:53 ` Dave Jiang 2021-12-01 23:57 ` Thomas Gleixner 2021-12-09 5:23 ` Tian, Kevin 2021-12-09 8:37 ` Thomas Gleixner 2021-12-09 12:31 ` Tian, Kevin 2021-12-09 16:21 ` Jason Gunthorpe 2021-12-09 20:32 ` Thomas Gleixner 2021-12-09 20:58 ` Jason Gunthorpe 2021-12-09 22:09 ` Thomas Gleixner 2021-12-10 0:26 ` Thomas Gleixner 2021-12-10 7:29 ` Tian, Kevin 2021-12-10 12:13 ` Thomas Gleixner 2021-12-11 8:06 ` Tian, Kevin 2021-12-10 12:39 ` Jason Gunthorpe 2021-12-10 19:00 ` Thomas Gleixner 2021-12-11 7:44 ` Tian, Kevin 2021-12-11 13:04 ` Thomas Gleixner 2021-12-12 1:56 ` Tian, Kevin 2021-12-12 20:55 ` Thomas Gleixner 2021-12-12 23:37 ` Jason Gunthorpe 2021-12-13 7:50 ` Tian, Kevin 2022-09-15 9:24 ` Tian, Kevin 2022-09-20 
14:09 ` Jason Gunthorpe 2022-09-21 7:57 ` Tian, Kevin 2022-09-21 12:48 ` Jason Gunthorpe 2022-09-22 5:11 ` Tian, Kevin 2022-09-22 12:13 ` Jason Gunthorpe 2022-09-22 22:42 ` Tian, Kevin 2022-09-23 13:26 ` Jason Gunthorpe 2021-12-11 7:52 ` Tian, Kevin 2021-12-12 0:12 ` Thomas Gleixner 2021-12-12 2:14 ` Tian, Kevin 2021-12-12 20:50 ` Thomas Gleixner 2021-12-12 23:42 ` Jason Gunthorpe 2021-12-10 7:36 ` Tian, Kevin 2021-12-10 12:30 ` Jason Gunthorpe 2021-12-12 6:44 ` Mika Penttilä 2021-12-12 23:27 ` Jason Gunthorpe 2021-12-01 14:52 ` Thomas Gleixner 2021-12-01 15:11 ` Jason Gunthorpe 2021-12-01 18:37 ` Thomas Gleixner 2021-12-01 18:47 ` Jason Gunthorpe 2021-12-01 20:26 ` Thomas Gleixner 2022-12-05 18:25 ` [tip: irq/core] PCI/MSI: Provide post-enable dynamic allocation interfaces for MSI-X tip-bot2 for Thomas Gleixner 2022-12-05 21:41 ` tip-bot2 for Thomas Gleixner 2021-11-27 1:23 ` [patch 22/32] soc: ti: ti_sci_inta_msi: Rework MSI descriptor allocation Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 23/32] soc: ti: ti_sci_inta_msi: Remove ti_sci_inta_msi_domain_free_irqs() Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 24/32] bus: fsl-mc-msi: Simplify MSI descriptor handling Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 25/32] platform-msi: Let core code handle MSI descriptors Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 26/32] platform-msi: Simplify platform device MSI code Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 27/32] genirq/msi: Make interrupt allocation less convoluted Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 28/32] genirq/msi: Convert to new functions Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 29/32] genirq/msi: Mop up old interfaces Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 30/32] genirq/msi: Add abuse prevention 
comment to msi header Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 1:23 ` [patch 31/32] genirq/msi: Simplify sysfs handling Thomas Gleixner 2021-11-27 1:24 ` Thomas Gleixner 2021-11-27 12:32 ` Greg Kroah-Hartman 2021-11-27 19:31 ` Thomas Gleixner 2021-11-28 11:07 ` Greg Kroah-Hartman 2021-11-28 19:33 ` Thomas Gleixner 2021-11-27 1:23 ` Thomas Gleixner [this message] 2021-11-27 1:24 ` [patch 32/32] genirq/msi: Convert storage to xarray Thomas Gleixner 2021-11-27 12:33 ` Greg Kroah-Hartman 2021-11-27 1:23 ` [patch 00/32] genirq/msi, PCI/MSI: Spring cleaning - Part 2 Thomas Gleixner
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20211126232736.190744801@linutronix.de \ --to=tglx@linutronix.de \ --cc=alex.williamson@redhat.com \ --cc=allenbh@gmail.com \ --cc=ashok.raj@intel.com \ --cc=borntraeger@de.ibm.com \ --cc=dave.jiang@intel.com \ --cc=gregkh@linuxfoundation.org \ --cc=hca@linux.ibm.com \ --cc=helgaas@kernel.org \ --cc=jdmason@kudzu.us \ --cc=jgg@nvidia.com \ --cc=kevin.tian@intel.com \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-ntb@googlegroups.com \ --cc=linux-pci@vger.kernel.org \ --cc=linux-s390@vger.kernel.org \ --cc=maz@kernel.org \ --cc=megha.dey@intel.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).