From: Thomas Gleixner
To: LKML
Cc: Bjorn Helgaas, Marc Zyngier, Alex Williamson, Kevin Tian,
    Jason Gunthorpe, Megha Dey, Ashok Raj, linux-pci@vger.kernel.org,
    Greg Kroah-Hartman, linux-s390@vger.kernel.org, Heiko Carstens,
    Christian Borntraeger, Jon Mason, Dave Jiang, Allen Hubbe,
    linux-ntb@googlegroups.com
Subject: [patch 11/32] PCI/MSI: Use msi_on_each_desc()
Date: Sat, 27 Nov 2021 02:23:46 +0100 (CET)
Message-ID: <20211126232734.949173952@linutronix.de>
References: <20211126230957.239391799@linutronix.de>

Use the new iterator functions which pave the way for dynamically
extending MSI-X vectors.
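
As a rough illustration of the conversion pattern used throughout this
patch (not part of the change itself: example_show_assigned() is a
made-up helper, and the sketch assumes the msi_lock_descs() /
msi_unlock_descs() helpers and the descriptor filters introduced
earlier in this series), a filtered walk over a device's MSI
descriptors looks roughly like this:

  #include <linux/msi.h>
  #include <linux/pci.h>

  /*
   * Illustrative sketch only: iterate the MSI descriptors of a PCI
   * device with the new filtered iterator instead of the old
   * for_each_pci_msi_entry(). The filter selects which descriptors
   * are visited:
   *   MSI_DESC_ALL           - every descriptor
   *   MSI_DESC_ASSOCIATED    - descriptors with a Linux interrupt assigned
   *   MSI_DESC_NOTASSOCIATED - descriptors without an interrupt assigned
   */
  static void example_show_assigned(struct pci_dev *pdev)
  {
          struct msi_desc *desc;

          msi_lock_descs(&pdev->dev);
          msi_for_each_desc(desc, &pdev->dev, MSI_DESC_ASSOCIATED)
                  pci_info(pdev, "MSI index %u -> irq %u\n",
                           desc->msi_index, desc->irq);
          msi_unlock_descs(&pdev->dev);
  }
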
Signed-off-by: Thomas Gleixner
---
 drivers/pci/msi/irqdomain.c |    4 ++--
 drivers/pci/msi/legacy.c    |   19 ++++++++-----------
 drivers/pci/msi/msi.c       |   30 ++++++++++++++----------------
 3 files changed, 24 insertions(+), 29 deletions(-)

--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -83,7 +83,7 @@ static int pci_msi_domain_check_cap(stru
 				    struct msi_domain_info *info,
 				    struct device *dev)
 {
-	struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
+	struct msi_desc *desc = msi_first_desc(dev);
 
 	/* Special handling to support __pci_enable_msi_range() */
 	if (pci_msi_desc_is_multi_msi(desc) &&
@@ -98,7 +98,7 @@ static int pci_msi_domain_check_cap(stru
 		unsigned int idx = 0;
 
 		/* Check for gaps in the entry indices */
-		for_each_msi_entry(desc, dev) {
+		msi_for_each_desc(desc, dev, MSI_DESC_ALL) {
 			if (desc->msi_index != idx++)
 				return -ENOTSUPP;
 		}
--- a/drivers/pci/msi/legacy.c
+++ b/drivers/pci/msi/legacy.c
@@ -29,7 +29,7 @@ int __weak arch_setup_msi_irqs(struct pc
 	if (type == PCI_CAP_ID_MSI && nvec > 1)
 		return 1;
 
-	for_each_pci_msi_entry(desc, dev) {
+	msi_for_each_desc(desc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
 		ret = arch_setup_msi_irq(dev, desc);
 		if (ret)
 			return ret < 0 ? ret : -ENOSPC;
@@ -43,27 +43,24 @@ void __weak arch_teardown_msi_irqs(struc
 	struct msi_desc *desc;
 	int i;
 
-	for_each_pci_msi_entry(desc, dev) {
-		if (desc->irq) {
-			for (i = 0; i < entry->nvec_used; i++)
-				arch_teardown_msi_irq(desc->irq + i);
-		}
+	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) {
+		for (i = 0; i < desc->nvec_used; i++)
+			arch_teardown_msi_irq(desc->irq + i);
 	}
 }
 
 static int pci_msi_setup_check_result(struct pci_dev *dev, int type, int ret)
 {
-	struct msi_desc *entry;
+	struct msi_desc *desc;
 	int avail = 0;
 
 	if (type != PCI_CAP_ID_MSIX || ret >= 0)
 		return ret;
 
 	/* Scan the MSI descriptors for successfully allocated ones. */
-	for_each_pci_msi_entry(entry, dev) {
-		if (entry->irq != 0)
-			avail++;
-	}
+	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED)
+		avail++;
+
 	return avail ? avail : ret;
 }
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -299,7 +299,6 @@ static void __pci_restore_msix_state(str
 
 	if (!dev->msix_enabled)
 		return;
-	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
 
 	/* route the table */
 	pci_intx_for_msi(dev, 0);
@@ -309,7 +308,7 @@ static void __pci_restore_msix_state(str
 	write_msg = arch_restore_msi_irqs(dev);
 
 	msi_lock_descs(&dev->dev);
-	for_each_pci_msi_entry(entry, dev) {
+	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
 		if (write_msg)
 			__pci_write_msi_msg(entry, &entry->msg);
 		pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
@@ -378,14 +377,14 @@ static int msi_verify_entries(struct pci
 	if (!dev->no_64bit_msi)
 		return 0;
 
-	for_each_pci_msi_entry(entry, dev) {
+	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
 		if (entry->msg.address_hi) {
 			pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
 				entry->msg.address_hi, entry->msg.address_lo);
-			return -EIO;
+			break;
 		}
 	}
-	return 0;
+	return !entry ? 0 : -EIO;
 }
 
 /**
@@ -418,7 +417,7 @@ static int msi_capability_init(struct pc
 		goto unlock;
 
 	/* All MSIs are unmasked by default; mask them all */
-	entry = first_pci_msi_entry(dev);
+	entry = msi_first_desc(&dev->dev);
 	pci_msi_mask(entry, msi_multi_mask(entry));
 
 	/* Configure MSI capability structure */
@@ -508,11 +507,11 @@ static int msix_setup_msi_descs(struct p
 
 static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
 {
-	struct msi_desc *entry;
+	struct msi_desc *desc;
 
 	if (entries) {
-		for_each_pci_msi_entry(entry, dev) {
-			entries->vector = entry->irq;
+		msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
+			entries->vector = desc->irq;
 			entries++;
 		}
 	}
@@ -705,15 +704,14 @@ static void pci_msi_shutdown(struct pci_
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
 
-	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
-	desc = first_pci_msi_entry(dev);
-
 	pci_msi_set_enable(dev, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msi_enabled = 0;
 
 	/* Return the device with MSI unmasked as initial states */
-	pci_msi_unmask(desc, msi_multi_mask(desc));
+	desc = msi_first_desc(&dev->dev);
+	if (!WARN_ON_ONCE(!desc))
+		pci_msi_unmask(desc, msi_multi_mask(desc));
 
 	/* Restore dev->irq to its default pin-assertion IRQ */
 	dev->irq = desc->pci.msi_attrib.default_irq;
@@ -789,7 +787,7 @@ static int __pci_enable_msix(struct pci_
 
 static void pci_msix_shutdown(struct pci_dev *dev)
 {
-	struct msi_desc *entry;
+	struct msi_desc *desc;
 
 	if (!pci_msi_enable || !dev || !dev->msix_enabled)
 		return;
@@ -800,8 +798,8 @@ static void pci_msix_shutdown(struct pci
 	}
 
 	/* Return the device with MSI-X masked as initial states */
-	for_each_pci_msi_entry(entry, dev)
-		pci_msix_mask(entry);
+	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
+		pci_msix_mask(desc);
 
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 	pci_intx_for_msi(dev, 1);