From: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Oleksandr Andrushchenko" <Oleksandr_Andrushchenko@epam.com>,
	"Volodymyr Babchuk" <Volodymyr_Babchuk@epam.com>,
	"Jan Beulich" <jbeulich@suse.com>, "Paul Durrant" <paul@xen.org>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [RFC PATCH 02/10] xen: pci: add pci_seg->alldevs_lock
Date: Wed, 31 Aug 2022 14:10:59 +0000	[thread overview]
Message-ID: <20220831141040.13231-3-volodymyr_babchuk@epam.com> (raw)
In-Reply-To: <20220831141040.13231-1-volodymyr_babchuk@epam.com>

This lock protects the alldevs_list field of struct pci_seg. As such,
it must be held when adding, removing, or enumerating the PCI devices
assigned to a PCI segment.

The radix tree that stores PCI segments has its own locking mechanism,
and pci_seg structures are only ever allocated and never freed, so no
additional locking is needed to access the pci_seg structures
themselves. What is needed is a lock protecting the alldevs_list field.

This enables more granular locking instead of one huge pcidevs_lock
that covers the entire PCI subsystem. Note that pcidevs_lock() is
still used here; it will be removed in subsequent patches.
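
For illustration only (not part of the patch): a minimal userspace
sketch of the per-segment locking pattern described above. The names
(struct seg, seg_init, seg_add_dev) and the use of POSIX mutexes are
hypothetical stand-ins; the actual change below uses Xen's spinlock_t
and list_head primitives on struct pci_seg.

    /*
     * Sketch of the pattern: each segment carries its own lock, and
     * that lock is held whenever the segment's device list is walked
     * or modified.
     */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct dev {
        uint8_t bus, devfn;
        struct dev *next;
    };

    struct seg {
        pthread_mutex_t alldevs_lock;   /* protects the list below */
        struct dev *alldevs;
    };

    /* Mirrors alloc_pseg(): set up the per-segment lock and list. */
    static void seg_init(struct seg *s)
    {
        pthread_mutex_init(&s->alldevs_lock, NULL);
        s->alldevs = NULL;
    }

    /* Mirrors alloc_pdev(): add a device unless it is already listed. */
    static struct dev *seg_add_dev(struct seg *s, uint8_t bus, uint8_t devfn)
    {
        struct dev *d;

        pthread_mutex_lock(&s->alldevs_lock);
        for ( d = s->alldevs; d; d = d->next )
            if ( d->bus == bus && d->devfn == devfn )
            {
                pthread_mutex_unlock(&s->alldevs_lock);
                return d;               /* already present */
            }

        d = calloc(1, sizeof(*d));
        if ( d )
        {
            d->bus = bus;
            d->devfn = devfn;
            d->next = s->alldevs;
            s->alldevs = d;             /* insertion done under the lock */
        }
        pthread_mutex_unlock(&s->alldevs_lock);
        return d;
    }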

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
---
 xen/drivers/passthrough/pci.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 4366f8f965..2dfa1c2875 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -38,6 +38,7 @@
 
 struct pci_seg {
     struct list_head alldevs_list;
+    spinlock_t alldevs_lock;
     u16 nr;
     unsigned long *ro_map;
     /* bus2bridge_lock protects bus2bridge array */
@@ -93,6 +94,7 @@ static struct pci_seg *alloc_pseg(u16 seg)
     pseg->nr = seg;
     INIT_LIST_HEAD(&pseg->alldevs_list);
     spin_lock_init(&pseg->bus2bridge_lock);
+    spin_lock_init(&pseg->alldevs_lock);
 
     if ( radix_tree_insert(&pci_segments, seg, pseg) )
     {
@@ -385,9 +387,13 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
     unsigned int pos;
     int rc;
 
+    spin_lock(&pseg->alldevs_lock);
     list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
         if ( pdev->bus == bus && pdev->devfn == devfn )
+        {
+            spin_unlock(&pseg->alldevs_lock);
             return pdev;
+        }
 
     pdev = xzalloc(struct pci_dev);
     if ( !pdev )
@@ -404,10 +410,12 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
     if ( rc )
     {
         xfree(pdev);
+        spin_unlock(&pseg->alldevs_lock);
         return NULL;
     }
 
     list_add(&pdev->alldevs_list, &pseg->alldevs_list);
+    spin_unlock(&pseg->alldevs_lock);
 
     /* update bus2bridge */
     switch ( pdev->type = pdev_type(pseg->nr, bus, devfn) )
@@ -611,15 +619,20 @@ struct pci_dev *pci_get_pdev(struct domain *d, pci_sbdf_t sbdf)
      */
     if ( !d || is_hardware_domain(d) )
     {
-        const struct pci_seg *pseg = get_pseg(sbdf.seg);
+        struct pci_seg *pseg = get_pseg(sbdf.seg);
 
         if ( !pseg )
             return NULL;
 
+        spin_lock(&pseg->alldevs_lock);
         list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
             if ( pdev->sbdf.bdf == sbdf.bdf &&
                  (!d || pdev->domain == d) )
+            {
+                spin_unlock(&pseg->alldevs_lock);
                 return pdev;
+            }
+        spin_unlock(&pseg->alldevs_lock);
     }
     else
     {
@@ -893,6 +906,7 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
         return -ENODEV;
 
     pcidevs_lock();
+    spin_lock(&pseg->alldevs_lock);
     list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
         if ( pdev->bus == bus && pdev->devfn == devfn )
         {
@@ -907,10 +921,12 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
             }
             printk(XENLOG_DEBUG "PCI remove device %pp\n", &pdev->sbdf);
             free_pdev(pseg, pdev);
+            list_del(&pdev->alldevs_list);
             break;
         }
 
     pcidevs_unlock();
+    spin_unlock(&pseg->alldevs_lock);
     return ret;
 }
 
@@ -1363,6 +1379,7 @@ static int cf_check _dump_pci_devices(struct pci_seg *pseg, void *arg)
 
     printk("==== segment %04x ====\n", pseg->nr);
 
+    spin_lock(&pseg->alldevs_lock);
     list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
     {
         printk("%pp - ", &pdev->sbdf);
@@ -1376,6 +1393,7 @@ static int cf_check _dump_pci_devices(struct pci_seg *pseg, void *arg)
         pdev_dump_msi(pdev);
         printk("\n");
     }
+    spin_unlock(&pseg->alldevs_lock);
 
     return 0;
 }
-- 
2.36.1


Thread overview: 43+ messages
2022-08-31 14:10 [RFC PATCH 00/10] Rework PCI locking Volodymyr Babchuk
2022-08-31 14:10 ` [RFC PATCH 01/10] xen: pci: add per-domain pci list lock Volodymyr Babchuk
2023-01-26 23:18   ` Stefano Stabellini
2023-01-27  8:01     ` Jan Beulich
2023-02-14 23:38     ` Volodymyr Babchuk
2023-02-15  9:06       ` Jan Beulich
2022-08-31 14:10 ` [RFC PATCH 04/10] xen: add reference counter support Volodymyr Babchuk
2023-02-15 11:20   ` Jan Beulich
2023-02-17  1:56     ` Volodymyr Babchuk
2023-02-17  7:53       ` Jan Beulich
2023-02-19 22:34         ` Volodymyr Babchuk
2022-08-31 14:10 ` [RFC PATCH 03/10] xen: pci: introduce ats_list_lock Volodymyr Babchuk
2023-01-26 23:56   ` Stefano Stabellini
2023-01-27  8:13     ` Jan Beulich
2023-02-17  1:20       ` Volodymyr Babchuk
2023-02-17  7:39         ` Jan Beulich
2022-08-31 14:10 ` Volodymyr Babchuk [this message]
2023-01-26 23:40   ` [RFC PATCH 02/10] xen: pci: add pci_seg->alldevs_lock Stefano Stabellini
2023-02-28 16:32   ` Jan Beulich
2022-08-31 14:11 ` [RFC PATCH 05/10] xen: pci: introduce reference counting for pdev Volodymyr Babchuk
2023-01-27  0:43   ` Stefano Stabellini
2023-02-20 22:00     ` Volodymyr Babchuk
2023-02-28 17:06   ` Jan Beulich
2022-08-31 14:11 ` [RFC PATCH 06/10] xen: pci: print reference counter when dumping pci_devs Volodymyr Babchuk
2022-08-31 14:11 ` [RFC PATCH 07/10] xen: pci: add per-device locking Volodymyr Babchuk
2023-01-28  0:56   ` Stefano Stabellini
2023-02-20 22:29     ` Volodymyr Babchuk
2023-02-28 16:46   ` Jan Beulich
2022-08-31 14:11 ` [RFC PATCH 09/10] [RFC only] xen: iommu: remove last pcidevs_lock() calls in iommu Volodymyr Babchuk
2023-01-28  1:36   ` Stefano Stabellini
2023-02-20  0:41     ` Volodymyr Babchuk
2023-02-28 16:25   ` Jan Beulich
2022-08-31 14:11 ` [RFC PATCH 08/10] xen: pci: remove pcidev_[un]lock[ed] calls Volodymyr Babchuk
2023-01-28  1:32   ` Stefano Stabellini
2023-02-20 23:13     ` Volodymyr Babchuk
2023-02-21  9:50       ` Jan Beulich
2023-03-09  1:22         ` Volodymyr Babchuk
2023-03-09  9:06           ` Jan Beulich
2023-02-28 16:51     ` Jan Beulich
2022-08-31 14:11 ` [RFC PATCH 10/10] [RFC only] xen: pci: remove pcidev_lock() function Volodymyr Babchuk
2022-09-06 10:32 ` [RFC PATCH 00/10] Rework PCI locking Jan Beulich
2023-01-18 18:21   ` Julien Grall
2023-01-19  9:47     ` Jan Beulich
