From: Oleksandr Andrushchenko <andr2000@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: julien@xen.org, sstabellini@kernel.org,
	oleksandr_tyshchenko@epam.com, volodymyr_babchuk@epam.com,
	Artem_Mygaiev@epam.com, roger.pau@citrix.com,
	bertrand.marquis@arm.com, rahul.singh@arm.com,
	Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Subject: [PATCH v2 09/11] xen/arm: Setup MMIO range trap handlers for hardware domain
Date: Thu, 23 Sep 2021 15:54:36 +0300
Message-ID: <20210923125438.234162-10-andr2000@gmail.com>
In-Reply-To: <20210923125438.234162-1-andr2000@gmail.com>

From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>

In order for vPCI to work it needs to maintain the guest's and the
hardware domain's views of the configuration space. For example, BARs
and COMMAND registers require emulation for guests, and the guest's
view of these registers needs to be kept in sync with the real
contents of the relevant registers. For that, the ECAM address space
also needs to be trapped for the hardware domain, so implement PCI
host bridge specific callbacks to properly set up MMIO handlers for
those ranges depending on the particular host bridge implementation.
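
For context, trapping the ECAM window is enough to tell which device
and which register a configuration access targets, because ECAM
encodes the SBDF and the register number in the offset within the
window. A minimal, purely illustrative sketch of that decoding (not
part of this patch; the helper name is made up):

/*
 * Illustrative only: decode an offset within a trapped ECAM window
 * into bus/device/function and the config space register.
 * ECAM layout: bits [27:20] bus, [19:15] device, [14:12] function,
 * bits [11:0] register within the 4K per-function config space.
 */
static void ecam_decode(paddr_t offset, uint8_t *bus, uint8_t *dev,
                        uint8_t *fn, uint16_t *reg)
{
    *bus = (offset >> 20) & 0xff;
    *dev = (offset >> 15) & 0x1f;
    *fn  = (offset >> 12) & 0x7;
    *reg = offset & 0xfff;
}

The MMIO handlers registered in this patch receive the faulting
address, subtract the window base to obtain such an offset, and can
then route the access to the vPCI emulation of the matching device.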

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>

---
Since v1:
 - Dynamically calculate the number of MMIO handlers required for vPCI
   and update the total number accordingly
 - s/clb/cb
 - Do not introduce a new callback for MMIO handler setup
---
 xen/arch/arm/domain.c              |  2 ++
 xen/arch/arm/pci/pci-host-common.c | 28 +++++++++++++++++++++++++
 xen/arch/arm/vpci.c                | 33 ++++++++++++++++++++++++++++++
 xen/arch/arm/vpci.h                |  6 ++++++
 xen/include/asm-arm/pci.h          |  7 +++++++
 5 files changed, 76 insertions(+)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 854e8fed0393..c7b25bc70439 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -733,6 +733,8 @@ int arch_domain_create(struct domain *d,
     if ( (rc = domain_vgic_register(d, &count)) != 0 )
         goto fail;
 
+    count += domain_vpci_get_num_mmio_handlers(d);
+
     if ( (rc = domain_io_init(d, count + MAX_IO_HANDLER)) != 0 )
         goto fail;
 
diff --git a/xen/arch/arm/pci/pci-host-common.c b/xen/arch/arm/pci/pci-host-common.c
index 1567b6e2956c..155f2a2743af 100644
--- a/xen/arch/arm/pci/pci-host-common.c
+++ b/xen/arch/arm/pci/pci-host-common.c
@@ -300,6 +300,34 @@ struct dt_device_node *pci_find_host_bridge_node(struct device *dev)
     }
     return bridge->dt_node;
 }
+
+int pci_host_iterate_bridges(struct domain *d,
+                             int (*cb)(struct domain *d,
+                                       struct pci_host_bridge *bridge))
+{
+    struct pci_host_bridge *bridge;
+    int err;
+
+    list_for_each_entry( bridge, &pci_host_bridges, node )
+    {
+        err = cb(d, bridge);
+        if ( err )
+            return err;
+    }
+    return 0;
+}
+
+int pci_host_get_num_bridges(void)
+{
+    struct pci_host_bridge *bridge;
+    int count = 0;
+
+    list_for_each_entry( bridge, &pci_host_bridges, node )
+        count++;
+
+    return count;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/vpci.c b/xen/arch/arm/vpci.c
index 76c12b92814f..14947e975d69 100644
--- a/xen/arch/arm/vpci.c
+++ b/xen/arch/arm/vpci.c
@@ -80,17 +80,50 @@ static const struct mmio_handler_ops vpci_mmio_handler = {
     .write = vpci_mmio_write,
 };
 
+static int vpci_setup_mmio_handler(struct domain *d,
+                                   struct pci_host_bridge *bridge)
+{
+    struct pci_config_window *cfg = bridge->cfg;
+
+    register_mmio_handler(d, &vpci_mmio_handler,
+                          cfg->phys_addr, cfg->size, NULL);
+    return 0;
+}
+
 int domain_vpci_init(struct domain *d)
 {
     if ( !has_vpci(d) )
         return 0;
 
+    if ( is_hardware_domain(d) )
+        return pci_host_iterate_bridges(d, vpci_setup_mmio_handler);
+
+    /* Guest domains use what is programmed in their device tree. */
     register_mmio_handler(d, &vpci_mmio_handler,
                           GUEST_VPCI_ECAM_BASE, GUEST_VPCI_ECAM_SIZE, NULL);
 
     return 0;
 }
 
+int domain_vpci_get_num_mmio_handlers(struct domain *d)
+{
+    int count = 0;
+
+    if ( is_hardware_domain(d) )
+        /* For each PCI host bridge's configuration space. */
+        count += pci_host_get_num_bridges();
+    else
+        /*
+         * VPCI_MSIX_MEM_NUM handlers for MSI-X tables per PCI device being
+         * passed through; the maximum number of supported devices is 32, as
+         * the virtual bus topology emulates devices as embedded endpoints.
+         * +1 for the single emulated host bridge's configuration space.
+         */
+        count = VPCI_MSIX_MEM_NUM * 32 + 1;
+
+    return count;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/vpci.h b/xen/arch/arm/vpci.h
index d8a7b0e3e802..27a2b069abd2 100644
--- a/xen/arch/arm/vpci.h
+++ b/xen/arch/arm/vpci.h
@@ -17,11 +17,17 @@
 
 #ifdef CONFIG_HAS_VPCI
 int domain_vpci_init(struct domain *d);
+int domain_vpci_get_num_mmio_handlers(struct domain *d);
 #else
 static inline int domain_vpci_init(struct domain *d)
 {
     return 0;
 }
+
+static inline int domain_vpci_get_num_mmio_handlers(struct domain *d)
+{
+    return 0;
+}
 #endif
 
 #endif /* __ARCH_ARM_VPCI_H__ */
diff --git a/xen/include/asm-arm/pci.h b/xen/include/asm-arm/pci.h
index 5b100556225e..7618f0b6725b 100644
--- a/xen/include/asm-arm/pci.h
+++ b/xen/include/asm-arm/pci.h
@@ -15,6 +15,8 @@
 #ifndef __ARM_PCI_H__
 #define __ARM_PCI_H__
 
+#include <asm/mmio.h>
+
 #ifdef CONFIG_HAS_PCI
 
 #define pci_to_dev(pcidev) (&(pcidev)->arch.dev)
@@ -109,6 +111,11 @@ static always_inline bool is_pci_passthrough_enabled(void)
 {
     return !!pci_passthrough_enabled;
 }
+
+int pci_host_iterate_bridges(struct domain *d,
+                             int (*cb)(struct domain *d,
+                                       struct pci_host_bridge *bridge));
+int pci_host_get_num_bridges(void);
 #else   /*!CONFIG_HAS_PCI*/
 
 #define pci_passthrough_enabled (false)
-- 
2.25.1
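
A side note on the new pci_host_iterate_bridges() helper: it is
generic enough to be reused for other per-bridge setup steps. A
hypothetical caller (not part of this series; the callback below is
made up) could look like:

/* Hypothetical callback: log each host bridge's ECAM window. */
static int dump_bridge_cfg(struct domain *d, struct pci_host_bridge *bridge)
{
    struct pci_config_window *cfg = bridge->cfg;

    printk(XENLOG_INFO "%pd: ECAM at [%"PRIpaddr" - %"PRIpaddr"]\n",
           d, cfg->phys_addr, cfg->phys_addr + cfg->size - 1);
    return 0;
}

/* Invoked from some domain setup path: */
int ret = pci_host_iterate_bridges(d, dump_bridge_cfg);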



Thread overview: 49+ messages
2021-09-23 12:54 [PATCH v2 00/11] PCI devices passthrough on Arm, part 2 Oleksandr Andrushchenko
2021-09-23 12:54 ` [PATCH v2 01/11] xen/arm: Fix dev_is_dt macro definition Oleksandr Andrushchenko
2021-09-24 23:39   ` Stefano Stabellini
2021-09-28  7:31   ` Michal Orzel
2021-09-23 12:54 ` [PATCH v2 02/11] xen/arm: Add new device type for PCI Oleksandr Andrushchenko
2021-09-24 23:45   ` Stefano Stabellini
2021-09-27  7:41   ` Jan Beulich
2021-09-27  8:18     ` Oleksandr Andrushchenko
2021-09-23 12:54 ` [PATCH v2 03/11] xen/arm: Introduce pci_find_host_bridge_node helper Oleksandr Andrushchenko
2021-09-24 23:48   ` Stefano Stabellini
2021-09-23 12:54 ` [PATCH v2 04/11] xen/device-tree: Make dt_find_node_by_phandle global Oleksandr Andrushchenko
2021-09-24 23:51   ` Stefano Stabellini
2021-09-23 12:54 ` [PATCH v2 05/11] xen/arm: Mark device as PCI while creating one Oleksandr Andrushchenko
2021-09-24 23:54   ` Stefano Stabellini
2021-09-27  7:13     ` Oleksandr Andrushchenko
2021-09-27  7:45   ` Jan Beulich
2021-09-27  8:45     ` Oleksandr Andrushchenko
2021-09-27  9:19       ` Jan Beulich
2021-09-27  9:35         ` Oleksandr Andrushchenko
2021-09-27 10:00           ` Jan Beulich
2021-09-27 10:04             ` Oleksandr Andrushchenko
2021-09-27 10:26               ` Jan Beulich
2021-09-28  8:09                 ` Oleksandr Andrushchenko
2021-09-28  8:26                   ` Jan Beulich
2021-09-28  8:29                     ` Oleksandr Andrushchenko
2021-09-28  8:39                       ` Jan Beulich
2021-09-28  8:54                         ` Oleksandr Andrushchenko
2021-09-23 12:54 ` [PATCH v2 06/11] xen/domain: Call pci_release_devices() when releasing domain resources Oleksandr Andrushchenko
2021-09-24 23:57   ` Stefano Stabellini
2021-09-23 12:54 ` [PATCH v2 07/11] libxl: Allow removing PCI devices for all types of domains Oleksandr Andrushchenko
2021-09-25  0:00   ` Stefano Stabellini
2021-09-27  7:17     ` Oleksandr Andrushchenko
2021-09-23 12:54 ` [PATCH v2 08/11] libxl: Only map legacy PCI IRQs if they are supported Oleksandr Andrushchenko
2021-09-25  0:06   ` Stefano Stabellini
2021-09-27 13:02     ` Oleksandr Andrushchenko
2021-09-23 12:54 ` Oleksandr Andrushchenko [this message]
2021-09-25  0:18   ` [PATCH v2 09/11] xen/arm: Setup MMIO range trap handlers for hardware domain Stefano Stabellini
2021-09-27  8:47     ` Oleksandr Andrushchenko
2021-09-23 12:54 ` [PATCH v2 10/11] xen/arm: Do not map PCI ECAM and MMIO space to Domain-0's p2m Oleksandr Andrushchenko
2021-09-25  0:44   ` Stefano Stabellini
2021-09-27 12:44     ` Oleksandr Andrushchenko
2021-09-28  4:00       ` Stefano Stabellini
2021-09-28  4:51         ` Oleksandr Andrushchenko
2021-09-28 14:39           ` Oleksandr Andrushchenko
2021-09-28 16:11             ` Stefano Stabellini
2021-09-23 12:54 ` [PATCH v2 11/11] xen/arm: Process pending vPCI map/unmap operations Oleksandr Andrushchenko
2021-09-25  1:20   ` Stefano Stabellini
2021-09-27  8:06   ` Jan Beulich
2021-09-27  8:39     ` Oleksandr Andrushchenko
