* [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
@ 2014-10-20 22:34 Knut Omang
  2014-10-20 22:34 ` [Qemu-devel] [PATCH 1/2] iommu: Replace bus+devfn arguments with PCIDevice* in PCIIOMMUFunc Knut Omang
                   ` (2 more replies)
  0 siblings, 3 replies; 20+ messages in thread
From: Knut Omang @ 2014-10-20 22:34 UTC (permalink / raw)
  To: qemu-devel
  Cc: Knut Omang, Michael S. Tsirkin, Michael Tokarev,
	Marcel Apfelbaum, Mark Cave-Ayland, Alexander Graf,
	Markus Armbruster, Andreas Färber, Hervé Poussineau,
	Anthony Liguori, Stefan Weil, qemu-ppc, Richard Henderson

This patch set changes the data structure used to handle address spaces within
the emulated Intel IOMMU so that traversal also works when bus numbers are
dynamically allocated, as is the case for devices that sit behind root ports
or downstream switches. This means that the bus number cannot be used as an
index; a QLIST is used instead.

This requires a change in the API for setting up IOMMUs, which is taken care
of by the first patch. The second patch implements the fix.
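
For orientation, the lookup conceptually changes from an index into fixed
tables to a search keyed on the device's current bus number - a rough sketch
only, using the names introduced in patch 2:

    /* Before: indexed by the bus number recorded at setup time, which may
     * be unassigned or stale for a bus behind a bridge. */
    vtd_as = s->address_spaces[bus_num][devfn];

    /* After: walk the per-device list and compare against the bus number
     * the device's bus carries right now. */
    QLIST_FOREACH(vtd_as, &s->address_spaces, iommu_next) {
        if (pci_bus_num(vtd_as->dev->bus) == bus_num &&
            vtd_as->devfn == devfn) {
            break;
        }
    }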

Knut Omang (2):
  iommu: Replace bus+devfn arguments with PCIDevice* in PCIIOMMUFunc
  intel_iommu: Add support for translation for devices behind bridges.

 hw/alpha/typhoon.c            |  2 +-
 hw/i386/intel_iommu.c         | 58 ++++++++++++++++++-------------------------
 hw/pci-host/apb.c             |  2 +-
 hw/pci-host/prep.c            |  3 +--
 hw/pci-host/q35.c             | 41 +++++++++++++-----------------
 hw/pci/pci.c                  |  7 +++---
 hw/pci/pci_bridge.c           |  6 +++++
 hw/ppc/spapr_pci.c            |  2 +-
 include/hw/i386/intel_iommu.h |  6 +++--
 include/hw/pci/pci.h          |  4 ++-
 10 files changed, 61 insertions(+), 70 deletions(-)

-- 
1.9.0

* [Qemu-devel] [PATCH 1/2] iommu: Replace bus+devfn arguments with PCIDevice* in PCIIOMMUFunc
  2014-10-20 22:34 [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
@ 2014-10-20 22:34 ` Knut Omang
  2014-10-20 22:34 ` [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
  2014-10-20 23:29 ` [Qemu-devel] [PATCH 0/2] " Alexander Graf
  2 siblings, 0 replies; 20+ messages in thread
From: Knut Omang @ 2014-10-20 22:34 UTC (permalink / raw)
  To: qemu-devel
  Cc: Knut Omang, Michael S. Tsirkin, Michael Tokarev,
	Marcel Apfelbaum, Mark Cave-Ayland, Alexander Graf,
	Markus Armbruster, Andreas Färber, Hervé Poussineau,
	Anthony Liguori, Stefan Weil, qemu-ppc, Richard Henderson

The dev pointer is needed by intel_iommu so that it can store the DMA address
space pointer with the device.
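
For reference, the callback signature changes from

    typedef AddressSpace *(*PCIIOMMUFunc)(PCIBus *, void *, int);

to

    typedef AddressSpace *(*PCIIOMMUFunc)(PCIDevice *, void *);

An implementation can still derive the old arguments via pci_bus_num(dev->bus)
and dev->devfn (see the q35 hunk below), and it now also has the device at
hand, so per-device state such as the new dev->dma_as field can be set up.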

Signed-off-by: Knut Omang <knut.omang@oracle.com>
---
 hw/alpha/typhoon.c   | 2 +-
 hw/pci-host/apb.c    | 2 +-
 hw/pci-host/prep.c   | 3 +--
 hw/pci-host/q35.c    | 5 +++--
 hw/pci/pci.c         | 7 +++----
 hw/ppc/spapr_pci.c   | 2 +-
 include/hw/pci/pci.h | 4 +++-
 7 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c
index 5310006..62f7178 100644
--- a/hw/alpha/typhoon.c
+++ b/hw/alpha/typhoon.c
@@ -725,7 +725,7 @@ static const MemoryRegionIOMMUOps typhoon_iommu_ops = {
     .translate = typhoon_translate_iommu,
 };
 
-static AddressSpace *typhoon_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
+static AddressSpace *typhoon_pci_dma_iommu(PCIDevice *dev, void *opaque)
 {
     TyphoonState *s = opaque;
     return &s->pchip.iommu_as;
diff --git a/hw/pci-host/apb.c b/hw/pci-host/apb.c
index f573875..06b6daa 100644
--- a/hw/pci-host/apb.c
+++ b/hw/pci-host/apb.c
@@ -198,7 +198,7 @@ static inline void pbm_clear_request(APBState *s, unsigned int irq_num)
     s->irq_request = NO_IRQ_REQUEST;
 }
 
-static AddressSpace *pbm_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
+static AddressSpace *pbm_pci_dma_iommu(PCIDevice *dev, void *opaque)
 {
     IOMMUState *is = opaque;
 
diff --git a/hw/pci-host/prep.c b/hw/pci-host/prep.c
index 1de3681..ba997a8 100644
--- a/hw/pci-host/prep.c
+++ b/hw/pci-host/prep.c
@@ -194,8 +194,7 @@ static void raven_set_irq(void *opaque, int irq_num, int level)
     qemu_set_irq(pic[irq_num] , level);
 }
 
-static AddressSpace *raven_pcihost_set_iommu(PCIBus *bus, void *opaque,
-                                             int devfn)
+static AddressSpace *raven_pcihost_set_iommu(PCIDevice *dev, void *opaque)
 {
     PREPPCIState *s = opaque;
 
diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
index b20bad8..c087c96 100644
--- a/hw/pci-host/q35.c
+++ b/hw/pci-host/q35.c
@@ -347,11 +347,12 @@ static void mch_reset(DeviceState *qdev)
     mch_update(mch);
 }
 
-static AddressSpace *q35_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
+static AddressSpace *q35_host_dma_iommu(PCIDevice *dev, void *opaque)
 {
     IntelIOMMUState *s = opaque;
     VTDAddressSpace **pvtd_as;
-    int bus_num = pci_bus_num(bus);
+    int bus_num = pci_bus_num(dev->bus);
+    int devfn = dev->devfn;
 
     assert(0 <= bus_num && bus_num <= VTD_PCI_BUS_MAX);
     assert(0 <= devfn && devfn <= VTD_PCI_DEVFN_MAX);
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 6ce75aa..b077173 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -808,7 +808,6 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
     PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
     PCIConfigReadFunc *config_read = pc->config_read;
     PCIConfigWriteFunc *config_write = pc->config_write;
-    AddressSpace *dma_as;
 
     if (devfn < 0) {
         for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
@@ -827,11 +826,11 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
 
     pci_dev->bus = bus;
     pci_dev->devfn = devfn;
-    dma_as = pci_device_iommu_address_space(pci_dev);
+    pci_dev->dma_as = pci_device_iommu_address_space(pci_dev);
 
     memory_region_init_alias(&pci_dev->bus_master_enable_region,
                              OBJECT(pci_dev), "bus master",
-                             dma_as->root, 0, memory_region_size(dma_as->root));
+                             pci_dev->dma_as->root, 0, memory_region_size(pci_dev->dma_as->root));
     memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
     address_space_init(&pci_dev->bus_master_as, &pci_dev->bus_master_enable_region,
                        name);
@@ -2280,7 +2279,7 @@ AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
     PCIBus *bus = PCI_BUS(dev->bus);
 
     if (bus->iommu_fn) {
-        return bus->iommu_fn(bus, bus->iommu_opaque, dev->devfn);
+        return bus->iommu_fn(dev, bus->iommu_opaque);
     }
 
     if (bus->parent_dev) {
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index ad0da7f..656fae3 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -472,7 +472,7 @@ static const MemoryRegionOps spapr_msi_ops = {
 /*
  * PHB PCI device
  */
-static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
+static AddressSpace *spapr_pci_dma_iommu(PCIDevice *dev, void *opaque)
 {
     sPAPRPHBState *phb = opaque;
 
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index c352c7b..4bcf151 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -242,6 +242,7 @@ struct PCIDevice {
     char name[64];
     PCIIORegion io_regions[PCI_NUM_REGIONS];
     AddressSpace bus_master_as;
+    AddressSpace *dma_as;
     MemoryRegion bus_master_enable_region;
 
     /* do not access the following fields */
@@ -408,7 +409,8 @@ int pci_parse_devaddr(const char *addr, int *domp, int *busp,
 
 void pci_device_deassert_intx(PCIDevice *dev);
 
-typedef AddressSpace *(*PCIIOMMUFunc)(PCIBus *, void *, int);
+typedef AddressSpace *(*PCIIOMMUFunc)(PCIDevice *, void *);
+void pci_set_dma_address_space(AddressSpace *dma_address_space);
 
 AddressSpace *pci_device_iommu_address_space(PCIDevice *dev);
 void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque);
-- 
1.9.0

* [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges.
  2014-10-20 22:34 [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
  2014-10-20 22:34 ` [Qemu-devel] [PATCH 1/2] iommu: Replace bus+devfn arguments with PCIDevice* in PCIIOMMUFunc Knut Omang
@ 2014-10-20 22:34 ` Knut Omang
  2014-10-25 11:36   ` Jan Kiszka
                     ` (2 more replies)
  2014-10-20 23:29 ` [Qemu-devel] [PATCH 0/2] " Alexander Graf
  2 siblings, 3 replies; 20+ messages in thread
From: Knut Omang @ 2014-10-20 22:34 UTC (permalink / raw)
  To: qemu-devel
  Cc: Knut Omang, Michael S. Tsirkin, Michael Tokarev,
	Marcel Apfelbaum, Mark Cave-Ayland, Alexander Graf,
	Markus Armbruster, Andreas Färber, Hervé Poussineau,
	Anthony Liguori, Stefan Weil, qemu-ppc, Richard Henderson

- Add a call to pci_setup_iommu for the secondary bus in a bridge (see
  the sketch after this list).
- Refactor IntelIOMMUState to use a list instead of tables indexed by
  bus/devfn, as bus numbers can change dynamically.
- Instead, store a reference to the VTDAddressSpace as an AddressSpace
  pointer (dma_as) within PCIDevice.
- Pass a NULL dev to q35_host_dma_iommu to indicate a special (non-PCI)
  device (needed by the interrupt remapping logic).
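
For the first item, the resulting flow in pci_device_iommu_address_space()
(hw/pci/pci.c, as changed by patch 1) is roughly:

    if (bus->iommu_fn) {
        /* Now also taken for a bridge's secondary bus, because
         * pci_bridge_initfn() copies iommu_fn/iommu_opaque from the
         * parent bus; dev is the endpoint itself, not the bridge. */
        return bus->iommu_fn(dev, bus->iommu_opaque);
    }
    if (bus->parent_dev) {
        /* previously the only path for devices behind a bridge */
        ...
    }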

Signed-off-by: Knut Omang <knut.omang@oracle.com>
---
 hw/i386/intel_iommu.c         | 58 ++++++++++++++++++-------------------------
 hw/pci-host/q35.c             | 40 ++++++++++++-----------------
 hw/pci/pci_bridge.c           |  6 +++++
 include/hw/i386/intel_iommu.h |  6 +++--
 4 files changed, 50 insertions(+), 60 deletions(-)

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 0a4282a..d23c019 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -20,6 +20,7 @@
  */
 
 #include "hw/sysbus.h"
+#include "hw/pci/pci.h"
 #include "exec/address-spaces.h"
 #include "intel_iommu_internal.h"
 
@@ -30,6 +31,7 @@ enum {
     DEBUG_CACHE,
 };
 #define VTD_DBGBIT(x)   (1 << DEBUG_##x)
+
 static int vtd_dbgflags = VTD_DBGBIT(GENERAL) | VTD_DBGBIT(CSR);
 
 #define VTD_DPRINTF(what, fmt, ...) do { \
@@ -166,24 +168,11 @@ static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
  */
 static void vtd_reset_context_cache(IntelIOMMUState *s)
 {
-    VTDAddressSpace **pvtd_as;
     VTDAddressSpace *vtd_as;
-    uint32_t bus_it;
-    uint32_t devfn_it;
 
     VTD_DPRINTF(CACHE, "global context_cache_gen=1");
-    for (bus_it = 0; bus_it < VTD_PCI_BUS_MAX; ++bus_it) {
-        pvtd_as = s->address_spaces[bus_it];
-        if (!pvtd_as) {
-            continue;
-        }
-        for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
-            vtd_as = pvtd_as[devfn_it];
-            if (!vtd_as) {
-                continue;
-            }
-            vtd_as->context_cache_entry.context_cache_gen = 0;
-        }
+    QLIST_FOREACH(vtd_as, &s->address_spaces, iommu_next) {
+        vtd_as->context_cache_entry.context_cache_gen = 0;
     }
     s->context_cache_gen = 1;
 }
@@ -745,13 +734,11 @@ static inline bool vtd_is_interrupt_addr(hwaddr addr)
 
 /* Map dev to context-entry then do a paging-structures walk to do a iommu
  * translation.
- * @bus_num: The bus number
- * @devfn: The devfn, which is the  combined of device and function number
+ * @vtd_as: The address space to translate against
  * @is_write: The access is a write operation
  * @entry: IOMMUTLBEntry that contain the addr to be translated and result
  */
-static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, uint8_t bus_num,
-                                   uint8_t devfn, hwaddr addr, bool is_write,
+static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, hwaddr addr, bool is_write,
                                    IOMMUTLBEntry *entry)
 {
     IntelIOMMUState *s = vtd_as->iommu_state;
@@ -759,6 +746,8 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, uint8_t bus_num,
     VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
     uint64_t slpte;
     uint32_t level;
+    uint8_t bus_num = pci_bus_num(vtd_as->dev->bus);
+    uint8_t devfn = vtd_as->devfn;
     uint16_t source_id = vtd_make_source_id(bus_num, devfn);
     int ret_fr;
     bool is_fpd_set = false;
@@ -878,10 +867,10 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                           uint16_t func_mask)
 {
     uint16_t mask;
-    VTDAddressSpace **pvtd_as;
     VTDAddressSpace *vtd_as;
     uint16_t devfn;
-    uint16_t devfn_it;
+    uint16_t devfn_it = 0;
+    uint8_t bus_num, bus_num_it;
 
     switch (func_mask & 3) {
     case 0:
@@ -899,16 +888,18 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s,
     }
     VTD_DPRINTF(INV, "device-selective invalidation source 0x%"PRIx16
                     " mask %"PRIu16, source_id, mask);
-    pvtd_as = s->address_spaces[VTD_SID_TO_BUS(source_id)];
-    if (pvtd_as) {
-        devfn = VTD_SID_TO_DEVFN(source_id);
-        for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
-            vtd_as = pvtd_as[devfn_it];
-            if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
-                VTD_DPRINTF(INV, "invalidate context-cahce of devfn 0x%"PRIx16,
-                            devfn_it);
-                vtd_as->context_cache_entry.context_cache_gen = 0;
-            }
+    bus_num = VTD_SID_TO_BUS(source_id);
+    devfn = VTD_SID_TO_DEVFN(source_id);
+
+    QLIST_FOREACH(vtd_as, &s->address_spaces, iommu_next) {
+        bus_num_it = pci_bus_num(vtd_as->dev->bus);
+        if (bus_num_it != bus_num) {
+            continue;
+        }
+        if ((devfn_it & mask) == (devfn & mask)) {
+            VTD_DPRINTF(INV, "invalidate context-cache of devfn 0x%"PRIx16,
+                        devfn_it);
+            vtd_as->context_cache_entry.context_cache_gen = 0;
         }
     }
 }
@@ -1801,8 +1792,7 @@ static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
         return ret;
     }
 
-    vtd_do_iommu_translate(vtd_as, vtd_as->bus_num, vtd_as->devfn, addr,
-                           is_write, &ret);
+    vtd_do_iommu_translate(vtd_as, addr, is_write, &ret);
     VTD_DPRINTF(MMU,
                 "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
                 " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, vtd_as->bus_num,
@@ -1927,7 +1917,7 @@ static void vtd_realize(DeviceState *dev, Error **errp)
     IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
 
     VTD_DPRINTF(GENERAL, "");
-    memset(s->address_spaces, 0, sizeof(s->address_spaces));
+    QLIST_INIT(&s->address_spaces);
     memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                           "intel_iommu", DMAR_REG_SIZE);
     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
index c087c96..ae90a84 100644
--- a/hw/pci-host/q35.c
+++ b/hw/pci-host/q35.c
@@ -349,33 +349,25 @@ static void mch_reset(DeviceState *qdev)
 
 static AddressSpace *q35_host_dma_iommu(PCIDevice *dev, void *opaque)
 {
-    IntelIOMMUState *s = opaque;
-    VTDAddressSpace **pvtd_as;
-    int bus_num = pci_bus_num(dev->bus);
-    int devfn = dev->devfn;
-
-    assert(0 <= bus_num && bus_num <= VTD_PCI_BUS_MAX);
-    assert(0 <= devfn && devfn <= VTD_PCI_DEVFN_MAX);
-
-    pvtd_as = s->address_spaces[bus_num];
-    if (!pvtd_as) {
-        /* No corresponding free() */
-        pvtd_as = g_malloc0(sizeof(VTDAddressSpace *) * VTD_PCI_DEVFN_MAX);
-        s->address_spaces[bus_num] = pvtd_as;
+    VTDAddressSpace *as = NULL;
+    struct IntelIOMMUState *s = opaque;
+
+    if (dev && dev->dma_as) {
+        as = container_of(dev->dma_as, VTDAddressSpace, as);
     }
-    if (!pvtd_as[devfn]) {
-        pvtd_as[devfn] = g_malloc0(sizeof(VTDAddressSpace));
-
-        pvtd_as[devfn]->bus_num = (uint8_t)bus_num;
-        pvtd_as[devfn]->devfn = (uint8_t)devfn;
-        pvtd_as[devfn]->iommu_state = s;
-        pvtd_as[devfn]->context_cache_entry.context_cache_gen = 0;
-        memory_region_init_iommu(&pvtd_as[devfn]->iommu, OBJECT(s),
+    if (!as) {
+        as = g_malloc0(sizeof(VTDAddressSpace));
+        as->dev = dev;
+        as->devfn = dev->devfn;
+        as->iommu_state = s;
+        as->context_cache_entry.context_cache_gen = 0;
+        memory_region_init_iommu(&as->iommu, OBJECT(s),
                                  &s->iommu_ops, "intel_iommu", UINT64_MAX);
-        address_space_init(&pvtd_as[devfn]->as,
-                           &pvtd_as[devfn]->iommu, "intel_iommu");
+        address_space_init(&as->as,
+                           &as->iommu, "intel_iommu");
+        QLIST_INSERT_HEAD(&s->address_spaces, as, iommu_next);
     }
-    return &pvtd_as[devfn]->as;
+    return &as->as;
 }
 
 static void mch_init_dmar(MCHPCIState *mch)
diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c
index 40c97b1..e6832c4 100644
--- a/hw/pci/pci_bridge.c
+++ b/hw/pci/pci_bridge.c
@@ -376,8 +376,14 @@ int pci_bridge_initfn(PCIDevice *dev, const char *typename)
     sec_bus->address_space_io = &br->address_space_io;
     memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", 65536);
     br->windows = pci_bridge_region_init(br);
+
     QLIST_INIT(&sec_bus->child);
     QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling);
+
+    if (dev->bus->iommu_opaque) {
+        pci_setup_iommu(sec_bus, dev->bus->iommu_fn, dev->bus->iommu_opaque);
+    }
+
     return 0;
 }
 
diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index f4701e1..b349c6e 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -21,6 +21,7 @@
 
 #ifndef INTEL_IOMMU_H
 #define INTEL_IOMMU_H
+#include "qemu/queue.h"
 #include "hw/qdev.h"
 #include "sysemu/dma.h"
 
@@ -65,11 +66,12 @@ struct VTDContextCacheEntry {
 };
 
 struct VTDAddressSpace {
-    uint8_t bus_num;
+    PCIDevice *dev;
     uint8_t devfn;
     AddressSpace as;
     MemoryRegion iommu;
     IntelIOMMUState *iommu_state;
+    QLIST_ENTRY(VTDAddressSpace) iommu_next; /* For traversal by the iommu */
     VTDContextCacheEntry context_cache_entry;
 };
 
@@ -114,7 +116,7 @@ struct IntelIOMMUState {
     GHashTable *iotlb;              /* IOTLB */
 
     MemoryRegionIOMMUOps iommu_ops;
-    VTDAddressSpace **address_spaces[VTD_PCI_BUS_MAX];
+    QLIST_HEAD(, VTDAddressSpace) address_spaces;
 };
 
 #endif
-- 
1.9.0

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-20 22:34 [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
  2014-10-20 22:34 ` [Qemu-devel] [PATCH 1/2] iommu: Replace bus+devfn arguments with PCIDevice* in PCIIOMMUFunc Knut Omang
  2014-10-20 22:34 ` [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
@ 2014-10-20 23:29 ` Alexander Graf
  2014-10-21  5:26   ` Knut Omang
  2 siblings, 1 reply; 20+ messages in thread
From: Alexander Graf @ 2014-10-20 23:29 UTC (permalink / raw)
  To: Knut Omang
  Cc: jroedel, Michael S. Tsirkin, Michael Tokarev, Marcel Apfelbaum,
	Mark Cave-Ayland, qemu-devel, Markus Armbruster,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Richard Henderson




> Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
> 
> This patch set changes the data structure used to handle address spaces within
> the emulated Intel iommu to support traversal also if bus numbers are dynamically
> allocated, as is the case for devices that sit behind root ports or downstream switches.
> This means that we cannot use bus number as index, instead a QLIST is used.
> 
> This requires a change in the API for setup of IOMMUs which is taken care of by 
> the first patch. The second patch implements the fix.

Are you sure that this works on real hardware? How does that one communicate sub-bridge liodns to the iommu? How do they get indexed from software?

Alex

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-20 23:29 ` [Qemu-devel] [PATCH 0/2] " Alexander Graf
@ 2014-10-21  5:26   ` Knut Omang
  2014-10-21  9:07     ` Alexander Graf
  0 siblings, 1 reply; 20+ messages in thread
From: Knut Omang @ 2014-10-21  5:26 UTC (permalink / raw)
  To: Alexander Graf
  Cc: jroedel, Michael S. Tsirkin, Michael Tokarev, Marcel Apfelbaum,
	Mark Cave-Ayland, qemu-devel, Markus Armbruster,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Richard Henderson

On Tue, 2014-10-21 at 01:29 +0200, Alexander Graf wrote:
> 
> 
> > Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
> > 
> > This patch set changes the data structure used to handle address spaces within
> > the emulated Intel iommu to support traversal also if bus numbers are dynamically
> > allocated, as is the case for devices that sit behind root ports or downstream switches.
> > This means that we cannot use bus number as index, instead a QLIST is used.
> > 
> > This requires a change in the API for setup of IOMMUs which is taken care of by 
> > the first patch. The second patch implements the fix.
> 
> Are you sure that this works on real hardware? How does that one
> communicate sub-bridge liodns to the iommu? How do they get indexed
> from software?

I do not claim to fully understand the details of how this is
implemented in hardware, but I believe the implementation I propose here
should be functionally equivalent to what the Intel IOMMU offers, and
similar to the original implementation here, except that the data
structure is valid also before enumeration when behind buses.

After enumeration, the only difference would be that during
invalidation, there is a list search for the right bus rather than an
index lookup as before, slightly less efficient but at the benefit of
being independent of bus numbering during setup.
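
Concretely, the device-selective invalidation in patch 2 becomes a walk of
this form (skeleton only):

    bus_num = VTD_SID_TO_BUS(source_id);
    devfn = VTD_SID_TO_DEVFN(source_id);
    QLIST_FOREACH(vtd_as, &s->address_spaces, iommu_next) {
        if (pci_bus_num(vtd_as->dev->bus) != bus_num) {
            continue;
        }
        /* devfn/mask matching as before */
        ...
    }

instead of indexing s->address_spaces[bus_num] and scanning its devfn table.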

Wrt the currently implemented IOMMUs for other architectures, they were
all ignoring the bus argument anyway, so the API change did not make
much difference.

Knut

> Alex
> 

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-21  5:26   ` Knut Omang
@ 2014-10-21  9:07     ` Alexander Graf
  2014-10-21  9:35       ` Knut Omang
  0 siblings, 1 reply; 20+ messages in thread
From: Alexander Graf @ 2014-10-21  9:07 UTC (permalink / raw)
  To: Knut Omang
  Cc: jroedel, Alex Williamson, Michael S. Tsirkin, Michael Tokarev,
	Marcel Apfelbaum, Mark Cave-Ayland, qemu-devel,
	Markus Armbruster, Andreas Färber, Hervé Poussineau,
	Anthony Liguori, Stefan Weil, qemu-ppc, Richard Henderson




> Am 21.10.2014 um 07:26 schrieb Knut Omang <knut.omang@oracle.com>:
> 
>> On Tue, 2014-10-21 at 01:29 +0200, Alexander Graf wrote:
>> 
>> 
>>> Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
>>> 
>>> This patch set changes the data structure used to handle address spaces within
>>> the emulated Intel iommu to support traversal also if bus numbers are dynamically
>>> allocated, as is the case for devices that sit behind root ports or downstream switches.
>>> This means that we cannot use bus number as index, instead a QLIST is used.
>>> 
>>> This requires a change in the API for setup of IOMMUs which is taken care of by 
>>> the first patch. The second patch implements the fix.
>> 
>> Are you sure that this works on real hardware? How does that one
>> communicate sub-bridge liodns to the iommu? How do they get indexed
>> from software?
> 
> I do not claim to fully understand the details of how this is
> implemented in hardware, but I believe the implementation I propose here
> should be functionally equivalent to what the Intel IOMMU offers, and
> similar to the original implementation here, except that the data
> structure is valid also before enumeration when behind buses.

Can you please give me a pointer to the vt-d spec's section that explains iommu behavior behind bridges?

I've also added Alex W who has played with PCI bridges behind iommus quite a bit recently.

> 
> After enumeration, the only difference would be that during
> invalidation, there is a list search for the right bus rather than an
> index lookup as before, slightly less efficient but at the benefit of
> being independent of bus numbering during setup.

I don't think the implementation is bad, I'm just not sure that it follows the spec, so I want to confirm :).

Alex

> 
> Wrt the currently implemented IOMMUs for other architectures, they were
> all ignoring the bus argument anyway, so the API change did not make
> much difference.
> 
> Knut
> 
>> Alex
> 
> 

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-21  9:07     ` Alexander Graf
@ 2014-10-21  9:35       ` Knut Omang
  2014-10-21 11:15         ` Alexander Graf
  0 siblings, 1 reply; 20+ messages in thread
From: Knut Omang @ 2014-10-21  9:35 UTC (permalink / raw)
  To: Alexander Graf
  Cc: jroedel, Alex Williamson, Michael S. Tsirkin, Michael Tokarev,
	Marcel Apfelbaum, Mark Cave-Ayland, qemu-devel,
	Markus Armbruster, Andreas Färber, Hervé Poussineau,
	Anthony Liguori, Stefan Weil, qemu-ppc, Richard Henderson

On Tue, 2014-10-21 at 11:07 +0200, Alexander Graf wrote:
> 
> 
> > Am 21.10.2014 um 07:26 schrieb Knut Omang <knut.omang@oracle.com>:
> > 
> >> On Tue, 2014-10-21 at 01:29 +0200, Alexander Graf wrote:
> >> 
> >> 
> >>> Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
> >>> 
> >>> This patch set changes the data structure used to handle address spaces within
> >>> the emulated Intel iommu to support traversal also if bus numbers are dynamically
> >>> allocated, as is the case for devices that sit behind root ports or downstream switches.
> >>> This means that we cannot use bus number as index, instead a QLIST is used.
> >>> 
> >>> This requires a change in the API for setup of IOMMUs which is taken care of by 
> >>> the first patch. The second patch implements the fix.
> >> 
> >> Are you sure that this works on real hardware? How does that one
> >> communicate sub-bridge liodns to the iommu? How do they get indexed
> >> from software?
> > 
> > I do not claim to fully understand the details of how this is
> > implemented in hardware, but I believe the implementation I propose here
> > should be functionally equivalent to what the Intel IOMMU offers, and
> > similar to the original implementation here, except that the data
> > structure is valid also before enumeration when behind buses.
> 
> Can you please give me a pointer to the vt-d spec's section that explains iommu behavior behind bridges?
> 
> I've also added Alex W who has played with PCI bridges behind iommus quite a bit recently.
> 
> > 
> > After enumeration, the only difference would be that during
> > invalidation, there is a list search for the right bus rather than an
> > index lookup as before, slightly less efficient but at the benefit of
> > being independent of bus numbering during setup.
> 
> I don't think the implementation is bad, I'm just not sure that it follows the spec, 
> so I want to confirm :).

http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf

Knut

> Alex
> 
> > 
> > Wrt the currently implemented IOMMUs for other architectures, they were
> > all ignoring the bus argument anyway, so the API change did not make
> > much difference.
> > 
> > Knut
> > 
> >> Alex
> > 
> > 

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-21  9:35       ` Knut Omang
@ 2014-10-21 11:15         ` Alexander Graf
  2014-10-21 11:26           ` Michael S. Tsirkin
  2014-10-21 15:07           ` Alex Williamson
  0 siblings, 2 replies; 20+ messages in thread
From: Alexander Graf @ 2014-10-21 11:15 UTC (permalink / raw)
  To: Knut Omang
  Cc: jroedel, Alex Williamson, Michael S. Tsirkin, Michael Tokarev,
	Marcel Apfelbaum, Mark Cave-Ayland, qemu-devel,
	Markus Armbruster, Andreas Färber, Hervé Poussineau,
	Anthony Liguori, Stefan Weil, qemu-ppc, Richard Henderson



On 21.10.14 11:35, Knut Omang wrote:
> On Tue, 2014-10-21 at 11:07 +0200, Alexander Graf wrote:
>>
>>
>>> Am 21.10.2014 um 07:26 schrieb Knut Omang <knut.omang@oracle.com>:
>>>
>>>> On Tue, 2014-10-21 at 01:29 +0200, Alexander Graf wrote:
>>>>
>>>>
>>>>> Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
>>>>>
>>>>> This patch set changes the data structure used to handle address spaces within
>>>>> the emulated Intel iommu to support traversal also if bus numbers are dynamically
>>>>> allocated, as is the case for devices that sit behind root ports or downstream switches.
>>>>> This means that we cannot use bus number as index, instead a QLIST is used.
>>>>>
>>>>> This requires a change in the API for setup of IOMMUs which is taken care of by 
>>>>> the first patch. The second patch implements the fix.
>>>>
>>>> Are you sure that this works on real hardware? How does that one
>>>> communicate sub-bridge liodns to the iommu? How do they get indexed
>>>> from software?
>>>
>>> I do not claim to fully understand the details of how this is
>>> implemented in hardware, but I believe the implementation I propose here
>>> should be functionally equivalent to what the Intel IOMMU offers, and
>>> similar to the original implementation here, except that the data
>>> structure is valid also before enumeration when behind buses.
>>
>> Can you please give me a pointer to the vt-d spec's section that explains iommu behavior behind bridges?
>>
>> I've also added Alex W who has played with PCI bridges behind iommus quite a bit recently.
>>
>>>
>>> After enumeration, the only difference would be that during
>>> invalidation, there is a list search for the right bus rather than an
>>> index lookup as before, slightly less efficient but at the benefit of
>>> being independent of bus numbering during setup.
>>
>> I don't think the implementation is bad, I'm just not sure that it follows the spec, 
>> so I want to confirm :).
> 
> http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf

So if I understand that document correctly, a PCIe / PCI-X bridge can
swizzle the requester id depending on a device behind itself. PCI
bridges can not - there everything behind the bridge will appear as if
the DMA originated from the bridge device.

So conceptually, PCIe / PCI-X bridges should probably be the ones
converting requester IDs.
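
(For reference, the requester ID the IOMMU keys on is just bus << 8 | devfn,
e.g. for the plain PCI bridge case:

    uint16_t rid = (uint16_t)pci_bus_num(bridge->bus) << 8 | bridge->devfn;

so "originating from the bridge device" means the context-entry lookup is done
with the bridge's ID rather than the endpoint's.)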


Alex

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-21 11:15         ` Alexander Graf
@ 2014-10-21 11:26           ` Michael S. Tsirkin
  2014-10-21 11:37             ` Knut Omang
  2014-10-21 15:07           ` Alex Williamson
  1 sibling, 1 reply; 20+ messages in thread
From: Michael S. Tsirkin @ 2014-10-21 11:26 UTC (permalink / raw)
  To: Alexander Graf
  Cc: jroedel, Alex Williamson, Marcel Apfelbaum, Knut Omang,
	Mark Cave-Ayland, qemu-devel, Michael Tokarev, Markus Armbruster,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Richard Henderson

On Tue, Oct 21, 2014 at 01:15:14PM +0200, Alexander Graf wrote:
> 
> 
> On 21.10.14 11:35, Knut Omang wrote:
> > On Tue, 2014-10-21 at 11:07 +0200, Alexander Graf wrote:
> >>
> >>
> >>> Am 21.10.2014 um 07:26 schrieb Knut Omang <knut.omang@oracle.com>:
> >>>
> >>>> On Tue, 2014-10-21 at 01:29 +0200, Alexander Graf wrote:
> >>>>
> >>>>
> >>>>> Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
> >>>>>
> >>>>> This patch set changes the data structure used to handle address spaces within
> >>>>> the emulated Intel iommu to support traversal also if bus numbers are dynamically
> >>>>> allocated, as is the case for devices that sit behind root ports or downstream switches.
> >>>>> This means that we cannot use bus number as index, instead a QLIST is used.
> >>>>>
> >>>>> This requires a change in the API for setup of IOMMUs which is taken care of by 
> >>>>> the first patch. The second patch implements the fix.
> >>>>
> >>>> Are you sure that this works on real hardware? How does that one
> >>>> communicate sub-bridge liodns to the iommu? How do they get indexed
> >>>> from software?
> >>>
> >>> I do not claim to fully understand the details of how this is
> >>> implemented in hardware, but I believe the implementation I propose here
> >>> should be functionally equivalent to what the Intel IOMMU offers, and
> >>> similar to the original implementation here, except that the data
> >>> structure is valid also before enumeration when behind buses.
> >>
> >> Can you please give me a pointer to the vt-d spec's section that explains iommu behavior behind bridges?
> >>
> >> I've also added Alex W who has played with PCI bridges behind iommus quite a bit recently.
> >>
> >>>
> >>> After enumeration, the only difference would be that during
> >>> invalidation, there is a list search for the right bus rather than an
> >>> index lookup as before, slightly less efficient but at the benefit of
> >>> being independent of bus numbering during setup.
> >>
> >> I don't think the implementation is bad, I'm just not sure that it follows the spec, 
> >> so I want to confirm :).
> > 
> > http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf
> 
> So if I understand that document correctly, a PCIe / PCI-X bridge can
> swizzle the requester id depending on a device behind itself. PCI
> bridges can not - there everything behind the bridge will appear as if
> the DMA originated from the bridge device.
> 
> So conceptually, PCIe / PCI-X bridges should probably be the ones
> converting requester IDs.
> 
> 
> Alex

To avoid confusion, when you say PCIe / PCI-X you really mean
PCI-Express-to-PCI/PCI-X.

I think you got this right.

I'd like to add that regular PCIe to PCIe bridges just forward everything
without changes. This applies to root complex and downstream ports.

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-21 11:26           ` Michael S. Tsirkin
@ 2014-10-21 11:37             ` Knut Omang
  2014-10-21 12:20               ` Michael S. Tsirkin
  0 siblings, 1 reply; 20+ messages in thread
From: Knut Omang @ 2014-10-21 11:37 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: jroedel, Alex Williamson, Marcel Apfelbaum, Michael Tokarev,
	Mark Cave-Ayland, Alexander Graf, qemu-devel,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Markus Armbruster, Richard Henderson

On Tue, 2014-10-21 at 14:26 +0300, Michael S. Tsirkin wrote:
> On Tue, Oct 21, 2014 at 01:15:14PM +0200, Alexander Graf wrote:
> > 
> > 
> > On 21.10.14 11:35, Knut Omang wrote:
> > > On Tue, 2014-10-21 at 11:07 +0200, Alexander Graf wrote:
> > >>
> > >>
> > >>> Am 21.10.2014 um 07:26 schrieb Knut Omang <knut.omang@oracle.com>:
> > >>>
> > >>>> On Tue, 2014-10-21 at 01:29 +0200, Alexander Graf wrote:
> > >>>>
> > >>>>
> > >>>>> Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
> > >>>>>
> > >>>>> This patch set changes the data structure used to handle address spaces within
> > >>>>> the emulated Intel iommu to support traversal also if bus numbers are dynamically
> > >>>>> allocated, as is the case for devices that sit behind root ports or downstream switches.
> > >>>>> This means that we cannot use bus number as index, instead a QLIST is used.
> > >>>>>
> > >>>>> This requires a change in the API for setup of IOMMUs which is taken care of by 
> > >>>>> the first patch. The second patch implements the fix.
> > >>>>
> > >>>> Are you sure that this works on real hardware? How does that one
> > >>>> communicate sub-bridge liodns to the iommu? How do they get indexed
> > >>>> from software?
> > >>>
> > >>> I do not claim to fully understand the details of how this is
> > >>> implemented in hardware, but I believe the implementation I propose here
> > >>> should be functionally equivalent to what the Intel IOMMU offers, and
> > >>> similar to the original implementation here, except that the data
> > >>> structure is valid also before enumeration when behind buses.
> > >>
> > >> Can you please give me a pointer to the vt-d spec's section that explains iommu behavior behind bridges?
> > >>
> > >> I've also added Alex W who has played with PCI bridges behind iommus quite a bit recently.
> > >>
> > >>>
> > >>> After enumeration, the only difference would be that during
> > >>> invalidation, there is a list search for the right bus rather than an
> > >>> index lookup as before, slightly less efficient but at the benefit of
> > >>> being independent of bus numbering during setup.
> > >>
> > >> I don't think the implementation is bad, I'm just not sure that it follows the spec, 
> > >> so I want to confirm :).
> > > 
> > > http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf
> > 
> > So if I understand that document correctly, a PCIe / PCI-X bridge can
> > swizzle the requester id depending on a device behind itself. PCI
> > bridges can not - there everything behind the bridge will appear as if
> > the DMA originated from the bridge device.
> > 
> > So conceptually, PCIe / PCI-X bridges should probably be the ones
> > converting requester IDs.
> > 
> > 
> > Alex
> 
> To avoid confusion, when you say PCIe / PCI-X you really mean
> PCI-Express-to-PCI/PCI-X.
> 
> I think you got this right.
> 
> I'd like to add that regular PCIe to PCIe bridges just forward everything
> without changes. This applies to root complex and downstream ports.

My test case for this patch set has been just the root port/downstream
port case, where as you say the requester ID seen by the device in the
root port is always the same as the one used/enumerated by the host.

Knut

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-21 11:37             ` Knut Omang
@ 2014-10-21 12:20               ` Michael S. Tsirkin
  0 siblings, 0 replies; 20+ messages in thread
From: Michael S. Tsirkin @ 2014-10-21 12:20 UTC (permalink / raw)
  To: Knut Omang
  Cc: jroedel, Alex Williamson, Marcel Apfelbaum, Michael Tokarev,
	Mark Cave-Ayland, Alexander Graf, qemu-devel,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Markus Armbruster, Richard Henderson

On Tue, Oct 21, 2014 at 01:37:33PM +0200, Knut Omang wrote:
> On Tue, 2014-10-21 at 14:26 +0300, Michael S. Tsirkin wrote:
> > On Tue, Oct 21, 2014 at 01:15:14PM +0200, Alexander Graf wrote:
> > > 
> > > 
> > > On 21.10.14 11:35, Knut Omang wrote:
> > > > On Tue, 2014-10-21 at 11:07 +0200, Alexander Graf wrote:
> > > >>
> > > >>
> > > >>> Am 21.10.2014 um 07:26 schrieb Knut Omang <knut.omang@oracle.com>:
> > > >>>
> > > >>>> On Tue, 2014-10-21 at 01:29 +0200, Alexander Graf wrote:
> > > >>>>
> > > >>>>
> > > >>>>> Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
> > > >>>>>
> > > >>>>> This patch set changes the data structure used to handle address spaces within
> > > >>>>> the emulated Intel iommu to support traversal also if bus numbers are dynamically
> > > >>>>> allocated, as is the case for devices that sit behind root ports or downstream switches.
> > > >>>>> This means that we cannot use bus number as index, instead a QLIST is used.
> > > >>>>>
> > > >>>>> This requires a change in the API for setup of IOMMUs which is taken care of by 
> > > >>>>> the first patch. The second patch implements the fix.
> > > >>>>
> > > >>>> Are you sure that this works on real hardware? How does that one
> > > >>>> communicate sub-bridge liodns to the iommu? How do they get indexed
> > > >>>> from software?
> > > >>>
> > > >>> I do not claim to fully understand the details of how this is
> > > >>> implemented in hardware, but I believe the implementation I propose here
> > > >>> should be functionally equivalent to what the Intel IOMMU offers, and
> > > >>> similar to the original implementation here, except that the data
> > > >>> structure is valid also before enumeration when behind buses.
> > > >>
> > > >> Can you please give me a pointer to the vt-d spec's section that explains iommu behavior behind bridges?
> > > >>
> > > >> I've also added Alex W who has played with PCI bridges behind iommus quite a bit recently.
> > > >>
> > > >>>
> > > >>> After enumeration, the only difference would be that during
> > > >>> invalidation, there is a list search for the right bus rather than an
> > > >>> index lookup as before, slightly less efficient but at the benefit of
> > > >>> being independent of bus numbering during setup.
> > > >>
> > > >> I don't think the implementation is bad, I'm just not sure that it follows the spec, 
> > > >> so I want to confirm :).
> > > > 
> > > > http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf
> > > 
> > > So if I understand that document correctly, a PCIe / PCI-X bridge can
> > > swizzle the requester id depending on a device behind itself. PCI
> > > bridges can not - there everything behind the bridge will appear as if
> > > the DMA originated from the bridge device.
> > > 
> > > So conceptually, PCIe / PCI-X bridges should probably be the ones
> > > converting requester IDs.
> > > 
> > > 
> > > Alex
> > 
> > To avoid confusion, when you say PCIe / PCI-X you really mean
> > PCI-Express-to-PCI/PCI-X.
> > 
> > I think you got this right.
> > 
> > I'd like to add that regular PCIe to PCIe bridges just forward everything
> > without changes. This applies to root complex and downstream ports.
> 
> My test case for this patch set has been just the root port/downstream
> port case, where as you say the requester ID seen by the device in the
> root port is always the same as the one used/enumerated by the host.
> 
> Knut
> 

So pci/pci and pcie/pci cases are still TBD.
Again, if we can't make it in time for 2.2,
I'm fine with disallowing these configurations.

-- 
MST

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-21 11:15         ` Alexander Graf
  2014-10-21 11:26           ` Michael S. Tsirkin
@ 2014-10-21 15:07           ` Alex Williamson
  2014-10-21 22:26             ` Michael S. Tsirkin
  1 sibling, 1 reply; 20+ messages in thread
From: Alex Williamson @ 2014-10-21 15:07 UTC (permalink / raw)
  To: Alexander Graf
  Cc: jroedel, Marcel Apfelbaum, Knut Omang, Michael S. Tsirkin,
	Michael Tokarev, qemu-devel, Markus Armbruster,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Mark Cave-Ayland, Richard Henderson

On Tue, 2014-10-21 at 13:15 +0200, Alexander Graf wrote:
> 
> On 21.10.14 11:35, Knut Omang wrote:
> > On Tue, 2014-10-21 at 11:07 +0200, Alexander Graf wrote:
> >>
> >>
> >>> Am 21.10.2014 um 07:26 schrieb Knut Omang <knut.omang@oracle.com>:
> >>>
> >>>> On Tue, 2014-10-21 at 01:29 +0200, Alexander Graf wrote:
> >>>>
> >>>>
> >>>>> Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
> >>>>>
> >>>>> This patch set changes the data structure used to handle address spaces within
> >>>>> the emulated Intel iommu to support traversal also if bus numbers are dynamically
> >>>>> allocated, as is the case for devices that sit behind root ports or downstream switches.
> >>>>> This means that we cannot use bus number as index, instead a QLIST is used.
> >>>>>
> >>>>> This requires a change in the API for setup of IOMMUs which is taken care of by 
> >>>>> the first patch. The second patch implements the fix.
> >>>>
> >>>> Are you sure that this works on real hardware? How does that one
> >>>> communicate sub-bridge liodns to the iommu? How do they get indexed
> >>>> from software?
> >>>
> >>> I do not claim to fully understand the details of how this is
> >>> implemented in hardware, but I believe the implementation I propose here
> >>> should be functionally equivalent to what the Intel IOMMU offers, and
> >>> similar to the original implementation here, except that the data
> >>> structure is valid also before enumeration when behind buses.
> >>
> >> Can you please give me a pointer to the vt-d spec's section that explains iommu behavior behind bridges?
> >>
> >> I've also added Alex W who has played with PCI bridges behind iommus quite a bit recently.
> >>
> >>>
> >>> After enumeration, the only difference would be that during
> >>> invalidation, there is a list search for the right bus rather than an
> >>> index lookup as before, slightly less efficient but at the benefit of
> >>> being independent of bus numbering during setup.
> >>
> >> I don't think the implementation is bad, I'm just not sure that it follows the spec, 
> >> so I want to confirm :).
> > 
> > http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf
> 
> So if I understand that document correctly, a PCIe / PCI-X bridge can
> swizzle the requester id depending on a device behind itself. PCI
> bridges can not - there everything behind the bridge will appear as if
> the DMA originated from the bridge device.
> 
> So conceptually, PCIe / PCI-X bridges should probably be the ones
> converting requester IDs.

PCIe-to-PCI/X bridges alias requester IDs to the subordinate bus, so all
requests appear to come from subordinate-bus-number:00.0.  PCI-X does
support a requester ID, but there are numerous rules where the bridge
can take ownership of the transaction that require the IOMMU to handle
bridges as an alias of the device.  PCI bridges alias all downstream
devices as the bridge itself, but if you look at
quirk_use_pcie_bridge_dma_alias() in the kernel, there are numerous
cases where bridges behave like a PCIe-to-PCI bridge, but fail to
include a PCIe capability.  Thanks,

Alex

* Re: [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges
  2014-10-21 15:07           ` Alex Williamson
@ 2014-10-21 22:26             ` Michael S. Tsirkin
  0 siblings, 0 replies; 20+ messages in thread
From: Michael S. Tsirkin @ 2014-10-21 22:26 UTC (permalink / raw)
  To: Alex Williamson
  Cc: jroedel, Marcel Apfelbaum, Mark Cave-Ayland, Michael Tokarev,
	Alexander Graf, qemu-devel, Andreas Färber, Knut Omang,
	Hervé Poussineau, Anthony Liguori, Stefan Weil, qemu-ppc,
	Markus Armbruster, Richard Henderson

On Tue, Oct 21, 2014 at 09:07:40AM -0600, Alex Williamson wrote:
> On Tue, 2014-10-21 at 13:15 +0200, Alexander Graf wrote:
> > 
> > On 21.10.14 11:35, Knut Omang wrote:
> > > On Tue, 2014-10-21 at 11:07 +0200, Alexander Graf wrote:
> > >>
> > >>
> > >>> Am 21.10.2014 um 07:26 schrieb Knut Omang <knut.omang@oracle.com>:
> > >>>
> > >>>> On Tue, 2014-10-21 at 01:29 +0200, Alexander Graf wrote:
> > >>>>
> > >>>>
> > >>>>> Am 21.10.2014 um 00:34 schrieb Knut Omang <knut.omang@oracle.com>:
> > >>>>>
> > >>>>> This patch set changes the data structure used to handle address spaces within
> > >>>>> the emulated Intel iommu to support traversal also if bus numbers are dynamically
> > >>>>> allocated, as is the case for devices that sit behind root ports or downstream switches.
> > >>>>> This means that we cannot use bus number as index, instead a QLIST is used.
> > >>>>>
> > >>>>> This requires a change in the API for setup of IOMMUs which is taken care of by 
> > >>>>> the first patch. The second patch implements the fix.
> > >>>>
> > >>>> Are you sure that this works on real hardware? How does that one
> > >>>> communicate sub-bridge liodns to the iommu? How do they get indexed
> > >>>> from software?
> > >>>
> > >>> I do not claim to fully understand the details of how this is
> > >>> implemented in hardware, but I believe the implementation I propose here
> > >>> should be functionally equivalent to what the Intel IOMMU offers, and
> > >>> similar to the original implementation here, except that the data
> > >>> structure is valid also before enumeration when behind buses.
> > >>
> > >> Can you please give me a pointer to the vt-d spec's section that explains iommu behavior behind bridges?
> > >>
> > >> I've also added Alex W who has played with PCI bridges behind iommus quite a bit recently.
> > >>
> > >>>
> > >>> After enumeration, the only difference would be that during
> > >>> invalidation, there is a list search for the right bus rather than an
> > >>> index lookup as before, slightly less efficient but at the benefit of
> > >>> being independent of bus numbering during setup.
> > >>
> > >> I don't think the implementation is bad, I'm just not sure that it follows the spec, 
> > >> so I want to confirm :).
> > > 
> > > http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf
> > 
> > So if I understand that document correctly, a PCIe / PCI-X bridge can
> > swizzle the requester id depending on a device behind itself. PCI
> > bridges can not - there everything behind the bridge will appear as if
> > the DMA originated from the bridge device.
> > 
> > So conceptually, PCIe / PCI-X bridges should probably be the ones
> > converting requester IDs.
> 
> PCIe-to-PCI/X bridges alias requester IDs to the subordinate bus, so all
> requests appear to come from subordinate-bus-number:00.0.  PCI-X does
> support a requester ID, but there are numerous rules where the bridge
> can take ownership of the transaction that require the IOMMU to handle
> bridges as an alias of the device.  PCI bridges alias all downstream
> devices as the bridge itself, but if you look at
> quirk_use_pcie_bridge_dma_alias() in the kernel, there are numerous
> cases where bridges behave like a PCIe-to-PCI bridge, but fail to
> include a PCIe capability.  Thanks,
> 
> Alex

In fact, apparently exactly 4 such bridges exist. We don't emulate them,
nor do we emulate pci-x, so it's pretty simple I think.

-- 
MST

* Re: [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges.
  2014-10-20 22:34 ` [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
@ 2014-10-25 11:36   ` Jan Kiszka
  2014-10-25 12:24     ` Jan Kiszka
  2014-10-25 12:28   ` Jan Kiszka
  2014-10-26 12:06   ` Jan Kiszka
  2 siblings, 1 reply; 20+ messages in thread
From: Jan Kiszka @ 2014-10-25 11:36 UTC (permalink / raw)
  To: Knut Omang, qemu-devel
  Cc: Marcel Apfelbaum, Michael Tokarev, Michael S. Tsirkin,
	Mark Cave-Ayland, Alexander Graf, Markus Armbruster,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Richard Henderson

On 2014-10-21 00:34, Knut Omang wrote:
> diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c
> index 40c97b1..e6832c4 100644
> --- a/hw/pci/pci_bridge.c
> +++ b/hw/pci/pci_bridge.c
> @@ -376,8 +376,14 @@ int pci_bridge_initfn(PCIDevice *dev, const char *typename)
>      sec_bus->address_space_io = &br->address_space_io;
>      memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", 65536);
>      br->windows = pci_bridge_region_init(br);
> +
>      QLIST_INIT(&sec_bus->child);
>      QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling);
> +
> +    if (dev->bus->iommu_opaque) {
> +        pci_setup_iommu(sec_bus, dev->bus->iommu_fn, dev->bus->iommu_opaque);
> +    }
> +

So, if I followed the discussion in the cover-letter thread correctly,
this should rather move into the bridge device init functions because
the PCI[e]-PCI bridge ("pci-bridge") would not call it, right?

Jan



* Re: [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges.
  2014-10-25 11:36   ` Jan Kiszka
@ 2014-10-25 12:24     ` Jan Kiszka
  2014-10-26  4:46       ` Knut Omang
  0 siblings, 1 reply; 20+ messages in thread
From: Jan Kiszka @ 2014-10-25 12:24 UTC (permalink / raw)
  To: Knut Omang, qemu-devel
  Cc: Marcel Apfelbaum, Michael Tokarev, Michael S. Tsirkin,
	Mark Cave-Ayland, Alexander Graf, Markus Armbruster,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Richard Henderson

On 2014-10-25 13:36, Jan Kiszka wrote:
> On 2014-10-21 00:34, Knut Omang wrote:
>> diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c
>> index 40c97b1..e6832c4 100644
>> --- a/hw/pci/pci_bridge.c
>> +++ b/hw/pci/pci_bridge.c
>> @@ -376,8 +376,14 @@ int pci_bridge_initfn(PCIDevice *dev, const char *typename)
>>      sec_bus->address_space_io = &br->address_space_io;
>>      memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", 65536);
>>      br->windows = pci_bridge_region_init(br);
>> +
>>      QLIST_INIT(&sec_bus->child);
>>      QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling);
>> +
>> +    if (dev->bus->iommu_opaque) {
>> +        pci_setup_iommu(sec_bus, dev->bus->iommu_fn, dev->bus->iommu_opaque);
>> +    }
>> +
> 
> So, if I followed the discussion in the cover-letter thread correctly,
> this should rather move into the bridge device init functions because
> the PCI[e]-PCI bridge ("pci-bridge") would not call it, right?

Not right. We need the setup in any case (except for the virtio bridges
I'm currently thinking of for encapsulating non-translatable virtio
devices). But something still has to change to reflect the requester ID
aliasing of the PCIe-PCI bridge, no?

Jan



* Re: [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges.
  2014-10-20 22:34 ` [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
  2014-10-25 11:36   ` Jan Kiszka
@ 2014-10-25 12:28   ` Jan Kiszka
  2014-10-26 12:06   ` Jan Kiszka
  2 siblings, 0 replies; 20+ messages in thread
From: Jan Kiszka @ 2014-10-25 12:28 UTC (permalink / raw)
  To: Knut Omang, qemu-devel
  Cc: Marcel Apfelbaum, Michael Tokarev, Michael S. Tsirkin,
	Mark Cave-Ayland, Alexander Graf, Markus Armbruster,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Richard Henderson

On 2014-10-21 00:34, Knut Omang wrote:
> @@ -1801,8 +1792,7 @@ static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
>          return ret;
>      }
>  
> -    vtd_do_iommu_translate(vtd_as, vtd_as->bus_num, vtd_as->devfn, addr,
> -                           is_write, &ret);
> +    vtd_do_iommu_translate(vtd_as, addr, is_write, &ret);
>      VTD_DPRINTF(MMU,
>                  "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
>                  " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, vtd_as->bus_num,

You need to update the VTD_DPRINTF as well when removing bus_num from
VTDAddressSpace.
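
E.g. something like (remaining arguments unchanged):

    VTD_DPRINTF(MMU,
                "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
                " gpa 0x%"PRIx64 " hpa 0x%"PRIx64,
                pci_bus_num(vtd_as->dev->bus), ...);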

Jan



* Re: [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges.
  2014-10-25 12:24     ` Jan Kiszka
@ 2014-10-26  4:46       ` Knut Omang
  0 siblings, 0 replies; 20+ messages in thread
From: Knut Omang @ 2014-10-26  4:46 UTC (permalink / raw)
  To: Jan Kiszka
  Cc: Marcel Apfelbaum, Michael Tokarev, Michael S. Tsirkin,
	Mark Cave-Ayland, Alexander Graf, qemu-devel,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Markus Armbruster, Richard Henderson

On Sat, 2014-10-25 at 14:24 +0200, Jan Kiszka wrote:
> On 2014-10-25 13:36, Jan Kiszka wrote:
> > On 2014-10-21 00:34, Knut Omang wrote:
> >> diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c
> >> index 40c97b1..e6832c4 100644
> >> --- a/hw/pci/pci_bridge.c
> >> +++ b/hw/pci/pci_bridge.c
> >> @@ -376,8 +376,14 @@ int pci_bridge_initfn(PCIDevice *dev, const char *typename)
> >>      sec_bus->address_space_io = &br->address_space_io;
> >>      memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", 65536);
> >>      br->windows = pci_bridge_region_init(br);
> >> +
> >>      QLIST_INIT(&sec_bus->child);
> >>      QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling);
> >> +
> >> +    if (dev->bus->iommu_opaque) {
> >> +        pci_setup_iommu(sec_bus, dev->bus->iommu_fn, dev->bus->iommu_opaque);
> >> +    }
> >> +
> > 
> > So, if I followed the discussion in the cover-letter thread correctly,
> > this should rather move into the bridge device init functions because
> > the PCI[e]-PCI bridge ("pci-bridge") would not call it, right?
> 
> Not right. We need the setup in any case (except for the virtio bridges
> I'm currently thinking of for encapsulating non-translatable virtio
> devices). But something still has to change to reflect the requester ID
> aliasing of the PCIe-PCI bridge, no?

Yes, that's my understanding too.

Knut

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges.
  2014-10-20 22:34 ` [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
  2014-10-25 11:36   ` Jan Kiszka
  2014-10-25 12:28   ` Jan Kiszka
@ 2014-10-26 12:06   ` Jan Kiszka
  2014-10-26 13:15     ` Knut Omang
  2 siblings, 1 reply; 20+ messages in thread
From: Jan Kiszka @ 2014-10-26 12:06 UTC (permalink / raw)
  To: Knut Omang, qemu-devel
  Cc: Marcel Apfelbaum, Michael Tokarev, Michael S. Tsirkin,
	Mark Cave-Ayland, Alexander Graf, Markus Armbruster,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Richard Henderson

[-- Attachment #1: Type: text/plain, Size: 722 bytes --]

On 2014-10-21 00:34, Knut Omang wrote:
> @@ -65,11 +66,12 @@ struct VTDContextCacheEntry {
>  };
>  
>  struct VTDAddressSpace {
> -    uint8_t bus_num;
> +    PCIDevice *dev;

This change is not helpful for clean handling of non-PCI devices (i.e.
platform device interrupt remapping => you had to pull
Q35_PSEUDO_BUS_PLATFORM into intel_iommu, which is violating the
layering). Please leave bus_num in place - or convert to a 16-bit SID.
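
The SID encoding would just be bus << 8 | devfn, i.e. something like
(sketch only, helper and macro names made up):

    static inline uint16_t make_source_id(uint8_t bus_num, uint8_t devfn)
    {
        return ((uint16_t)bus_num << 8) | devfn;
    }

    #define SID_TO_BUS(sid)      (((sid) >> 8) & 0xff)
    #define SID_TO_DEVFN(sid)    ((sid) & 0xff)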

>      uint8_t devfn;
>      AddressSpace as;
>      MemoryRegion iommu;
>      IntelIOMMUState *iommu_state;
> +    QLIST_ENTRY(VTDAddressSpace) iommu_next; /* For traversal by the iommu */
>      VTDContextCacheEntry context_cache_entry;
>  };
>  

Jan



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 198 bytes --]

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges.
  2014-10-26 12:06   ` Jan Kiszka
@ 2014-10-26 13:15     ` Knut Omang
  2014-10-26 15:20       ` Michael S. Tsirkin
  0 siblings, 1 reply; 20+ messages in thread
From: Knut Omang @ 2014-10-26 13:15 UTC (permalink / raw)
  To: Jan Kiszka
  Cc: Marcel Apfelbaum, Michael Tokarev, Michael S. Tsirkin,
	Mark Cave-Ayland, Alexander Graf, qemu-devel,
	Andreas Färber, Hervé Poussineau, Anthony Liguori,
	Stefan Weil, qemu-ppc, Markus Armbruster, Richard Henderson

On Sun, 2014-10-26 at 13:06 +0100, Jan Kiszka wrote:
> On 2014-10-21 00:34, Knut Omang wrote:
> > @@ -65,11 +66,12 @@ struct VTDContextCacheEntry {
> >  };
> >  
> >  struct VTDAddressSpace {
> > -    uint8_t bus_num;
> > +    PCIDevice *dev;
> 
> This change is not helpful for clean handling of non-PCI devices (i.e.
> platform device interrupt remapping => you had to pull
> Q35_PSEUDO_BUS_PLATFORM into intel_iommu, which is violating the
> layering). Please leave bus_num in place - or convert to a 16-bit SID.

Hmm - I see. The problem I tried to solve is that the bus number of
devices below a root port or downstream switch has not been initialized
yet when q35_host_dma_iommu is called, so the device behind the root
port gets indexed as if it were on bus 0.
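
As far as I can tell pci_bus_num() just reads the bridge's secondary bus
number register, which firmware has not programmed yet at that point, so
in q35_host_dma_iommu we effectively get (illustration only):

    uint8_t bus_num = pci_bus_num(dev->bus);  /* 0 for any bus behind a
                                                 not-yet-programmed
                                                 root port */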

I am not that familiar with what types of non-PCI devices exist, but I
suppose moving this up to the most generic device type that has a bus
associated with it is one way to go.

An alternative implementation that would work in the Intel case would be
to keep the list in intel_iommu but provide a callback that IOMMUs can
subscribe to in order to be notified when bus numbers change?
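
Very roughly something like this, all names invented, just to show the
idea:

    /* The PCI core would call registered notifiers whenever a bridge's
     * PCI_SECONDARY_BUS register is rewritten by the guest
     * (pci_bridge_write_config would be the natural place to hook it).
     */
    typedef void (*PCIBusNumNotifier)(PCIBus *bus, void *opaque);

    void pci_register_bus_num_notifier(PCIBus *bus, PCIBusNumNotifier fn,
                                       void *opaque);

intel_iommu would then register itself and re-derive its bus-number
based indexing from the callback.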

Knut

> >      uint8_t devfn;
> >      AddressSpace as;
> >      MemoryRegion iommu;
> >      IntelIOMMUState *iommu_state;
> > +    QLIST_ENTRY(VTDAddressSpace) iommu_next; /* For traversal by the iommu */
> >      VTDContextCacheEntry context_cache_entry;
> >  };
> >  
> 
> Jan
> 
> 

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges.
  2014-10-26 13:15     ` Knut Omang
@ 2014-10-26 15:20       ` Michael S. Tsirkin
  0 siblings, 0 replies; 20+ messages in thread
From: Michael S. Tsirkin @ 2014-10-26 15:20 UTC (permalink / raw)
  To: Knut Omang
  Cc: qemu-ppc, Marcel Apfelbaum, Mark Cave-Ayland, Michael Tokarev,
	qemu-devel, Alexander Graf, Andreas Färber, pbonzini,
	Jan Kiszka, Anthony Liguori, Stefan Weil, Hervé Poussineau,
	Markus Armbruster, Richard Henderson

On Sun, Oct 26, 2014 at 02:15:24PM +0100, Knut Omang wrote:
> On Sun, 2014-10-26 at 13:06 +0100, Jan Kiszka wrote:
> > On 2014-10-21 00:34, Knut Omang wrote:
> > > @@ -65,11 +66,12 @@ struct VTDContextCacheEntry {
> > >  };
> > >  
> > >  struct VTDAddressSpace {
> > > -    uint8_t bus_num;
> > > +    PCIDevice *dev;
> > 
> > This change is not helpful for clean handling of non-PCI devices (i.e.
> > platform device interrupt remapping => you had to pull
> > Q35_PSEUDO_BUS_PLATFORM into intel_iommu, which is violating the
> > layering). Please leave bus_num in place - or convert to a 16-bit SID.
> 
> Hmm - I see. The problem I tried to solve is that the bus number of
> devices below a root port or downstream switch has not been initialized
> yet when q35_host_dma_iommu is called, so the device behind the root
> port gets indexed as if it were on bus 0.
> I am not that familiar with what types of non-PCI devices exist, but I
> suppose moving this up to the most generic device type that has a bus
> associated with it is one way to go.
> 
> An alternative implementation that would work in the Intel case would be
> to keep the list in intel_iommu but provide a callback that IOMMUs can
> subscribe to in order to be notified when bus numbers change?
> 
> Knut

I dislike callbacks.

IMO the right thing as usual is to do what real hardware does.

After all, it's the devices that put the requester ID in transactions.

How about we add a "source id" to the AddressSpace structure, or add a
wrapper structure that includes the AddressSpace and a source id, and
have the pci core update it on config writes to the bridge bus number
registers.
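
Something like (sketch only, field and function names made up):

    typedef struct PCIAddressSpace {
        AddressSpace as;
        uint16_t source_id;    /* bus number << 8 | devfn */
    } PCIAddressSpace;

    /* refresh everything below a bridge when the guest rewrites its
     * secondary bus number register */
    static void pci_update_source_ids(PCIBus *bus)
    {
        int i;

        for (i = 0; i < ARRAY_SIZE(bus->devices); i++) {
            PCIDevice *d = bus->devices[i];

            if (d) {
                d->pci_as.source_id = (pci_bus_num(bus) << 8) | d->devfn;
            }
        }
    }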

Paolo, would that be ok with you?


> > >      uint8_t devfn;
> > >      AddressSpace as;
> > >      MemoryRegion iommu;
> > >      IntelIOMMUState *iommu_state;
> > > +    QLIST_ENTRY(VTDAddressSpace) iommu_next; /* For traversal by the iommu */
> > >      VTDContextCacheEntry context_cache_entry;
> > >  };
> > >  
> > 
> > Jan
> > 
> > 
> 

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2014-10-26 15:20 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-10-20 22:34 [Qemu-devel] [PATCH 0/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
2014-10-20 22:34 ` [Qemu-devel] [PATCH 1/2] iommu: Replace bus+devfn arguments with PCIDevice* in PCIIOMMUFunc Knut Omang
2014-10-20 22:34 ` [Qemu-devel] [PATCH 2/2] intel_iommu: Add support for translation for devices behind bridges Knut Omang
2014-10-25 11:36   ` Jan Kiszka
2014-10-25 12:24     ` Jan Kiszka
2014-10-26  4:46       ` Knut Omang
2014-10-25 12:28   ` Jan Kiszka
2014-10-26 12:06   ` Jan Kiszka
2014-10-26 13:15     ` Knut Omang
2014-10-26 15:20       ` Michael S. Tsirkin
2014-10-20 23:29 ` [Qemu-devel] [PATCH 0/2] " Alexander Graf
2014-10-21  5:26   ` Knut Omang
2014-10-21  9:07     ` Alexander Graf
2014-10-21  9:35       ` Knut Omang
2014-10-21 11:15         ` Alexander Graf
2014-10-21 11:26           ` Michael S. Tsirkin
2014-10-21 11:37             ` Knut Omang
2014-10-21 12:20               ` Michael S. Tsirkin
2014-10-21 15:07           ` Alex Williamson
2014-10-21 22:26             ` Michael S. Tsirkin
