From: "Liu, Yi L" <yi.l.liu@intel.com>
To: qemu-devel@nongnu.org, david@gibson.dropbear.id.au,
	pbonzini@redhat.com, alex.williamson@redhat.com,
	peterx@redhat.com
Cc: mst@redhat.com, eric.auger@redhat.com, kevin.tian@intel.com,
	yi.l.liu@intel.com, jun.j.tian@intel.com, yi.y.sun@intel.com,
	kvm@vger.kernel.org, hao.wu@intel.com,
	Jacob Pan <jacob.jun.pan@linux.intel.com>,
	Yi Sun <yi.y.sun@linux.intel.com>,
	Richard Henderson <rth@twiddle.net>,
	Eduardo Habkost <ehabkost@redhat.com>
Subject: [RFC v3 19/25] intel_iommu: replay guest pasid bindings to host
Date: Wed, 29 Jan 2020 04:16:50 -0800	[thread overview]
Message-ID: <1580300216-86172-20-git-send-email-yi.l.liu@intel.com> (raw)
In-Reply-To: <1580300216-86172-1-git-send-email-yi.l.liu@intel.com>

From: Liu Yi L <yi.l.liu@intel.com>

This patch adds replay of guest PASID bindings for domain-selective
PASID cache invalidation (DSI) and global PASID cache invalidation,
done by walking the guest PASID table.

Reason:
The guest OS may flush the PASID cache at a larger granularity than
strictly needed. E.g. the guest does an svm_bind() but then flushes
the PASID cache with a global or domain-selective invalidation
instead of a PASID-selective (PSI) one. On bare metal this works:
per spec, a global or domain-selective PASID cache invalidation
covers everything a PASID-selective invalidation does. The only
concern is a performance penalty, since DSI and global invalidation
flush more than PSI. To align with native behavior, the vIOMMU
emulator needs to replay the guest PASID bindings for these two
invalidation granularities so that the host reflects the latest
bindings in the guest PASID table.

Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Yi Sun <yi.y.sun@linux.intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
---
 hw/i386/intel_iommu.c          | 156 ++++++++++++++++++++++++++++++++++++++---
 hw/i386/intel_iommu_internal.h |   1 +
 2 files changed, 147 insertions(+), 10 deletions(-)
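
As background for the walk below: in VT-d scalable mode a PASID is
split into a PASID directory index (PASID[19:6]) and a leaf PASID
table index (PASID[5:0]), so one directory entry covers 64 PASIDs,
which is why the outer walk steps by VTD_PASID_TBL_ENTRY_NUM (1 << 6).
A minimal standalone sketch of that split follows; it is illustrative
only, not part of this patch, and the constant and program names are
made up:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: 64 PASIDs are covered by one PASID directory
 * entry, matching VTD_PASID_TBL_ENTRY_NUM (1 << 6) added below. */
#define SKETCH_PASIDS_PER_LEAF_TABLE  (1u << 6)

int main(void)
{
    uint32_t pasid;

    for (pasid = 0; pasid < 130; pasid++) {
        uint32_t dir_index  = pasid >> 6;    /* PASID[19:6]: directory slot */
        uint32_t leaf_index = pasid & 0x3f;  /* PASID[5:0]: leaf table slot */

        if (leaf_index == 0) {
            printf("dir entry %u covers PASIDs %u..%u\n", dir_index, pasid,
                   pasid + SKETCH_PASIDS_PER_LEAF_TABLE - 1);
        }
    }
    return 0;
}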

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 319b3df..1665843 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -68,6 +68,8 @@ static void vtd_address_space_refresh_all(IntelIOMMUState *s);
 static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n);
 
 static void vtd_pasid_cache_reset(IntelIOMMUState *s);
+static int vtd_update_pe_cache_for_dev(IntelIOMMUState *s,
+              VTDBus *vtd_bus, int devfn, int pasid, VTDPASIDEntry *pe);
 
 static void vtd_panic_require_caching_mode(void)
 {
@@ -2625,6 +2627,113 @@ remove:
     return true;
 }
 
+/**
+ * Constant information used during pasid table walk
+ * @vtd_icx: VTDIOMMUContext
+ * @flags: indicates whether this is a domain-selective walk
+ * @did: domain ID of the pasid table walk
+ */
+typedef struct {
+    VTDIOMMUContext *vtd_icx;
+#define VTD_PASID_TABLE_DID_SEL_WALK   (1ULL << 0)
+    uint32_t flags;
+    uint16_t did;
+} vtd_pasid_table_walk_info;
+
+static bool vtd_sm_pasid_table_walk_one(IntelIOMMUState *s,
+                                       dma_addr_t pt_base,
+                                       int start,
+                                       int end,
+                                       vtd_pasid_table_walk_info *info)
+{
+    VTDPASIDEntry pe;
+    int pasid = start;
+    int pasid_next;
+    VTDIOMMUContext *vtd_icx = info->vtd_icx;
+
+    while (pasid < end) {
+        pasid_next = pasid + 1;
+
+        if (!vtd_get_pe_in_pasid_leaf_table(s, pasid, pt_base, &pe)
+            && vtd_pe_present(&pe)) {
+            if (vtd_update_pe_cache_for_dev(s, vtd_icx->vtd_bus,
+                                       vtd_icx->devfn, pasid, &pe)) {
+                error_report_once("%s, bus: %d, devfn: %d, pasid: %d",
+                                  __func__,
+                                  pci_bus_num(vtd_icx->vtd_bus->bus),
+                                  vtd_icx->devfn, pasid);
+                return false;
+            }
+        }
+        pasid = pasid_next;
+    }
+    return true;
+}
+
+/*
+ * Currently, the VT-d scalable mode pasid table is a two-level table.
+ * This function loops over a range of PASIDs in a given pasid
+ * directory to identify the pasid config in the guest.
+ */
+static void vtd_sm_pasid_table_walk(IntelIOMMUState *s, dma_addr_t pdt_base,
+                        int start, int end, vtd_pasid_table_walk_info *info)
+{
+    VTDPASIDDirEntry pdire;
+    int pasid = start;
+    int pasid_next;
+    dma_addr_t pt_base;
+
+    while (pasid < end) {
+        pasid_next = pasid + VTD_PASID_TBL_ENTRY_NUM;
+        if (!vtd_get_pdire_from_pdir_table(pdt_base, pasid, &pdire)
+            && vtd_pdire_present(&pdire)) {
+            pt_base = pdire.val & VTD_PASID_TABLE_BASE_ADDR_MASK;
+            if (!vtd_sm_pasid_table_walk_one(s,
+                              pt_base, pasid, pasid_next, info)) {
+                break;
+            }
+        }
+        pasid = pasid_next;
+    }
+}
+
+/**
+ * This function replays the guest pasid bindings to the host by
+ * walking the guest PASID table. This ensures the host has the
+ * latest guest pasid bindings.
+ */
+static void vtd_replay_guest_pasid_bindings(IntelIOMMUState *s,
+                                           uint16_t *did, bool is_dsi)
+{
+    VTDContextEntry ce;
+    vtd_pasid_table_walk_info info;
+    VTDIOMMUContext *vtd_icx;
+
+    if (is_dsi) {
+        info.flags = VTD_PASID_TABLE_DID_SEL_WALK;
+        info.did = *did;
+    }
+
+    /*
+     * In this replay, we only need to care about the devices which
+     * have an iommu_context created. For those without one, it is
+     * not necessary to replay the bindings since their cache can
+     * be re-created in the next DMA address translation.
+     */
+    QLIST_FOREACH(vtd_icx, &s->vtd_dev_icx_list, next) {
+        if (!vtd_dev_to_context_entry(s,
+                                      pci_bus_num(vtd_icx->vtd_bus->bus),
+                                      vtd_icx->devfn, &ce)) {
+            info.vtd_icx = vtd_icx;
+            vtd_sm_pasid_table_walk(s,
+                                    VTD_CE_GET_PASID_DIR_TABLE(&ce),
+                                    0,
+                                    VTD_MAX_HPASID,
+                                    &info);
+        }
+    }
+}
+
 static int vtd_pasid_cache_dsi(IntelIOMMUState *s, uint16_t domain_id)
 {
     VTDPASIDCacheInfo pc_info;
@@ -2642,12 +2751,13 @@ static int vtd_pasid_cache_dsi(IntelIOMMUState *s, uint16_t domain_id)
                                  vtd_flush_pasid, &pc_info);
 
     /*
-     * TODO: Domain selective PASID cache invalidation
-     * flushes all the pasid caches within a domain. To
-     * be safe, after invalidating the pasid caches, emulator
-     * needs to replay the pasid bindings by walking guest
-     * pasid dir and pasid table.
+     * Domain selective PASID cache invalidation flushes
+     * all the pasid caches within a domain. To be safe,
+     * after invalidating the pasid caches, emulator needs
+     * to replay the pasid bindings by walking guest pasid
+     * dir and pasid table.
      */
+    vtd_replay_guest_pasid_bindings(s, &domain_id, true);
     vtd_iommu_unlock(s);
     return 0;
 }
@@ -2715,6 +2825,31 @@ static inline void vtd_fill_in_pe_cache(
                          pe, VTD_PASID_BIND);
 }
 
+/**
+ * This function updates the pasid entry cached in &vtd_pasid_as.
+ * Caller of this function should hold iommu_lock.
+ */
+static int vtd_update_pe_cache_for_dev(IntelIOMMUState *s, VTDBus *vtd_bus,
+                               int devfn, int pasid, VTDPASIDEntry *pe)
+{
+    VTDPASIDAddressSpace *vtd_pasid_as;
+
+    vtd_pasid_as = vtd_add_find_pasid_as(s, vtd_bus,
+                                        devfn, pasid, true);
+    if (!vtd_pasid_as) {
+        error_report_once("%s, fatal error happened!\n", __func__);
+        return -1;
+    }
+
+    if (vtd_pasid_as->pasid_cache_entry.pasid_cache_gen ==
+                                               s->pasid_cache_gen) {
+        return 0;
+    }
+
+    vtd_fill_in_pe_cache(vtd_pasid_as, pe);
+    return 0;
+}
+
 static int vtd_pasid_cache_psi(IntelIOMMUState *s,
                                uint16_t domain_id, uint32_t pasid)
 {
@@ -2838,12 +2973,13 @@ static int vtd_pasid_cache_gsi(IntelIOMMUState *s)
     vtd_pasid_cache_reset(s);
 
     /*
-     * TODO: Global PASID cache invalidation may be
-     * flushes all the pasid caches. To be safe, after
-     * invalidating the pasid caches, emulator needs
-     * to replay the pasid bindings by walking guest
-     * pasid dir and pasid table.
+     * Global PASID cache invalidation flushes all
+     * the pasid caches. To be safe, after invalidating
+     * the pasid caches, emulator needs to replay the
+     * pasid bindings by walking guest pasid dir and
+     * pasid table.
      */
+    vtd_replay_guest_pasid_bindings(s, NULL, false);
     vtd_iommu_unlock(s);
     return 0;
 }
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index 833c442..cd96b6e 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -558,6 +558,7 @@ typedef struct VTDPASIDCacheInfo VTDPASIDCacheInfo;
 #define VTD_PASID_TABLE_BITS_MASK     (0x3fULL)
 #define VTD_PASID_TABLE_INDEX(pasid)  ((pasid) & VTD_PASID_TABLE_BITS_MASK)
 #define VTD_PASID_ENTRY_FPD           (1ULL << 1) /* Fault Processing Disable */
+#define VTD_PASID_TBL_ENTRY_NUM       (1ULL << 6)
 
 /* PASID Granular Translation Type Mask */
 #define VTD_PASID_ENTRY_P              1ULL
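
A note on the new VTD_PASID_TBL_ENTRY_NUM constant: it is meant to
stay in sync with VTD_PASID_TABLE_BITS_MASK (64 entries per leaf
PASID table). A hypothetical compile-time check, not part of this
patch, could express that relation using QEMU's existing
QEMU_BUILD_BUG_ON macro:

/* Hypothetical, not in the patch: entries per leaf PASID table must
 * equal the table index mask plus one. */
QEMU_BUILD_BUG_ON(VTD_PASID_TBL_ENTRY_NUM != VTD_PASID_TABLE_BITS_MASK + 1);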
-- 
2.7.4

