From: Auger Eric <eric.auger@redhat.com> To: Jacob Pan <jacob.jun.pan@linux.intel.com>, iommu@lists.linux-foundation.org, LKML <linux-kernel@vger.kernel.org>, Joerg Roedel <joro@8bytes.org>, David Woodhouse <dwmw2@infradead.org>, Alex Williamson <alex.williamson@redhat.com>, Jean-Philippe Brucker <jean-philippe.brucker@arm.com> Cc: Yi Liu <yi.l.liu@intel.com>, "Tian, Kevin" <kevin.tian@intel.com>, Raj Ashok <ashok.raj@intel.com>, Christoph Hellwig <hch@infradead.org>, Lu Baolu <baolu.lu@linux.intel.com>, Andriy Shevchenko <andriy.shevchenko@linux.intel.com> Subject: Re: [PATCH v2 18/19] iommu/vt-d: Support flushing more translation cache types Date: Sat, 27 Apr 2019 11:04:04 +0200 [thread overview] Message-ID: <5ad35536-4993-13f1-5199-ddd99f7009e5@redhat.com> (raw) In-Reply-To: <1556062279-64135-19-git-send-email-jacob.jun.pan@linux.intel.com> Hi Jacob, On 4/24/19 1:31 AM, Jacob Pan wrote: > When Shared Virtual Memory is exposed to a guest via vIOMMU, extended > IOTLB invalidation may be passed down from outside IOMMU subsystems. > This patch adds invalidation functions that can be used for additional > translation cache types. 
> > Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com> > --- > drivers/iommu/dmar.c | 48 +++++++++++++++++++++++++++++++++++++++++++++ > include/linux/intel-iommu.h | 21 ++++++++++++++++---- > 2 files changed, 65 insertions(+), 4 deletions(-) > > diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c > index 9c49300..680894e 100644 > --- a/drivers/iommu/dmar.c > +++ b/drivers/iommu/dmar.c > @@ -1357,6 +1357,20 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, > qi_submit_sync(&desc, iommu); > } > /* PASID-based IOTLB Invalidate */ > +void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u64 addr, u32 pasid, > + unsigned int size_order, u64 granu) > +{ > + struct qi_desc desc; > + > + desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) | > + QI_EIOTLB_GRAN(granu) | QI_EIOTLB_TYPE; > + desc.qw1 = QI_EIOTLB_ADDR(addr) | QI_EIOTLB_IH(0) | > + QI_EIOTLB_AM(size_order); I see IH it hardcoded to 0. Don't you envision to cascade the IH. On ARM this was needed for perf sake. > + desc.qw2 = 0; > + desc.qw3 = 0; > + qi_submit_sync(&desc, iommu); > +} > + > void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > u16 qdep, u64 addr, unsigned mask) > { > @@ -1380,6 +1394,40 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > qi_submit_sync(&desc, iommu); > } > /* Pasid-based Device-TLB Invalidation */ > +void qi_flush_dev_piotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > + u32 pasid, u16 qdep, u64 addr, unsigned size, u64 granu) > +{ > + struct qi_desc desc; > + > + desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) | > + QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE | > + QI_DEV_IOTLB_PFSID(pfsid); > + desc.qw1 |= QI_DEV_EIOTLB_GLOB(granu); > + > + /* If S bit is 0, we only flush a single page. If S bit is set, > + * The least significant zero bit indicates the size. VT-d spec > + * 6.5.2.6 > + */ > + if (!size) > + desc.qw0 = QI_DEV_EIOTLB_ADDR(addr) & ~QI_DEV_EIOTLB_SIZE; desc.q1 |= ? 
> + else { > + unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size); > + > + desc.qw1 = QI_DEV_EIOTLB_ADDR(addr & ~mask) | QI_DEV_EIOTLB_SIZE; desc.q1 |= > + } > + qi_submit_sync(&desc, iommu); > +} > + /* PASID-cache invalidation */ > +void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int pasid) > +{ > + struct qi_desc desc; > + > + desc.qw0 = QI_PC_TYPE | QI_PC_DID(did) | QI_PC_GRAN(granu) | QI_PC_PASID(pasid); > + desc.qw1 = 0; > + desc.qw2 = 0; > + desc.qw3 = 0; > + qi_submit_sync(&desc, iommu); > +} > /* > * Disable Queued Invalidation interface. > */ > diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h > index 5d67d0d4..38e5efb 100644 > --- a/include/linux/intel-iommu.h > +++ b/include/linux/intel-iommu.h > @@ -339,7 +339,7 @@ enum { > #define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) > #define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) > #define QI_IOTLB_IH(ih) (((u64)ih) << 6) > -#define QI_IOTLB_AM(am) (((u8)am)) > +#define QI_IOTLB_AM(am) (((u8)am) & 0x3f) > > #define QI_CC_FM(fm) (((u64)fm) << 48) > #define QI_CC_SID(sid) (((u64)sid) << 32) > @@ -357,17 +357,22 @@ enum { > #define QI_PC_DID(did) (((u64)did) << 16) > #define QI_PC_GRAN(gran) (((u64)gran) << 4) > > -#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) > -#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) > +/* PASID cache invalidation granu */ > +#define QI_PC_ALL_PASIDS 0 > +#define QI_PC_PASID_SEL 1 > > #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) > #define QI_EIOTLB_GL(gl) (((u64)gl) << 7) > #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) > -#define QI_EIOTLB_AM(am) (((u64)am)) > +#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f) > #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) > #define QI_EIOTLB_DID(did) (((u64)did) << 16) > #define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) > > +/* QI Dev-IOTLB inv granu */ > +#define QI_DEV_IOTLB_GRAN_ALL 1 > +#define QI_DEV_IOTLB_GRAN_PASID_SEL 0 > + > #define 
QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) > #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) > #define QI_DEV_EIOTLB_GLOB(g) ((u64)g) > @@ -658,8 +663,16 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, > u8 fm, u64 type); > extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, > unsigned int size_order, u64 type); > +extern void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u64 addr, > + u32 pasid, unsigned int size_order, u64 type); > extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > u16 qdep, u64 addr, unsigned mask); > + > +extern void qi_flush_dev_piotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > + u32 pasid, u16 qdep, u64 addr, unsigned size, u64 granu); > + > +extern void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int pasid); > + > extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); > > extern int dmar_ir_support(void); > Thanks Eric
WARNING: multiple messages have this Message-ID (diff)
From: Auger Eric <eric.auger@redhat.com> To: Jacob Pan <jacob.jun.pan@linux.intel.com>, iommu@lists.linux-foundation.org, LKML <linux-kernel@vger.kernel.org>, Joerg Roedel <joro@8bytes.org>, David Woodhouse <dwmw2@infradead.org>, Alex Williamson <alex.williamson@redhat.com>, Jean-Philippe Brucker <jean-philippe.brucker@arm.com> Cc: "Tian, Kevin" <kevin.tian@intel.com>, Raj Ashok <ashok.raj@intel.com>, Andriy Shevchenko <andriy.shevchenko@linux.intel.com> Subject: Re: [PATCH v2 18/19] iommu/vt-d: Support flushing more translation cache types Date: Sat, 27 Apr 2019 11:04:04 +0200 [thread overview] Message-ID: <5ad35536-4993-13f1-5199-ddd99f7009e5@redhat.com> (raw) Message-ID: <20190427090404.p8SlVE5GP2jonrmYhjmV5V4h3fg-Dr5-4KxqoHzqvz4@z> (raw) In-Reply-To: <1556062279-64135-19-git-send-email-jacob.jun.pan@linux.intel.com> Hi Jacob, On 4/24/19 1:31 AM, Jacob Pan wrote: > When Shared Virtual Memory is exposed to a guest via vIOMMU, extended > IOTLB invalidation may be passed down from outside IOMMU subsystems. > This patch adds invalidation functions that can be used for additional > translation cache types. 
> > Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com> > --- > drivers/iommu/dmar.c | 48 +++++++++++++++++++++++++++++++++++++++++++++ > include/linux/intel-iommu.h | 21 ++++++++++++++++---- > 2 files changed, 65 insertions(+), 4 deletions(-) > > diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c > index 9c49300..680894e 100644 > --- a/drivers/iommu/dmar.c > +++ b/drivers/iommu/dmar.c > @@ -1357,6 +1357,20 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, > qi_submit_sync(&desc, iommu); > } > /* PASID-based IOTLB Invalidate */ > +void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u64 addr, u32 pasid, > + unsigned int size_order, u64 granu) > +{ > + struct qi_desc desc; > + > + desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) | > + QI_EIOTLB_GRAN(granu) | QI_EIOTLB_TYPE; > + desc.qw1 = QI_EIOTLB_ADDR(addr) | QI_EIOTLB_IH(0) | > + QI_EIOTLB_AM(size_order); I see IH it hardcoded to 0. Don't you envision to cascade the IH. On ARM this was needed for perf sake. > + desc.qw2 = 0; > + desc.qw3 = 0; > + qi_submit_sync(&desc, iommu); > +} > + > void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > u16 qdep, u64 addr, unsigned mask) > { > @@ -1380,6 +1394,40 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > qi_submit_sync(&desc, iommu); > } > /* Pasid-based Device-TLB Invalidation */ > +void qi_flush_dev_piotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > + u32 pasid, u16 qdep, u64 addr, unsigned size, u64 granu) > +{ > + struct qi_desc desc; > + > + desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) | > + QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE | > + QI_DEV_IOTLB_PFSID(pfsid); > + desc.qw1 |= QI_DEV_EIOTLB_GLOB(granu); > + > + /* If S bit is 0, we only flush a single page. If S bit is set, > + * The least significant zero bit indicates the size. VT-d spec > + * 6.5.2.6 > + */ > + if (!size) > + desc.qw0 = QI_DEV_EIOTLB_ADDR(addr) & ~QI_DEV_EIOTLB_SIZE; desc.q1 |= ? 
> + else { > + unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size); > + > + desc.qw1 = QI_DEV_EIOTLB_ADDR(addr & ~mask) | QI_DEV_EIOTLB_SIZE; desc.q1 |= > + } > + qi_submit_sync(&desc, iommu); > +} > + /* PASID-cache invalidation */ > +void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int pasid) > +{ > + struct qi_desc desc; > + > + desc.qw0 = QI_PC_TYPE | QI_PC_DID(did) | QI_PC_GRAN(granu) | QI_PC_PASID(pasid); > + desc.qw1 = 0; > + desc.qw2 = 0; > + desc.qw3 = 0; > + qi_submit_sync(&desc, iommu); > +} > /* > * Disable Queued Invalidation interface. > */ > diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h > index 5d67d0d4..38e5efb 100644 > --- a/include/linux/intel-iommu.h > +++ b/include/linux/intel-iommu.h > @@ -339,7 +339,7 @@ enum { > #define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) > #define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) > #define QI_IOTLB_IH(ih) (((u64)ih) << 6) > -#define QI_IOTLB_AM(am) (((u8)am)) > +#define QI_IOTLB_AM(am) (((u8)am) & 0x3f) > > #define QI_CC_FM(fm) (((u64)fm) << 48) > #define QI_CC_SID(sid) (((u64)sid) << 32) > @@ -357,17 +357,22 @@ enum { > #define QI_PC_DID(did) (((u64)did) << 16) > #define QI_PC_GRAN(gran) (((u64)gran) << 4) > > -#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) > -#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) > +/* PASID cache invalidation granu */ > +#define QI_PC_ALL_PASIDS 0 > +#define QI_PC_PASID_SEL 1 > > #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) > #define QI_EIOTLB_GL(gl) (((u64)gl) << 7) > #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) > -#define QI_EIOTLB_AM(am) (((u64)am)) > +#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f) > #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) > #define QI_EIOTLB_DID(did) (((u64)did) << 16) > #define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) > > +/* QI Dev-IOTLB inv granu */ > +#define QI_DEV_IOTLB_GRAN_ALL 1 > +#define QI_DEV_IOTLB_GRAN_PASID_SEL 0 > + > #define 
QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) > #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) > #define QI_DEV_EIOTLB_GLOB(g) ((u64)g) > @@ -658,8 +663,16 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, > u8 fm, u64 type); > extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, > unsigned int size_order, u64 type); > +extern void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u64 addr, > + u32 pasid, unsigned int size_order, u64 type); > extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > u16 qdep, u64 addr, unsigned mask); > + > +extern void qi_flush_dev_piotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, > + u32 pasid, u16 qdep, u64 addr, unsigned size, u64 granu); > + > +extern void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int pasid); > + > extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); > > extern int dmar_ir_support(void); > Thanks Eric _______________________________________________ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
next prev parent reply other threads:[~2019-04-27 9:04 UTC|newest] Thread overview: 161+ messages / expand[flat|nested] mbox.gz Atom feed top 2019-04-23 23:31 [PATCH v2 00/19] Shared virtual address IOMMU and VT-d support Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 01/19] driver core: add per device iommu param Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 02/19] iommu: introduce device fault data Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-25 12:46 ` Jean-Philippe Brucker 2019-04-25 12:46 ` Jean-Philippe Brucker 2019-04-25 13:21 ` Auger Eric 2019-04-25 13:21 ` Auger Eric 2019-04-25 14:33 ` Jean-Philippe Brucker 2019-04-25 14:33 ` Jean-Philippe Brucker 2019-04-25 18:07 ` Jacob Pan 2019-04-25 18:07 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 03/19] iommu: introduce device fault report API Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 04/19] iommu: Introduce attach/detach_pasid_table API Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 05/19] iommu: Introduce cache_invalidate API Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 06/19] drivers core: Add I/O ASID allocator Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-24 6:19 ` Christoph Hellwig 2019-04-24 6:19 ` Christoph Hellwig 2019-04-25 18:19 ` Jacob Pan 2019-04-25 18:19 ` Jacob Pan 2019-04-25 18:19 ` Jacob Pan 2019-04-26 11:47 ` Jean-Philippe Brucker 2019-04-26 11:47 ` Jean-Philippe Brucker 2019-04-26 12:21 ` Christoph Hellwig 2019-04-26 12:21 ` Christoph Hellwig 2019-04-26 16:58 ` Jacob Pan 2019-04-26 16:58 ` Jacob Pan 2019-04-25 10:17 ` Auger Eric 2019-04-25 10:17 ` Auger Eric 2019-04-25 10:41 ` Jean-Philippe Brucker 2019-04-25 10:41 ` Jean-Philippe Brucker 2019-04-30 20:24 ` Jacob Pan 2019-04-30 20:24 ` Jacob Pan 2019-05-01 17:40 ` Jean-Philippe Brucker 2019-05-01 17:40 ` Jean-Philippe 
Brucker 2019-04-23 23:31 ` [PATCH v2 07/19] ioasid: Convert ioasid_idr to XArray Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 08/19] ioasid: Add custom IOASID allocator Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-25 10:03 ` Auger Eric 2019-04-25 10:03 ` Auger Eric 2019-04-25 21:29 ` Jacob Pan 2019-04-25 21:29 ` Jacob Pan 2019-04-26 9:06 ` Auger Eric 2019-04-26 9:06 ` Auger Eric 2019-04-26 15:19 ` Jacob Pan 2019-04-26 15:19 ` Jacob Pan 2019-05-06 17:59 ` Jacob Pan 2019-05-06 17:59 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 09/19] iommu/vt-d: Enlightened PASID allocation Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-24 17:27 ` Auger Eric 2019-04-24 17:27 ` Auger Eric 2019-04-24 17:27 ` Auger Eric 2019-04-25 7:12 ` Liu, Yi L 2019-04-25 7:12 ` Liu, Yi L 2019-04-25 7:40 ` Auger Eric 2019-04-25 7:40 ` Auger Eric 2019-04-25 23:01 ` Jacob Pan 2019-04-25 23:01 ` Jacob Pan 2019-04-25 23:01 ` Jacob Pan 2019-04-25 23:40 ` Jacob Pan 2019-04-25 23:40 ` Jacob Pan 2019-04-26 7:24 ` Auger Eric 2019-04-26 7:24 ` Auger Eric 2019-04-26 15:05 ` Jacob Pan 2019-04-26 15:05 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 10/19] iommu/vt-d: Add custom allocator for IOASID Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-24 17:27 ` Auger Eric 2019-04-24 17:27 ` Auger Eric 2019-04-24 17:27 ` Auger Eric 2019-04-26 20:11 ` Jacob Pan 2019-04-26 20:11 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 11/19] iommu/vt-d: Replace Intel specific PASID allocator with IOASID Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-25 10:04 ` Auger Eric 2019-04-25 10:04 ` Auger Eric [not found] ` <e542fd95-acbe-05e9-e441-27dff752c21a-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org> 2019-04-26 21:01 ` Jacob Pan 2019-04-26 21:01 ` Jacob Pan 2019-04-27 8:38 ` Auger Eric 2019-04-27 8:38 ` Auger Eric 2019-04-29 10:00 ` Jean-Philippe Brucker 2019-04-29 10:00 ` Jean-Philippe Brucker 2019-04-23 23:31 ` [PATCH v2 12/19] iommu/vt-d: Move domain helper to header Jacob Pan 2019-04-23 23:31 ` 
Jacob Pan 2019-04-24 17:27 ` Auger Eric 2019-04-24 17:27 ` Auger Eric 2019-04-24 17:27 ` Auger Eric 2019-04-23 23:31 ` [PATCH v2 13/19] iommu/vt-d: Add nested translation support Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-26 15:42 ` Auger Eric 2019-04-26 15:42 ` Auger Eric 2019-04-26 21:57 ` Jacob Pan 2019-04-26 21:57 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 14/19] iommu: Add guest PASID bind function Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-26 15:53 ` Auger Eric 2019-04-26 15:53 ` Auger Eric 2019-04-26 22:11 ` Jacob Pan 2019-04-26 22:11 ` Jacob Pan 2019-04-27 8:37 ` Auger Eric 2019-04-27 8:37 ` Auger Eric 2019-04-23 23:31 ` [PATCH v2 15/19] iommu/vt-d: Add bind guest PASID support Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-26 16:15 ` Auger Eric 2019-04-26 16:15 ` Auger Eric 2019-04-29 15:25 ` Jacob Pan 2019-04-29 15:25 ` Jacob Pan 2019-04-30 7:05 ` Auger Eric 2019-04-30 7:05 ` Auger Eric 2019-04-30 17:49 ` Jacob Pan 2019-04-30 17:49 ` Jacob Pan 2019-04-23 23:31 ` [PATCH v2 16/19] iommu/vtd: Clean up for SVM device list Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-26 16:19 ` Auger Eric 2019-04-26 16:19 ` Auger Eric 2019-04-23 23:31 ` [PATCH v2 17/19] iommu: Add max num of cache and granu types Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-26 16:22 ` Auger Eric 2019-04-26 16:22 ` Auger Eric 2019-04-29 16:17 ` Jacob Pan 2019-04-29 16:17 ` Jacob Pan 2019-04-30 5:15 ` Auger Eric 2019-04-30 5:15 ` Auger Eric 2019-04-23 23:31 ` [PATCH v2 18/19] iommu/vt-d: Support flushing more translation cache types Jacob Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-27 9:04 ` Auger Eric [this message] 2019-04-27 9:04 ` Auger Eric 2019-04-29 21:29 ` Jacob Pan 2019-04-29 21:29 ` Jacob Pan 2019-04-30 4:41 ` Auger Eric 2019-04-30 4:41 ` Auger Eric 2019-04-30 4:41 ` Auger Eric 2019-04-30 17:15 ` Jacob Pan 2019-04-30 17:15 ` Jacob Pan 2019-04-30 17:41 ` Auger Eric 2019-04-30 17:41 ` Auger Eric 2019-04-23 23:31 ` [PATCH v2 19/19] iommu/vt-d: Add svm/sva invalidate function Jacob 
Pan 2019-04-23 23:31 ` Jacob Pan 2019-04-26 17:23 ` Auger Eric 2019-04-26 17:23 ` Auger Eric 2019-04-29 22:41 ` Jacob Pan 2019-04-29 22:41 ` Jacob Pan 2019-04-30 6:57 ` Auger Eric 2019-04-30 6:57 ` Auger Eric 2019-04-30 17:22 ` Jacob Pan 2019-04-30 17:22 ` Jacob Pan 2019-04-30 17:36 ` Auger Eric 2019-04-30 17:36 ` Auger Eric
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=5ad35536-4993-13f1-5199-ddd99f7009e5@redhat.com \ --to=eric.auger@redhat.com \ --cc=alex.williamson@redhat.com \ --cc=andriy.shevchenko@linux.intel.com \ --cc=ashok.raj@intel.com \ --cc=baolu.lu@linux.intel.com \ --cc=dwmw2@infradead.org \ --cc=hch@infradead.org \ --cc=iommu@lists.linux-foundation.org \ --cc=jacob.jun.pan@linux.intel.com \ --cc=jean-philippe.brucker@arm.com \ --cc=joro@8bytes.org \ --cc=kevin.tian@intel.com \ --cc=linux-kernel@vger.kernel.org \ --cc=yi.l.liu@intel.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.