From: Eric Auger <eric.auger@redhat.com>
To: eric.auger.pro@gmail.com, eric.auger@redhat.com,
	iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org,
	kvm@vger.kernel.org, kvmarm@lists.cs.columbia.edu,
	will@kernel.org, maz@kernel.org, robin.murphy@arm.com,
	joro@8bytes.org, alex.williamson@redhat.com, tn@semihalf.com,
	zhukeqian1@huawei.com
Cc: jacob.jun.pan@linux.intel.com, yi.l.liu@intel.com,
	wangxingang5@huawei.com, jean-philippe@linaro.org,
	zhangfei.gao@linaro.org, zhangfei.gao@gmail.com,
	vivek.gautam@arm.com, shameerali.kolothum.thodi@huawei.com,
	yuzenghui@huawei.com, nicoleotsuka@gmail.com,
	lushenming@huawei.com, vsethi@nvidia.com,
	chenxiang66@hisilicon.com, vdumpa@nvidia.com,
	jiangkunkun@huawei.com
Subject: [PATCH v15 07/12] iommu/smmuv3: Implement cache_invalidate
Date: Sun, 11 Apr 2021 13:12:23 +0200
Message-ID: <20210411111228.14386-8-eric.auger@redhat.com>
In-Reply-To: <20210411111228.14386-1-eric.auger@redhat.com>

Implement the cache_invalidate() callback for nested-stage domains,
covering domain-selective, PASID-selective (ASID-based) and
page-selective IOTLB invalidations.
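
As an illustration, here is a minimal sketch of how a caller (e.g.
VFIO relaying a guest TLBI) might fill the uapi structure for an
ASID-selective invalidation. The field and constant names are the
ones used by this series' uapi; guest_asid is hypothetical:

	struct iommu_cache_invalidate_info inv_info = {
		.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
		.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
		.granularity	= IOMMU_INV_GRANU_PASID,
	};

	/* invalidate all S1 entries tagged with the guest ASID */
	inv_info.granu.pasid_info.flags = IOMMU_INV_PASID_FLAGS_ARCHID;
	inv_info.granu.pasid_info.archid = guest_asid;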

Signed-off-by: Eric Auger <eric.auger@redhat.com>

---
v14 -> v15:
- remove the redundant arm_smmu_cmdq_issue_sync(smmu)
  in IOMMU_INV_GRANU_ADDR case (Zenghui)
- if RIL is not supported by the host, make sure the granule_size
  passed by userspace is supported, or fix it up
  (Chenxiang)

v13 -> v14:
- Add domain invalidation
- do a global invalidation when no ASID is provided with the
  address granularity

v7 -> v8:
- ASID based invalidation using iommu_inv_pasid_info
- check ARCHID/PASID flags in addr based invalidation
- use __arm_smmu_tlb_inv_context and __arm_smmu_tlb_inv_range_nosync

v6 -> v7:
- check the uapi version

v3 -> v4:
- adapt to changes in the uapi
- add support for leaf parameter
- do not use arm_smmu_tlb_inv_range_nosync or arm_smmu_tlb_inv_context
  anymore

v2 -> v3:
- replace __arm_smmu_tlb_sync by arm_smmu_cmdq_issue_sync

v1 -> v2:
- properly pass the asid
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 89 +++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 56a301fbe75a..bfc112cc0d38 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2961,6 +2961,94 @@ static void arm_smmu_detach_pasid_table(struct iommu_domain *domain)
 	mutex_unlock(&smmu_domain->init_mutex);
 }
 
+static int
+arm_smmu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+			  struct iommu_cache_invalidate_info *inv_info)
+{
+	struct arm_smmu_cmdq_ent cmd = {.opcode = CMDQ_OP_TLBI_NSNH_ALL};
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+
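+	/* Only a nested (S1 + S2) domain can relay guest stage-1 invalidations */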
+	if (smmu_domain->stage != ARM_SMMU_DOMAIN_NESTED)
+		return -EINVAL;
+
+	if (!smmu)
+		return -EINVAL;
+
+	if (inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+		return -EINVAL;
+
+	if (inv_info->cache & (IOMMU_CACHE_INV_TYPE_PASID |
+			       IOMMU_CACHE_INV_TYPE_DEV_IOTLB))
+		return -ENOENT;
+
+	if (!(inv_info->cache & IOMMU_CACHE_INV_TYPE_IOTLB))
+		return -EINVAL;
+
+	/* IOTLB invalidation */
+
+	switch (inv_info->granularity) {
+	case IOMMU_INV_GRANU_PASID:
+	{
+		struct iommu_inv_pasid_info *info =
+			&inv_info->granu.pasid_info;
+
+		if (info->flags & IOMMU_INV_PASID_FLAGS_PASID)
+			return -ENOENT;
+		if (!(info->flags & IOMMU_INV_PASID_FLAGS_ARCHID))
+			return -EINVAL;
+
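+		/* ASID-selective: invalidate all S1 entries tagged with the guest ASID */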
+		__arm_smmu_tlb_inv_context(smmu_domain, info->archid);
+		return 0;
+	}
+	case IOMMU_INV_GRANU_ADDR:
+	{
+		struct iommu_inv_addr_info *info = &inv_info->granu.addr_info;
+		size_t granule_size = info->granule_size;
+		size_t size = info->nb_granules * info->granule_size;
+		bool leaf = info->flags & IOMMU_INV_ADDR_FLAGS_LEAF;
+		int tg;
+
+		if (info->flags & IOMMU_INV_ADDR_FLAGS_PASID)
+			return -ENOENT;
+
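+		/* Without an ASID, fall back to the global S1 invalidation below */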
+		if (!(info->flags & IOMMU_INV_ADDR_FLAGS_ARCHID))
+			break;
+
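+		/* granule_size must be a power of two (a single page size) */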
+		tg = __ffs(granule_size);
+		if (granule_size & ~(1 << tg))
+			return -EINVAL;
+		/*
+		 * When RIL is not supported, make sure the granule size that is
+		 * passed is supported. In RIL mode, this is enforced in
+		 * __arm_smmu_tlb_inv_range()
+		 */
+		if (!(smmu->features & ARM_SMMU_FEAT_RANGE_INV) &&
+		    !(granule_size & smmu_domain->domain.pgsize_bitmap)) {
+			tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+			granule_size = 1 << tg;
+		}
+
+		arm_smmu_tlb_inv_range_domain(info->addr, size,
+					      granule_size, leaf,
+					      info->archid, smmu_domain);
+		return 0;
+	}
+	case IOMMU_INV_GRANU_DOMAIN:
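+		/* Domain-selective: handled by the global invalidation below */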
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Global S1 invalidation: all entries tagged with this domain's VMID */
+	cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
+	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
+	return 0;
+}
+
 static bool arm_smmu_dev_has_feature(struct device *dev,
 				     enum iommu_dev_features feat)
 {
@@ -3060,6 +3148,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.put_resv_regions	= generic_iommu_put_resv_regions,
 	.attach_pasid_table	= arm_smmu_attach_pasid_table,
 	.detach_pasid_table	= arm_smmu_detach_pasid_table,
+	.cache_invalidate	= arm_smmu_cache_invalidate,
 	.dev_has_feat		= arm_smmu_dev_has_feature,
 	.dev_feat_enabled	= arm_smmu_dev_feature_enabled,
 	.dev_enable_feat	= arm_smmu_dev_enable_feature,
-- 
2.26.3


Thread overview: 66+ messages

2021-04-11 11:12 [PATCH v15 00/12] SMMUv3 Nested Stage Setup (IOMMU part) Eric Auger
2021-04-11 11:12 ` [PATCH v15 01/12] iommu: Introduce attach/detach_pasid_table API Eric Auger
2021-04-11 11:12 ` [PATCH v15 02/12] iommu: Introduce bind/unbind_guest_msi Eric Auger
2021-04-11 11:12 ` [PATCH v15 03/12] iommu/smmuv3: Allow s1 and s2 configs to coexist Eric Auger
2021-04-11 11:12 ` [PATCH v15 04/12] iommu/smmuv3: Get prepared for nested stage support Eric Auger
2021-04-11 11:12 ` [PATCH v15 05/12] iommu/smmuv3: Implement attach/detach_pasid_table Eric Auger
2021-04-11 11:12 ` [PATCH v15 06/12] iommu/smmuv3: Allow stage 1 invalidation with unmanaged ASIDs Eric Auger
2021-04-11 11:12 ` [PATCH v15 07/12] iommu/smmuv3: Implement cache_invalidate Eric Auger [this message]
2021-05-14  3:09   ` Kunkun Jiang
2021-05-21  6:48   ` Kunkun Jiang
2021-04-11 11:12 ` [PATCH v15 08/12] dma-iommu: Implement NESTED_MSI cookie Eric Auger
2021-04-11 11:12 ` [PATCH v15 09/12] iommu/smmuv3: Nested mode single MSI doorbell per domain enforcement Eric Auger
2021-04-11 11:12 ` [PATCH v15 10/12] iommu/smmuv3: Enforce incompatibility between nested mode and HW MSI regions Eric Auger
2021-04-11 11:12 ` [PATCH v15 11/12] iommu/smmuv3: Implement bind/unbind_guest_msi Eric Auger
2021-04-11 11:12 ` [PATCH v15 12/12] iommu/smmuv3: report additional recoverable faults Eric Auger
2021-04-14  2:36 ` [PATCH v15 00/12] SMMUv3 Nested Stage Setup (IOMMU part) Xingang Wang
2021-04-14  6:56   ` Shameerali Kolothum Thodi
2021-04-14  7:08     ` Xingang Wang
2021-04-21 11:28 ` Vivek Kumar Gautam
2021-04-23 18:21 ` Sumit Gupta
2021-09-27 21:17 ` Krishna Reddy
2021-09-28  6:25   ` Eric Auger