From: Lu Baolu <baolu.lu@linux.intel.com>
To: Joerg Roedel <joro@8bytes.org>, Jason Gunthorpe <jgg@nvidia.com>,
	Christoph Hellwig <hch@infradead.org>,
	Kevin Tian <kevin.tian@intel.com>,
	Ashok Raj <ashok.raj@intel.com>, Will Deacon <will@kernel.org>,
	Robin Murphy <robin.murphy@arm.com>,
	Jean-Philippe Brucker <jean-philippe@linaro.com>,
	Dave Jiang <dave.jiang@intel.com>, Vinod Koul <vkoul@kernel.org>
Cc: linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org,
	Jacob jun Pan <jacob.jun.pan@intel.com>
Subject: [PATCH v7 06/10] iommu/sva: Refactoring iommu_sva_bind/unbind_device()
Date: Thu, 19 May 2022 15:20:43 +0800	[thread overview]
Message-ID: <20220519072047.2996983-7-baolu.lu@linux.intel.com> (raw)
In-Reply-To: <20220519072047.2996983-1-baolu.lu@linux.intel.com>

The existing IOMMU SVA interfaces are implemented by calling the
SVA-specific iommu ops provided by the IOMMU drivers. There's no need
for any SVA-specific ops in the iommu_ops vector anymore, as the same
can be achieved through the generic attach/detach_dev_pasid domain ops.

Refactor the IOMMU SVA interface implementation to use the
iommu_set/block_device_pasid interfaces and align it with the concept
of the SVA iommu domain. Put the new SVA code in the SVA-related file
(iommu-sva-lib.c) to make it self-contained.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 include/linux/iommu.h         |  48 ++++++++------
 drivers/iommu/iommu-sva-lib.h |   1 +
 drivers/iommu/iommu-sva-lib.c | 113 ++++++++++++++++++++++++++++++++
 drivers/iommu/iommu.c         | 119 ++++++++--------------------------
 4 files changed, 170 insertions(+), 111 deletions(-)
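
A brief, hypothetical sketch of how a device driver would consume the
refactored SVA interface is included here for context; it is not part
of the patch. The my_drv_* name and the step that programs the PASID
into the device are placeholders for device-specific code.

#include <linux/iommu.h>
#include <linux/sched.h>

static int my_drv_run_sva_job(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;
	int ret;

	/* SVA must be enabled on the device before binding an mm. */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	/* Bind the current process address space; this takes a bond reference. */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_disable;
	}

	/* The PASID the device must use when accessing the bound mm. */
	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		ret = -ENODEV;
		goto out_unbind;
	}

	/*
	 * Placeholder: program @pasid into the device's context and submit
	 * work that issues DMA tagged with this PASID.
	 */
	ret = 0;

out_unbind:
	/* Drop the bond reference once the device has quiesced this PASID. */
	iommu_sva_unbind_device(handle);
out_disable:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return ret;
}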

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e8cf82d46ce1..d9ac5ebe5bbb 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -635,6 +635,7 @@ struct iommu_fwspec {
  */
 struct iommu_sva {
 	struct device			*dev;
+	refcount_t			users;
 };
 
 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -677,11 +678,6 @@ int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
 
-struct iommu_sva *iommu_sva_bind_device(struct device *dev,
-					struct mm_struct *mm);
-void iommu_sva_unbind_device(struct iommu_sva *handle);
-u32 iommu_sva_get_pasid(struct iommu_sva *handle);
-
 int iommu_device_use_default_domain(struct device *dev);
 void iommu_device_unuse_default_domain(struct device *dev);
 
@@ -693,6 +689,8 @@ int iommu_set_device_pasid(struct iommu_domain *domain, struct device *dev,
 			   ioasid_t pasid);
 void iommu_block_device_pasid(struct iommu_domain *domain, struct device *dev,
 			      ioasid_t pasid);
+struct iommu_domain *
+iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid);
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
@@ -1023,21 +1021,6 @@ iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
 	return -ENODEV;
 }
 
-static inline struct iommu_sva *
-iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
-{
-	return NULL;
-}
-
-static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
-{
-}
-
-static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
-{
-	return IOMMU_PASID_INVALID;
-}
-
 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
 {
 	return NULL;
@@ -1077,6 +1060,12 @@ static inline void iommu_block_device_pasid(struct iommu_domain *domain,
 					    struct device *dev, ioasid_t pasid)
 {
 }
+
+static inline struct iommu_domain *
+iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+	return NULL;
+}
 #endif /* CONFIG_IOMMU_API */
 
 /**
@@ -1108,6 +1097,10 @@ iommu_sva_alloc_domain(struct bus_type *bus, struct mm_struct *mm);
 void iommu_sva_free_domain(struct iommu_domain *domain);
 int iommu_sva_set_domain(struct iommu_domain *domain, struct device *dev,
 			 ioasid_t pasid);
+struct iommu_sva *iommu_sva_bind_device(struct device *dev,
+					struct mm_struct *mm);
+void iommu_sva_unbind_device(struct iommu_sva *handle);
+u32 iommu_sva_get_pasid(struct iommu_sva *handle);
 #else /* CONFIG_IOMMU_SVA */
 static inline struct iommu_domain *
 iommu_sva_alloc_domain(struct bus_type *bus, struct mm_struct *mm)
@@ -1124,6 +1117,21 @@ static inline int iommu_sva_set_domain(struct iommu_domain *domain,
 {
 	return -EINVAL;
 }
+
+static inline struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
+{
+	return NULL;
+}
+
+static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+}
+
+static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+	return IOMMU_PASID_INVALID;
+}
 #endif /* CONFIG_IOMMU_SVA */
 
 #endif /* __LINUX_IOMMU_H */
diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h
index 1be21e6b93ec..ebab5a8cb126 100644
--- a/drivers/iommu/iommu-sva-lib.h
+++ b/drivers/iommu/iommu-sva-lib.h
@@ -20,6 +20,7 @@ struct iopf_queue;
 struct iommu_sva_domain {
 	struct iommu_domain	domain;
 	struct mm_struct	*mm;
+	struct iommu_sva	bond;
 };
 
 #define to_sva_domain(d) container_of_safe(d, struct iommu_sva_domain, domain)
diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
index 210c376f6043..568e0f64edac 100644
--- a/drivers/iommu/iommu-sva-lib.c
+++ b/drivers/iommu/iommu-sva-lib.c
@@ -4,6 +4,8 @@
  */
 #include <linux/mutex.h>
 #include <linux/sched/mm.h>
+#include <linux/pci.h>
+#include <linux/pci-ats.h>
 
 #include "iommu-sva-lib.h"
 
@@ -117,3 +119,114 @@ int iommu_sva_set_domain(struct iommu_domain *domain, struct device *dev,
 
 	return iommu_set_device_pasid(domain, dev, pasid);
 }
+
+/**
+ * iommu_sva_bind_device() - Bind a process address space to a device
+ * @dev: the device
+ * @mm: the mm to bind, caller must hold a reference to mm_users
+ *
+ * Create a bond between device and address space, allowing the device to access
+ * the mm using the returned PASID. If a bond already exists between @dev and
+ * @mm, it is returned and an additional reference is taken. Caller must call
+ * iommu_sva_unbind_device() to release each reference.
+ *
+ * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
+ * initialize the required SVA features.
+ *
+ * On error, returns an ERR_PTR value.
+ */
+struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
+{
+	struct iommu_sva_domain *sva_domain;
+	struct iommu_domain *domain;
+	ioasid_t max_pasid = 0;
+	int ret = -EINVAL;
+
+	/* Allocate mm->pasid if necessary. */
+	if (!dev->iommu->iommu_dev->pasids)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (dev_is_pci(dev)) {
+		max_pasid = pci_max_pasids(to_pci_dev(dev));
+		if (max_pasid < 0)
+			return ERR_PTR(max_pasid);
+	} else {
+		ret = device_property_read_u32(dev, "pasid-num-bits",
+					       &max_pasid);
+		if (ret)
+			return ERR_PTR(ret);
+		max_pasid = (1UL << max_pasid);
+	}
+	max_pasid = min_t(u32, max_pasid, dev->iommu->iommu_dev->pasids);
+	ret = iommu_sva_alloc_pasid(mm, 1, max_pasid - 1);
+	if (ret)
+		return ERR_PTR(ret);
+
+	mutex_lock(&iommu_sva_lock);
+	/* Search for an existing domain. */
+	domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid);
+	if (domain) {
+		sva_domain = to_sva_domain(domain);
+		refcount_inc(&sva_domain->bond.users);
+		goto out_success;
+	}
+
+	/* Allocate a new domain and set it on device pasid. */
+	domain = iommu_sva_alloc_domain(dev->bus, mm);
+	if (IS_ERR(domain)) {
+		ret = PTR_ERR(domain);
+		goto out_unlock;
+	}
+
+	ret = iommu_sva_set_domain(domain, dev, mm->pasid);
+	if (ret)
+		goto out_free_domain;
+	sva_domain = to_sva_domain(domain);
+	sva_domain->bond.dev = dev;
+	refcount_set(&sva_domain->bond.users, 1);
+
+out_success:
+	mutex_unlock(&iommu_sva_lock);
+	return &sva_domain->bond;
+
+out_free_domain:
+	iommu_sva_free_domain(domain);
+out_unlock:
+	mutex_unlock(&iommu_sva_lock);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
+
+/**
+ * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
+ * @handle: the handle returned by iommu_sva_bind_device()
+ *
+ * Put reference to a bond between device and address space. The device should
+ * not be issuing any more transactions for this PASID. All outstanding page
+ * requests for this PASID must have been flushed to the IOMMU.
+ */
+void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+	struct device *dev = handle->dev;
+	struct iommu_sva_domain *sva_domain =
+			container_of(handle, struct iommu_sva_domain, bond);
+	ioasid_t pasid = iommu_sva_get_pasid(handle);
+
+	mutex_lock(&iommu_sva_lock);
+	if (refcount_dec_and_test(&sva_domain->bond.users)) {
+		iommu_block_device_pasid(&sva_domain->domain, dev, pasid);
+		iommu_sva_free_domain(&sva_domain->domain);
+	}
+	mutex_unlock(&iommu_sva_lock);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
+
+u32 iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+	struct iommu_sva_domain *sva_domain =
+			container_of(handle, struct iommu_sva_domain, bond);
+
+	return sva_domain->mm->pasid;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 789816e4b9d6..e49c5a5b8cc1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2766,97 +2766,6 @@ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
 }
 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
 
-/**
- * iommu_sva_bind_device() - Bind a process address space to a device
- * @dev: the device
- * @mm: the mm to bind, caller must hold a reference to it
- *
- * Create a bond between device and address space, allowing the device to access
- * the mm using the returned PASID. If a bond already exists between @device and
- * @mm, it is returned and an additional reference is taken. Caller must call
- * iommu_sva_unbind_device() to release each reference.
- *
- * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
- * initialize the required SVA features.
- *
- * On error, returns an ERR_PTR value.
- */
-struct iommu_sva *
-iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
-{
-	struct iommu_group *group;
-	struct iommu_sva *handle = ERR_PTR(-EINVAL);
-	const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-	if (!ops->sva_bind)
-		return ERR_PTR(-ENODEV);
-
-	group = iommu_group_get(dev);
-	if (!group)
-		return ERR_PTR(-ENODEV);
-
-	/* Ensure device count and domain don't change while we're binding */
-	mutex_lock(&group->mutex);
-
-	/*
-	 * To keep things simple, SVA currently doesn't support IOMMU groups
-	 * with more than one device. Existing SVA-capable systems are not
-	 * affected by the problems that required IOMMU groups (lack of ACS
-	 * isolation, device ID aliasing and other hardware issues).
-	 */
-	if (iommu_group_device_count(group) != 1)
-		goto out_unlock;
-
-	handle = ops->sva_bind(dev, mm);
-
-out_unlock:
-	mutex_unlock(&group->mutex);
-	iommu_group_put(group);
-
-	return handle;
-}
-EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
-
-/**
- * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
- * @handle: the handle returned by iommu_sva_bind_device()
- *
- * Put reference to a bond between device and address space. The device should
- * not be issuing any more transaction for this PASID. All outstanding page
- * requests for this PASID must have been flushed to the IOMMU.
- */
-void iommu_sva_unbind_device(struct iommu_sva *handle)
-{
-	struct iommu_group *group;
-	struct device *dev = handle->dev;
-	const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-	if (!ops->sva_unbind)
-		return;
-
-	group = iommu_group_get(dev);
-	if (!group)
-		return;
-
-	mutex_lock(&group->mutex);
-	ops->sva_unbind(handle);
-	mutex_unlock(&group->mutex);
-
-	iommu_group_put(group);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
-
-u32 iommu_sva_get_pasid(struct iommu_sva *handle)
-{
-	const struct iommu_ops *ops = dev_iommu_ops(handle->dev);
-
-	if (!ops->sva_get_pasid)
-		return IOMMU_PASID_INVALID;
-
-	return ops->sva_get_pasid(handle);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
-
 /*
  * Changes the default domain of an iommu group that has *only* one device
  *
@@ -3322,3 +3231,31 @@ void iommu_block_device_pasid(struct iommu_domain *domain, struct device *dev,
 
 	iommu_group_put(group);
 }
+
+/*
+ * This is a variant of iommu_get_domain_for_dev(). It returns the existing
+ * domain attached to pasid of a device. It's only for internal use of the
+ * IOMMU subsystem. The caller must take care to avoid any possible
+ * use-after-free case.
+ */
+struct iommu_domain *
+iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+	struct iommu_domain *domain;
+	struct iommu_group *group;
+
+	if (!pasid_valid(pasid))
+		return NULL;
+
+	group = iommu_group_get(dev);
+	if (!group)
+		return NULL;
+	/*
+	 * The xarray protects its internal state with RCU. Hence the domain
+	 * obtained is either NULL or fully formed.
+	 */
+	domain = xa_load(&group->pasid_array, pasid);
+	iommu_group_put(group);
+
+	return domain;
+}
-- 
2.25.1
