* [PATCH 1/2] dmaengine: idxd: Add enable/disable device IOPF feature
From: Lu Baolu @ 2023-02-03  8:44 UTC
  To: iommu, dmaengine
  Cc: Joerg Roedel, Will Deacon, Robin Murphy, Kevin Tian, Fenghua Yu,
	Dave Jiang, Vinod Koul, linux-kernel, Lu Baolu

The iommu subsystem requires that IOMMU_DEV_FEAT_IOPF be enabled before,
and disabled after, IOMMU_DEV_FEAT_SVA if the device's I/O page faults
rely on the IOMMU. Add explicit IOMMU_DEV_FEAT_IOPF enabling/disabling to
this driver.

At present, the missing IOPF enabling/disabling doesn't cause any real
issue, because the IOMMU driver currently performs the IOPF
enabling/disabling in its SVA feature handling path. But this may change.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/dma/idxd/init.c | 31 +++++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 529ea09c9094..d5a709a842a8 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -511,6 +511,27 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
 	idxd->sva = NULL;
 }
 
+static int idxd_enable_sva(struct pci_dev *pdev)
+{
+	int ret;
+
+	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+	if (ret)
+		return ret;
+
+	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+	if (ret)
+		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+
+	return ret;
+}
+
+static void idxd_disable_sva(struct pci_dev *pdev)
+{
+	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+}
+
 static int idxd_probe(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;
@@ -525,7 +546,7 @@ static int idxd_probe(struct idxd_device *idxd)
 	dev_dbg(dev, "IDXD reset complete\n");
 
 	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
-		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
+		if (idxd_enable_sva(pdev)) {
 			dev_warn(dev, "Unable to turn on user SVA feature.\n");
 		} else {
 			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
@@ -573,21 +594,19 @@ static int idxd_probe(struct idxd_device *idxd)
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
 	if (device_user_pasid_enabled(idxd))
-		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+		idxd_disable_sva(pdev);
 	return rc;
 }
 
 static void idxd_cleanup(struct idxd_device *idxd)
 {
-	struct device *dev = &idxd->pdev->dev;
-
 	perfmon_pmu_remove(idxd);
 	idxd_cleanup_interrupts(idxd);
 	idxd_cleanup_internals(idxd);
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
 	if (device_user_pasid_enabled(idxd))
-		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+		idxd_disable_sva(idxd->pdev);
 }
 
 static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -705,7 +724,7 @@ static void idxd_remove(struct pci_dev *pdev)
 	pci_free_irq_vectors(pdev);
 	pci_iounmap(pdev, idxd->reg_base);
 	if (device_user_pasid_enabled(idxd))
-		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+		idxd_disable_sva(pdev);
 	pci_disable_device(pdev);
 	destroy_workqueue(idxd->wq);
 	perfmon_pmu_remove(idxd);
-- 
2.34.1



* [PATCH 2/2] iommu/vt-d: Move iopf code from SVA to IOPF enabling path
From: Lu Baolu @ 2023-02-03  8:44 UTC
  To: iommu, dmaengine
  Cc: Joerg Roedel, Will Deacon, Robin Murphy, Kevin Tian, Fenghua Yu,
	Dave Jiang, Vinod Koul, linux-kernel, Lu Baolu

Generally, enabling IOMMU_DEV_FEAT_SVA requires IOMMU_DEV_FEAT_IOPF, but
some devices manage I/O Page Faults themselves instead of relying on the
IOMMU. Move the IOPF-related code from the SVA enabling path to the IOPF
enabling path so that the driver also works for devices that manage IOPF
themselves.

For device drivers that rely on the IOMMU for IOPF through PCI/PRI,
IOMMU_DEV_FEAT_IOPF must be enabled before, and disabled after,
IOMMU_DEV_FEAT_SVA.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/intel/iommu.c | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a1a66798e1f0..149cb20d8dd5 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4632,7 +4632,6 @@ static int intel_iommu_enable_sva(struct device *dev)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct intel_iommu *iommu;
-	int ret;
 
 	if (!info || dmar_disabled)
 		return -EINVAL;
@@ -4644,17 +4643,13 @@ static int intel_iommu_enable_sva(struct device *dev)
 	if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
 		return -ENODEV;
 
-	if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
+	if (!info->pasid_enabled)
 		return -EINVAL;
 
-	ret = iopf_queue_add_device(iommu->iopf_queue, dev);
-	if (!ret)
-		ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
-
-	return ret;
+	return 0;
 }
 
-static int intel_iommu_disable_sva(struct device *dev)
+static int intel_iommu_disable_iopf(struct device *dev)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct intel_iommu *iommu = info->iommu;
@@ -4670,11 +4665,20 @@ static int intel_iommu_disable_sva(struct device *dev)
 static int intel_iommu_enable_iopf(struct device *dev)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	int ret;
 
-	if (info && info->pri_supported)
-		return 0;
+	if (!info || !info->ats_enabled || !info->pri_enabled)
+		return -ENODEV;
 
-	return -ENODEV;
+	ret = iopf_queue_add_device(info->iommu->iopf_queue, dev);
+	if (ret)
+		return ret;
+
+	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+	if (ret)
+		iopf_queue_remove_device(info->iommu->iopf_queue, dev);
+
+	return ret;
 }
 
 static int
@@ -4697,10 +4701,10 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
 {
 	switch (feat) {
 	case IOMMU_DEV_FEAT_IOPF:
-		return 0;
+		return intel_iommu_disable_iopf(dev);
 
 	case IOMMU_DEV_FEAT_SVA:
-		return intel_iommu_disable_sva(dev);
+		return 0;
 
 	default:
 		return -ENODEV;
-- 
2.34.1



* Re: [PATCH 1/2] dmaengine: idxd: Add enable/disable device IOPF feature
From: Dave Jiang @ 2023-02-03 16:08 UTC
  To: Lu Baolu, iommu, dmaengine
  Cc: Joerg Roedel, Will Deacon, Robin Murphy, Kevin Tian, Fenghua Yu,
	Vinod Koul, linux-kernel



On 2/3/23 1:44 AM, Lu Baolu wrote:
> The iommu subsystem requires that IOMMU_DEV_FEAT_IOPF be enabled before,
> and disabled after, IOMMU_DEV_FEAT_SVA if the device's I/O page faults
> rely on the IOMMU. Add explicit IOMMU_DEV_FEAT_IOPF enabling/disabling to
> this driver.
> 
> At present, the missing IOPF enabling/disabling doesn't cause any real
> issue, because the IOMMU driver currently performs the IOPF
> enabling/disabling in its SVA feature handling path. But this may change.
> 
> Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>

Reviewed-by: Dave Jiang <dave.jiang@intel.com>



* RE: [PATCH 2/2] iommu/vt-d: Move iopf code from SVA to IOPF enabling path
From: Tian, Kevin @ 2023-02-06  3:28 UTC
  To: Lu Baolu, iommu, dmaengine
  Cc: Joerg Roedel, Will Deacon, Robin Murphy, Yu, Fenghua, Jiang,
	Dave, Vinod Koul, linux-kernel

> From: Lu Baolu <baolu.lu@linux.intel.com>
> Sent: Friday, February 3, 2023 4:45 PM
> 
> Generally, enabling IOMMU_DEV_FEAT_SVA requires IOMMU_DEV_FEAT_IOPF, but
> some devices manage I/O Page Faults themselves instead of relying on the
> IOMMU. Move the IOPF-related code from the SVA enabling path to the IOPF
> enabling path so that the driver also works for devices that manage IOPF
> themselves.
> 
> For device drivers that rely on the IOMMU for IOPF through PCI/PRI,
> IOMMU_DEV_FEAT_IOPF must be enabled before, and disabled after,
> IOMMU_DEV_FEAT_SVA.

ARM still handles this differently:

arm_smmu_master_enable_sva()
  arm_smmu_master_sva_enable_iopf():
{
	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;
}

i.e. device-specific IOPF is allowed only when PRI or stall is not supported.

That's different from this patch, which allows device-specific IOPF even
when PRI is supported.

Should we make them consistent, given that SVA/IOPF capabilities are general
iommu definitions, or is it fine to leave each iommu driver with its own
restriction?

> 
> -	ret = iopf_queue_add_device(iommu->iopf_queue, dev);
> -	if (!ret)
> -		ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
> -
> -	return ret;
> +	return 0;
>  }

here and below...

> +	ret = iopf_queue_add_device(info->iommu->iopf_queue, dev);
> +	if (ret)
> +		return ret;
> +
> +	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
> +	if (ret)
> +		iopf_queue_remove_device(info->iommu->iopf_queue, dev);
> +
> +	return ret;
>  }

...indicate a bug fix in the error handling. Better to have the fix as a
separate patch and then move the code.


* Re: [PATCH 2/2] iommu/vt-d: Move iopf code from SVA to IOPF enabling path
From: Baolu Lu @ 2023-02-07  6:30 UTC
  To: Tian, Kevin, iommu, dmaengine
  Cc: baolu.lu, Joerg Roedel, Will Deacon, Robin Murphy, Yu, Fenghua,
	Jiang, Dave, Vinod Koul, linux-kernel

On 2023/2/6 11:28, Tian, Kevin wrote:
>> From: Lu Baolu <baolu.lu@linux.intel.com>
>> Sent: Friday, February 3, 2023 4:45 PM
>>
>> Generally, enabling IOMMU_DEV_FEAT_SVA requires IOMMU_DEV_FEAT_IOPF, but
>> some devices manage I/O Page Faults themselves instead of relying on the
>> IOMMU. Move the IOPF-related code from the SVA enabling path to the IOPF
>> enabling path so that the driver also works for devices that manage IOPF
>> themselves.
>>
>> For device drivers that rely on the IOMMU for IOPF through PCI/PRI,
>> IOMMU_DEV_FEAT_IOPF must be enabled before, and disabled after,
>> IOMMU_DEV_FEAT_SVA.
> 
> ARM still handles this differently:
> 
> arm_smmu_master_enable_sva()
>    arm_smmu_master_sva_enable_iopf():
> {
> 	/*
> 	 * Drivers for devices supporting PRI or stall should enable IOPF first.
> 	 * Others have device-specific fault handlers and don't need IOPF.
> 	 */
> 	if (!arm_smmu_master_iopf_supported(master))
> 		return 0;
> 
> 	if (!master->iopf_enabled)
> 		return -EINVAL;
> }
> 
> i.e. device-specific IOPF is allowed only when PRI or stall is not supported.
> 
> That's different from this patch, which allows device-specific IOPF even
> when PRI is supported.
> 
> Should we make them consistent, given that SVA/IOPF capabilities are general
> iommu definitions, or is it fine to leave each iommu driver with its own
> restriction?

Good point! I prefer the former. I will add a check in the SVA enabling
path and return failure if the device supports PRI but PRI is not enabled
(which implies that the device has its own device-specific IOPF handling).

> 
>>
>> -	ret = iopf_queue_add_device(iommu->iopf_queue, dev);
>> -	if (!ret)
>> -		ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
>> -
>> -	return ret;
>> +	return 0;
>>   }
> 
> here and below...
> 
>> +	ret = iopf_queue_add_device(info->iommu->iopf_queue, dev);
>> +	if (ret)
>> +		return ret;
>> +
>> +	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
>> +	if (ret)
>> +		iopf_queue_remove_device(info->iommu->iopf_queue, dev);
>> +
>> +	return ret;
>>   }
> 
> ...indicate a bug fix in the error handling. Better to have the fix as a
> separate patch and then move the code.
> 

Yes. I will post a fix patch before this move.
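
The idea of the standalone fix is simply to undo iopf_queue_add_device() when
registering the fault handler fails in the current intel_iommu_enable_sva();
a rough sketch (the posted patch may look slightly different):

	ret = iopf_queue_add_device(iommu->iopf_queue, dev);
	if (ret)
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret)
		/* Undo the queue addition so we don't leak the device entry. */
		iopf_queue_remove_device(iommu->iopf_queue, dev);

	return ret;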

Best regards,
baolu
