From: Robin Murphy <robin.murphy@arm.com>
To: Joao Martins <joao.m.martins@oracle.com>,
	iommu@lists.linux-foundation.org
Cc: Joerg Roedel <joro@8bytes.org>,
	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
	Will Deacon <will@kernel.org>,
	Jean-Philippe Brucker <jean-philippe@linaro.org>,
	Keqian Zhu <zhukeqian1@huawei.com>,
	Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>,
	David Woodhouse <dwmw2@infradead.org>,
	Lu Baolu <baolu.lu@linux.intel.com>,
	Jason Gunthorpe <jgg@nvidia.com>,
	Nicolin Chen <nicolinc@nvidia.com>,
	Yishai Hadas <yishaih@nvidia.com>,
	Kevin Tian <kevin.tian@intel.com>,
	Eric Auger <eric.auger@redhat.com>, Yi Liu <yi.l.liu@intel.com>,
	Alex Williamson <alex.williamson@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	kvm@vger.kernel.org
Subject: Re: [PATCH RFC 17/19] iommu/arm-smmu-v3: Add unmap_read_dirty() support
Date: Fri, 29 Apr 2022 12:53:03 +0100	[thread overview]
Message-ID: <abdbfda9-63f5-8d66-84b9-0d0badf76233@arm.com> (raw)
In-Reply-To: <20220428210933.3583-18-joao.m.martins@oracle.com>

On 2022-04-28 22:09, Joao Martins wrote:
> Mostly reuses the existing unmap code, with the extra step of
> marshalling dirty status into a bitmap at a given page size. To tackle
> the race, switch away from a plain store to a cmpxchg() and, once it
> succeeds, check whether the IOVA was dirtied.
> 
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> ---
>   drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 17 +++++
>   drivers/iommu/io-pgtable-arm.c              | 78 +++++++++++++++++----
>   2 files changed, 82 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> index 5f728f8f20a2..d1fb757056cc 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> @@ -2499,6 +2499,22 @@ static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long io
>   	return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
>   }
>   
> +static size_t arm_smmu_unmap_pages_read_dirty(struct iommu_domain *domain,
> +					      unsigned long iova, size_t pgsize,
> +					      size_t pgcount,
> +					      struct iommu_iotlb_gather *gather,
> +					      struct iommu_dirty_bitmap *dirty)
> +{
> +	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> +	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
> +
> +	if (!ops)
> +		return 0;
> +
> +	return ops->unmap_pages_read_dirty(ops, iova, pgsize, pgcount,
> +					   gather, dirty);
> +}
> +
>   static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
>   {
>   	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> @@ -2938,6 +2954,7 @@ static struct iommu_ops arm_smmu_ops = {
>   		.free			= arm_smmu_domain_free,
>   		.read_and_clear_dirty	= arm_smmu_read_and_clear_dirty,
>   		.set_dirty_tracking_range = arm_smmu_set_dirty_tracking,
> +		.unmap_pages_read_dirty	= arm_smmu_unmap_pages_read_dirty,
>   	}
>   };
>   
> diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
> index 361410aa836c..143ee7d73f88 100644
> --- a/drivers/iommu/io-pgtable-arm.c
> +++ b/drivers/iommu/io-pgtable-arm.c
> @@ -259,10 +259,30 @@ static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cf
>   		__arm_lpae_sync_pte(ptep, 1, cfg);
>   }
>   
> +static bool __arm_lpae_clear_dirty_pte(arm_lpae_iopte *ptep,
> +				       struct io_pgtable_cfg *cfg)
> +{
> +	arm_lpae_iopte tmp;
> +	bool dirty = false;
> +
> +	do {
> +		tmp = cmpxchg64(ptep, *ptep, 0);
> +		if ((tmp & ARM_LPAE_PTE_DBM) &&
> +		    !(tmp & ARM_LPAE_PTE_AP_RDONLY))
> +			dirty = true;
> +	} while (tmp);
> +
> +	if (!cfg->coherent_walk)
> +		__arm_lpae_sync_pte(ptep, 1, cfg);

Note that this doesn't do enough, since it's only making the CPU's 
clearing of the PTE visible to the SMMU; the cmpxchg could have happily 
succeeded on a stale cached copy of the writeable-clean PTE regardless 
of what the SMMU might have done in the meantime. If we were to even 
pretend to cope with a non-coherent SMMU writing back to the pagetables, 
I think we'd have to scrap the current DMA API approach and make the CPU 
view of the pagetables non-cacheable as well, but as mentioned, there's 
no guarantee that that would even be useful anyway.
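
Concretely, the problematic interleaving looks something like this (an
illustrative comment only, not code from the series):

	/*
	 * coherent_walk == false:
	 *
	 *   CPU (cacheable view)            SMMU (memory view)
	 *   --------------------            ------------------
	 *   caches the PTE as
	 *   writeable-clean
	 *                                   HTTU handles a DMA write:
	 *                                   AP[2] cleared in memory, PTE
	 *                                   now writeable-dirty
	 *   cmpxchg64(ptep, *ptep, 0)
	 *     compares against the stale
	 *     cached copy and "succeeds";
	 *     the dirty state is lost
	 *   __arm_lpae_sync_pte() then only
	 *     pushes the CPU's zeroed PTE
	 *     out; it never pulls the
	 *     SMMU's update in
	 */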

Robin.

> +
> +	return dirty;
> +}
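
As an aside, a tighter shape for that loop (an editorial sketch, still
assuming a coherent walk as discussed above; it does nothing for the
non-coherent case) would be to retry only while the PTE actually changes
under us, rather than spinning until a zero old value is observed:

	static bool __arm_lpae_clear_dirty_pte(arm_lpae_iopte *ptep,
					       struct io_pgtable_cfg *cfg)
	{
		arm_lpae_iopte old, pte = READ_ONCE(*ptep);

		/* Swap the live value for 0; retry if HTTU races with us */
		while ((old = cmpxchg64(ptep, pte, 0)) != pte)
			pte = old;

		if (!cfg->coherent_walk)
			__arm_lpae_sync_pte(ptep, 1, cfg);

		/* DBM set with AP[2] clear means writeable-dirty */
		return (pte & ARM_LPAE_PTE_DBM) &&
		       !(pte & ARM_LPAE_PTE_AP_RDONLY);
	}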
> +
>   static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
>   			       struct iommu_iotlb_gather *gather,
>   			       unsigned long iova, size_t size, size_t pgcount,
> -			       int lvl, arm_lpae_iopte *ptep);
> +			       int lvl, arm_lpae_iopte *ptep,
> +			       struct iommu_dirty_bitmap *dirty);
>   
>   static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
>   				phys_addr_t paddr, arm_lpae_iopte prot,
> @@ -306,8 +326,13 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
>   			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
>   
>   			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
> +
> +			/*
> +			 * No need for dirty bitmap as arm_lpae_init_pte() is
> +			 * only called from __arm_lpae_map()
> +			 */
>   			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
> -					     lvl, tblp) != sz) {
> +					     lvl, tblp, NULL) != sz) {
>   				WARN_ON(1);
>   				return -EINVAL;
>   			}
> @@ -564,7 +589,8 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
>   				       struct iommu_iotlb_gather *gather,
>   				       unsigned long iova, size_t size,
>   				       arm_lpae_iopte blk_pte, int lvl,
> -				       arm_lpae_iopte *ptep, size_t pgcount)
> +				       arm_lpae_iopte *ptep, size_t pgcount,
> +				       struct iommu_dirty_bitmap *dirty)
>   {
>   	struct io_pgtable_cfg *cfg = &data->iop.cfg;
>   	arm_lpae_iopte pte, *tablep;
> @@ -617,13 +643,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
>   		return num_entries * size;
>   	}
>   
> -	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
> +	return __arm_lpae_unmap(data, gather, iova, size, pgcount,
> +				lvl, tablep, dirty);
>   }
>   
>   static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
>   			       struct iommu_iotlb_gather *gather,
>   			       unsigned long iova, size_t size, size_t pgcount,
> -			       int lvl, arm_lpae_iopte *ptep)
> +			       int lvl, arm_lpae_iopte *ptep,
> +			       struct iommu_dirty_bitmap *dirty)
>   {
>   	arm_lpae_iopte pte;
>   	struct io_pgtable *iop = &data->iop;
> @@ -649,7 +677,11 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
>   			if (WARN_ON(!pte))
>   				break;
>   
> -			__arm_lpae_clear_pte(ptep, &iop->cfg);
> +			if (likely(!dirty))
> +				__arm_lpae_clear_pte(ptep, &iop->cfg);
> +			else if (__arm_lpae_clear_dirty_pte(ptep, &iop->cfg))
> +				iommu_dirty_bitmap_record(dirty, iova, size);
> +
>   
>   			if (!iopte_leaf(pte, lvl, iop->fmt)) {
>   				/* Also flush any partial walks */
> @@ -671,17 +703,20 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
>   		 * minus the part we want to unmap
>   		 */
>   		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
> -						lvl + 1, ptep, pgcount);
> +						lvl + 1, ptep, pgcount, dirty);
>   	}
>   
>   	/* Keep on walkin' */
>   	ptep = iopte_deref(pte, data);
> -	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
> +	return __arm_lpae_unmap(data, gather, iova, size, pgcount,
> +				lvl + 1, ptep, dirty);
>   }
>   
> -static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
> -				   size_t pgsize, size_t pgcount,
> -				   struct iommu_iotlb_gather *gather)
> +static size_t __arm_lpae_unmap_pages(struct io_pgtable_ops *ops,
> +				     unsigned long iova,
> +				     size_t pgsize, size_t pgcount,
> +				     struct iommu_iotlb_gather *gather,
> +				     struct iommu_dirty_bitmap *dirty)
>   {
>   	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
>   	struct io_pgtable_cfg *cfg = &data->iop.cfg;
> @@ -697,13 +732,29 @@ static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iov
>   		return 0;
>   
>   	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
> -				data->start_level, ptep);
> +				data->start_level, ptep, dirty);
> +}
> +
> +static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
> +				   size_t pgsize, size_t pgcount,
> +				   struct iommu_iotlb_gather *gather)
> +{
> +	return __arm_lpae_unmap_pages(ops, iova, pgsize, pgcount, gather, NULL);
>   }
>   
>   static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
>   			     size_t size, struct iommu_iotlb_gather *gather)
>   {
> -	return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
> +	return __arm_lpae_unmap_pages(ops, iova, size, 1, gather, NULL);
> +}
> +
> +static size_t arm_lpae_unmap_pages_read_dirty(struct io_pgtable_ops *ops,
> +					      unsigned long iova,
> +					      size_t pgsize, size_t pgcount,
> +					      struct iommu_iotlb_gather *gather,
> +					      struct iommu_dirty_bitmap *dirty)
> +{
> +	return __arm_lpae_unmap_pages(ops, iova, pgsize, pgcount, gather, dirty);
>   }
>   
>   static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
> @@ -969,6 +1020,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
>   		.iova_to_phys	= arm_lpae_iova_to_phys,
>   		.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
>   		.set_dirty_tracking   = arm_lpae_set_dirty_tracking,
> +		.unmap_pages_read_dirty     = arm_lpae_unmap_pages_read_dirty,
>   	};
>   
>   	return data;
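
Given the coherency concern above, one pragmatic option (editorial and
untested) would be to only advertise the new op when the walk is
coherent, e.g. at the end of arm_lpae_alloc_pgtable():

	/*
	 * Hypothetical guard, not part of the patch: a non-coherent
	 * walker can't give us a trustworthy dirty state to read back,
	 * so don't expose the op at all in that case.
	 */
	if (!cfg->coherent_walk)
		data->iop.ops.unmap_pages_read_dirty = NULL;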
