From: Robin Murphy <robin.murphy@arm.com>
To: Ajay Kumar <ajaykumar.rs@samsung.com>,
	linux-arm-kernel@lists.infradead.org,
	iommu@lists.linux-foundation.org, joro@8bytes.org,
	will@kernel.org
Cc: pankaj.dubey@samsung.com, alim.akhtar@samsung.com,
	ajaykumar.rs1989@gmail.com,
	Marek Szyprowski <m.szyprowski@samsung.com>
Subject: Re: [PATCH V2 2/6] iommu: iova: properly handle 0 as a valid IOVA address
Date: Mon, 23 May 2022 18:30:10 +0100	[thread overview]
Message-ID: <a21f3016-a9f1-dc86-8604-ae651a763fc8@arm.com> (raw)
In-Reply-To: <20220511121544.5998-3-ajaykumar.rs@samsung.com>

On 2022-05-11 13:15, Ajay Kumar wrote:
> From: Marek Szyprowski <m.szyprowski@samsung.com>
> 
> Zero is a valid DMA and IOVA address on many architectures, so adjust the
> IOVA management code to properly handle it. A new value IOVA_BAD_ADDR
> (~0UL) is introduced as a generic value for the error case. Adjust all
> callers of the alloc_iova_fast() function for the new return value.

And when does anything actually need this? In fact, if you were to stop
iommu-dma from reserving IOVA 0 - which you don't - it would only show
how patch #3 is broken.

Also note that this really has nothing to do with architectures either
way; iommu-dma simply chooses to reserve IOVA 0 for its own convenience,
mostly because it can. It's much the same as the way 0 is typically a
valid CPU VA, yet mapping anything meaningful there is just asking for a
world of pain when debugging NULL-dereference bugs.
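
For reference, here is roughly where that reservation happens - a
condensed sketch based on iommu_dma_init_domain(), with error handling
and reserved-region setup elided, so treat it as illustrative rather
than verbatim:

	/*
	 * iommu-dma clamps the IOVA domain's base pfn to at least 1 when
	 * it initialises the allocator, so pfn 0 - and hence IOVA 0 - is
	 * never handed out in the first place.
	 */
	order = __ffs(domain->pgsize_bitmap);	/* smallest IOMMU page size */
	base_pfn = max_t(unsigned long, 1, base >> order);
	init_iova_domain(iovad, 1UL << order, base_pfn);

Which means alloc_iova_fast() can never legitimately return pfn 0 for
such a domain, and that is exactly why treating 0 as the failure value
has been safe so far.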

Robin.

> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
> Signed-off-by: Ajay Kumar <ajaykumar.rs@samsung.com>
> ---
>   drivers/iommu/dma-iommu.c | 16 +++++++++-------
>   drivers/iommu/iova.c      | 13 +++++++++----
>   include/linux/iova.h      |  1 +
>   3 files changed, 19 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 1ca85d37eeab..16218d6a0703 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -605,7 +605,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
>   {
>   	struct iommu_dma_cookie *cookie = domain->iova_cookie;
>   	struct iova_domain *iovad = &cookie->iovad;
> -	unsigned long shift, iova_len, iova = 0;
> +	unsigned long shift, iova_len, iova = IOVA_BAD_ADDR;
>   
>   	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
>   		cookie->msi_iova += size;
> @@ -625,11 +625,13 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
>   		iova = alloc_iova_fast(iovad, iova_len,
>   				       DMA_BIT_MASK(32) >> shift, false);
>   
> -	if (!iova)
> +	if (iova == IOVA_BAD_ADDR)
>   		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
>   				       true);
>   
> -	return (dma_addr_t)iova << shift;
> +	if (iova != IOVA_BAD_ADDR)
> +		return (dma_addr_t)iova << shift;
> +	return DMA_MAPPING_ERROR;
>   }
>   
>   static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
> @@ -688,7 +690,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
>   	size = iova_align(iovad, size + iova_off);
>   
>   	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
> -	if (!iova)
> +	if (iova == DMA_MAPPING_ERROR)
>   		return DMA_MAPPING_ERROR;
>   
>   	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
> @@ -799,7 +801,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
>   
>   	size = iova_align(iovad, size);
>   	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
> -	if (!iova)
> +	if (iova == DMA_MAPPING_ERROR)
>   		goto out_free_pages;
>   
>   	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
> @@ -1204,7 +1206,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   	}
>   
>   	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
> -	if (!iova) {
> +	if (iova == DMA_MAPPING_ERROR) {
>   		ret = -ENOMEM;
>   		goto out_restore_sg;
>   	}
> @@ -1516,7 +1518,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
>   		return NULL;
>   
>   	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
> -	if (!iova)
> +	if (iova == DMA_MAPPING_ERROR)
>   		goto out_free_page;
>   
>   	if (iommu_map(domain, iova, msi_addr, size, prot))
> diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
> index db77aa675145..ae0fe0a6714e 100644
> --- a/drivers/iommu/iova.c
> +++ b/drivers/iommu/iova.c
> @@ -429,6 +429,8 @@ EXPORT_SYMBOL_GPL(free_iova);
>    * This function tries to satisfy an iova allocation from the rcache,
>    * and falls back to regular allocation on failure. If regular allocation
>    * fails too and the flush_rcache flag is set then the rcache will be flushed.
> + * Returns the pfn at which the allocated iova starts, or IOVA_BAD_ADDR on
> + * failure.
>   */
>   unsigned long
>   alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
> @@ -447,7 +449,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
>   		size = roundup_pow_of_two(size);
>   
>   	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
> -	if (iova_pfn)
> +	if (iova_pfn != IOVA_BAD_ADDR)
>   		return iova_pfn;
>   
>   retry:
> @@ -456,7 +458,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
>   		unsigned int cpu;
>   
>   		if (!flush_rcache)
> -			return 0;
> +			return IOVA_BAD_ADDR;
>   
>   		/* Try replenishing IOVAs by flushing rcache. */
>   		flush_rcache = false;
> @@ -831,7 +833,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
>   				       unsigned long limit_pfn)
>   {
>   	struct iova_cpu_rcache *cpu_rcache;
> -	unsigned long iova_pfn = 0;
> +	unsigned long iova_pfn = IOVA_BAD_ADDR;
>   	bool has_pfn = false;
>   	unsigned long flags;
>   
> @@ -858,6 +860,9 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
>   
>   	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
>   
> +	if (!iova_pfn)
> +		return IOVA_BAD_ADDR;
> +
>   	return iova_pfn;
>   }
>   
> @@ -873,7 +878,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
>   	unsigned int log_size = order_base_2(size);
>   
>   	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
> -		return 0;
> +		return IOVA_BAD_ADDR;
>   
>   	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
>   }
> diff --git a/include/linux/iova.h b/include/linux/iova.h
> index 320a70e40233..46b5b10c532b 100644
> --- a/include/linux/iova.h
> +++ b/include/linux/iova.h
> @@ -21,6 +21,7 @@ struct iova {
>   	unsigned long	pfn_lo; /* Lowest allocated pfn */
>   };
>   
> +#define IOVA_BAD_ADDR	(~0UL)
>   
>   struct iova_rcache;
>   

Thread overview: 26+ messages

2022-05-11 12:15 ` [PATCH V2 0/6] IOMMU-DMA - support DMA_ATTR_LOW_ADDRESS attribute Ajay Kumar
2022-05-11 12:15   ` [PATCH V2 1/6] dma-mapping: add DMA_ATTR_LOW_ADDRESS attribute Ajay Kumar
2022-05-11 12:15   ` [PATCH V2 2/6] iommu: iova: properly handle 0 as a valid IOVA address Ajay Kumar
2022-05-23 17:30     ` Robin Murphy [this message]
2022-05-30 13:27       ` Ajay Kumar
2022-06-02 15:58       ` Marek Szyprowski
2022-06-06 12:38         ` Robin Murphy
2022-06-17 14:30           ` Marek Szyprowski
2022-05-11 12:15   ` [PATCH V2 3/6] iommu: iova: add support for 'first-fit' algorithm Ajay Kumar
2022-05-11 12:15   ` [PATCH V2 4/6] iommu: dma-iommu: refactor iommu_dma_alloc_iova() Ajay Kumar
2022-05-11 12:15   ` [PATCH V2 5/6] iommu: dma-iommu: add support for DMA_ATTR_LOW_ADDRESS Ajay Kumar
2022-05-11 12:15   ` [PATCH V2 6/6] media: platform: s5p-mfc: use DMA_ATTR_LOW_ADDRESS Ajay Kumar
2022-05-23 15:54 ` [PATCH V2 0/6] IOMMU-DMA - support DMA_ATTR_LOW_ADDRESS attribute Ajay Kumar
