From: Robin Murphy <robin.murphy@arm.com>
To: Christoph Hellwig <hch@lst.de>
Cc: Joerg Roedel <joro@8bytes.org>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will.deacon@arm.com>,
Tom Lendacky <thomas.lendacky@amd.com>,
iommu@lists.linux-foundation.org,
linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org
Subject: Re: [PATCH 12/26] iommu/dma: Refactor the page array remapping allocator
Date: Mon, 29 Apr 2019 14:10:41 +0100 [thread overview]
Message-ID: <847e0d85-36c6-01d1-6547-5ca9d3f0931a@arm.com> (raw)
In-Reply-To: <20190422175942.18788-13-hch@lst.de>
On 22/04/2019 18:59, Christoph Hellwig wrote:
> Move the call to dma_common_pages_remap into __iommu_dma_alloc and
> rename it to iommu_dma_alloc_remap. This creates a self-contained
> helper for remapped pages allocation and mapping.
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> drivers/iommu/dma-iommu.c | 54 +++++++++++++++++++--------------------
> 1 file changed, 26 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 8e2d9733113e..b8e46e89a60a 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -535,9 +535,9 @@ static struct page **__iommu_dma_get_pages(void *cpu_addr)
> }
>
> /**
> - * iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
> + * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc_remap()
> * @dev: Device which owns this buffer
> - * @pages: Array of buffer pages as returned by __iommu_dma_alloc()
> + * @pages: Array of buffer pages as returned by iommu_dma_alloc_remap()
> * @size: Size of buffer in bytes
> * @handle: DMA address of buffer
> *
> @@ -553,33 +553,35 @@ static void __iommu_dma_free(struct device *dev, struct page **pages,
> }
>
> /**
> - * __iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
> + * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
> * @dev: Device to allocate memory for. Must be a real device
> * attached to an iommu_dma_domain
> * @size: Size of buffer in bytes
> + * @dma_handle: Out argument for allocated DMA handle
> * @gfp: Allocation flags
> * @attrs: DMA attributes for this allocation
> - * @prot: IOMMU mapping flags
> - * @handle: Out argument for allocated DMA handle
> *
> * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
> * but an IOMMU which supports smaller pages might not map the whole thing.
> *
> - * Return: Array of struct page pointers describing the buffer,
> - * or NULL on failure.
> + * Return: Mapped virtual address, or NULL on failure.
> */
> -static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
> - gfp_t gfp, unsigned long attrs, int prot, dma_addr_t *handle)
> +static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
> + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
> {
> struct iommu_domain *domain = iommu_get_dma_domain(dev);
> struct iommu_dma_cookie *cookie = domain->iova_cookie;
> struct iova_domain *iovad = &cookie->iovad;
> + bool coherent = dev_is_dma_coherent(dev);
> + int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
> + pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
> + unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
> struct page **pages;
> struct sg_table sgt;
> dma_addr_t iova;
> - unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
> + void *vaddr;
>
> - *handle = DMA_MAPPING_ERROR;
> + *dma_handle = DMA_MAPPING_ERROR;
>
> min_size = alloc_sizes & -alloc_sizes;
> if (min_size < PAGE_SIZE) {
> @@ -605,7 +607,7 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
> if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
> goto out_free_iova;
>
> - if (!(prot & IOMMU_CACHE)) {
> + if (!(ioprot & IOMMU_CACHE)) {
> struct scatterlist *sg;
> int i;
>
> @@ -613,14 +615,21 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
> arch_dma_prep_coherent(sg_page(sg), sg->length);
> }
>
> - if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
> + if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
> < size)
> goto out_free_sg;
>
> - *handle = iova;
> + vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
> + __builtin_return_address(0));
> + if (!vaddr)
> + goto out_unmap;
> +
> + *dma_handle = iova;
> sg_free_table(&sgt);
> - return pages;
> + return vaddr;
>
> +out_unmap:
> + __iommu_dma_unmap(dev, iova, size);
> out_free_sg:
> sg_free_table(&sgt);
> out_free_iova:
> @@ -989,18 +998,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
> size >> PAGE_SHIFT);
> }
> } else {
> - pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
> - struct page **pages;
> -
> - pages = __iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
> - handle);
> - if (!pages)
> - return NULL;
> -
> - addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
> - __builtin_return_address(0));
> - if (!addr)
> - __iommu_dma_free(dev, pages, iosize, handle);
> + addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
> }
> return addr;
> }
> @@ -1014,7 +1012,7 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
> /*
> * @cpu_addr will be one of 4 things depending on how it was allocated:
> * - A remapped array of pages for contiguous allocations.
> - * - A remapped array of pages from __iommu_dma_alloc(), for all
> + * - A remapped array of pages from iommu_dma_alloc_remap(), for all
> * non-atomic allocations.
> * - A non-cacheable alias from the atomic pool, for atomic
> * allocations by non-coherent devices.
>
next prev parent reply other threads:[~2019-04-29 13:10 UTC|newest]
Thread overview: 48+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-04-22 17:59 implement generic dma_map_ops for IOMMUs v3 Christoph Hellwig
2019-04-22 17:59 ` [PATCH 01/26] arm64/iommu: handle non-remapped addresses in ->mmap and ->get_sgtable Christoph Hellwig
2019-04-22 17:59 ` [PATCH 02/26] arm64/iommu: improve mmap bounds checking Christoph Hellwig
2019-04-29 12:35 ` Robin Murphy
2019-04-29 19:01 ` Christoph Hellwig
2019-04-30 11:38 ` Robin Murphy
2019-04-22 17:59 ` [PATCH 03/26] dma-mapping: add a Kconfig symbol to indicated arch_dma_prep_coherent presence Christoph Hellwig
2019-04-22 17:59 ` [PATCH 04/26] iommu/dma: Cleanup dma-iommu.h Christoph Hellwig
2019-04-22 17:59 ` [PATCH 05/26] iommu/dma: Remove the flush_page callback Christoph Hellwig
2019-04-22 17:59 ` [PATCH 06/26] iommu/dma: Use for_each_sg in iommu_dma_alloc Christoph Hellwig
2019-04-22 17:59 ` [PATCH 07/26] iommu/dma: move the arm64 wrappers to common code Christoph Hellwig
2019-04-29 12:56 ` Robin Murphy
2019-06-03 19:47 ` Jon Hunter
2019-06-04 6:05 ` Christoph Hellwig
2019-06-04 11:35 ` Jon Hunter
2019-04-22 17:59 ` [PATCH 08/26] iommu/dma: Move __iommu_dma_map Christoph Hellwig
2019-04-22 17:59 ` [PATCH 09/26] iommu/dma: Move domain lookup into __iommu_dma_{map,unmap} Christoph Hellwig
2019-04-22 17:59 ` [PATCH 10/26] iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers Christoph Hellwig
2019-04-22 17:59 ` [PATCH 11/26] iommu/dma: Factor out remapped pages lookup Christoph Hellwig
2019-04-29 13:05 ` Robin Murphy
2019-04-29 19:10 ` Christoph Hellwig
2019-04-22 17:59 ` [PATCH 12/26] iommu/dma: Refactor the page array remapping allocator Christoph Hellwig
2019-04-29 13:10 ` Robin Murphy [this message]
2019-04-22 17:59 ` [PATCH 13/26] iommu/dma: Remove __iommu_dma_free Christoph Hellwig
2019-04-29 13:18 ` Robin Murphy
2019-04-22 17:59 ` [PATCH 14/26] iommu/dma: Refactor iommu_dma_free Christoph Hellwig
2019-04-29 13:59 ` Robin Murphy
2019-04-29 19:03 ` Christoph Hellwig
2019-04-29 19:16 ` Christoph Hellwig
2019-04-22 17:59 ` [PATCH 15/26] iommu/dma: Refactor iommu_dma_alloc Christoph Hellwig
2019-04-22 17:59 ` [PATCH 16/26] iommu/dma: Don't remap CMA unnecessarily Christoph Hellwig
2019-04-22 17:59 ` [PATCH 17/26] iommu/dma: Merge the CMA and alloc_pages allocation paths Christoph Hellwig
2019-04-22 17:59 ` [PATCH 18/26] iommu/dma: Split iommu_dma_free Christoph Hellwig
2019-04-22 17:59 ` [PATCH 19/26] iommu/dma: Cleanup variable naming in iommu_dma_alloc Christoph Hellwig
2019-04-29 14:11 ` Robin Murphy
2019-04-22 17:59 ` [PATCH 20/26] iommu/dma: Refactor iommu_dma_alloc, part 2 Christoph Hellwig
2019-04-29 14:45 ` Robin Murphy
2019-04-22 17:59 ` [PATCH 21/26] iommu/dma: Refactor iommu_dma_get_sgtable Christoph Hellwig
2019-04-29 14:08 ` Robin Murphy
2019-04-22 17:59 ` [PATCH 22/26] iommu/dma: Refactor iommu_dma_mmap Christoph Hellwig
2019-04-29 14:04 ` Robin Murphy
2019-04-22 17:59 ` [PATCH 23/26] iommu/dma: Don't depend on CONFIG_DMA_DIRECT_REMAP Christoph Hellwig
2019-04-29 14:46 ` Robin Murphy
2019-04-22 17:59 ` [PATCH 24/26] iommu/dma: Switch copyright boilerplace to SPDX Christoph Hellwig
2019-04-22 17:59 ` [PATCH 25/26] arm64: switch copyright boilerplace to SPDX in dma-mapping.c Christoph Hellwig
2019-04-22 17:59 ` [PATCH 26/26] arm64: trim includes " Christoph Hellwig
2019-04-29 15:00 ` Robin Murphy
2019-04-29 15:03 ` implement generic dma_map_ops for IOMMUs v3 Robin Murphy
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=847e0d85-36c6-01d1-6547-5ca9d3f0931a@arm.com \
--to=robin.murphy@arm.com \
--cc=catalin.marinas@arm.com \
--cc=hch@lst.de \
--cc=iommu@lists.linux-foundation.org \
--cc=joro@8bytes.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=thomas.lendacky@amd.com \
--cc=will.deacon@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).