From: Christoph Hellwig <hch@lst.de> To: Mauro Carvalho Chehab <mchehab@kernel.org>, Marek Szyprowski <m.szyprowski@samsung.com>, Tomasz Figa <tfiga@chromium.org>, Ricardo Ribalda <ribalda@chromium.org>, Sergey Senozhatsky <senozhatsky@google.com>, iommu@lists.linux-foundation.org Cc: Robin Murphy <robin.murphy@arm.com>, linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org, linux-media@vger.kernel.org Subject: [PATCH 5/7] dma-iommu: refactor iommu_dma_alloc_remap Date: Tue, 2 Feb 2021 10:51:08 +0100 [thread overview] Message-ID: <20210202095110.1215346-6-hch@lst.de> (raw) In-Reply-To: <20210202095110.1215346-1-hch@lst.de> Split out a new helper that only allocates a sg_table worth of memory without mapping it into contiguous kernel address space. Signed-off-by: Christoph Hellwig <hch@lst.de> --- drivers/iommu/dma-iommu.c | 67 ++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 32 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 255533faf90599..85cb004d7a44c6 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -661,23 +661,12 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev, return pages; } -/** - * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space - * @dev: Device to allocate memory for. Must be a real device - * attached to an iommu_dma_domain - * @size: Size of buffer in bytes - * @dma_handle: Out argument for allocated DMA handle - * @gfp: Allocation flags - * @prot: pgprot_t to use for the remapped mapping - * @attrs: DMA attributes for this allocation - * - * If @size is less than PAGE_SIZE, then a full CPU page will be allocated, +/* + * If size is less than PAGE_SIZE, then a full CPU page will be allocated, * but an IOMMU which supports smaller pages might not map the whole thing. - * - * Return: Mapped virtual address, or NULL on failure. 
*/ -static void *iommu_dma_alloc_remap(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot, +static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev, + size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot, unsigned long attrs) { struct iommu_domain *domain = iommu_get_dma_domain(dev); @@ -687,11 +676,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; struct page **pages; - struct sg_table sgt; dma_addr_t iova; - void *vaddr; - - *dma_handle = DMA_MAPPING_ERROR; if (unlikely(iommu_dma_deferred_attach(dev, domain))) return NULL; @@ -717,38 +702,56 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, if (!iova) goto out_free_pages; - if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) + if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL)) goto out_free_iova; if (!(ioprot & IOMMU_CACHE)) { struct scatterlist *sg; int i; - for_each_sg(sgt.sgl, sg, sgt.orig_nents, i) + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) arch_dma_prep_coherent(sg_page(sg), sg->length); } - if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) + if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot) < size) goto out_free_sg; + sgt->sgl->dma_address = iova; + return pages; + +out_free_sg: + sg_free_table(sgt); +out_free_iova: + iommu_dma_free_iova(cookie, iova, size, NULL); +out_free_pages: + __iommu_dma_free_pages(pages, count); + return NULL; +} + +static void *iommu_dma_alloc_remap(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot, + unsigned long attrs) +{ + struct page **pages; + struct sg_table sgt; + void *vaddr; + + pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot, + attrs); + if (!pages) + return NULL; + *dma_handle = sgt.sgl->dma_address; 
+ sg_free_table(&sgt); vaddr = dma_common_pages_remap(pages, size, prot, __builtin_return_address(0)); if (!vaddr) goto out_unmap; - - *dma_handle = iova; - sg_free_table(&sgt); return vaddr; out_unmap: - __iommu_dma_unmap(dev, iova, size); -out_free_sg: - sg_free_table(&sgt); -out_free_iova: - iommu_dma_free_iova(cookie, iova, size, NULL); -out_free_pages: - __iommu_dma_free_pages(pages, count); + __iommu_dma_unmap(dev, *dma_handle, size); + __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); return NULL; } -- 2.29.2
WARNING: multiple messages have this Message-ID (diff)
From: Christoph Hellwig <hch@lst.de> To: Mauro Carvalho Chehab <mchehab@kernel.org>, Marek Szyprowski <m.szyprowski@samsung.com>, Tomasz Figa <tfiga@chromium.org>, Ricardo Ribalda <ribalda@chromium.org>, Sergey Senozhatsky <senozhatsky@google.com>, iommu@lists.linux-foundation.org Cc: linux-media@vger.kernel.org, Robin Murphy <robin.murphy@arm.com>, linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org Subject: [PATCH 5/7] dma-iommu: refactor iommu_dma_alloc_remap Date: Tue, 2 Feb 2021 10:51:08 +0100 [thread overview] Message-ID: <20210202095110.1215346-6-hch@lst.de> (raw) In-Reply-To: <20210202095110.1215346-1-hch@lst.de> Split out a new helper that only allocates a sg_table worth of memory without mapping it into contiguous kernel address space. Signed-off-by: Christoph Hellwig <hch@lst.de> --- drivers/iommu/dma-iommu.c | 67 ++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 32 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 255533faf90599..85cb004d7a44c6 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -661,23 +661,12 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev, return pages; } -/** - * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space - * @dev: Device to allocate memory for. Must be a real device - * attached to an iommu_dma_domain - * @size: Size of buffer in bytes - * @dma_handle: Out argument for allocated DMA handle - * @gfp: Allocation flags - * @prot: pgprot_t to use for the remapped mapping - * @attrs: DMA attributes for this allocation - * - * If @size is less than PAGE_SIZE, then a full CPU page will be allocated, +/* + * If size is less than PAGE_SIZE, then a full CPU page will be allocated, * but an IOMMU which supports smaller pages might not map the whole thing. - * - * Return: Mapped virtual address, or NULL on failure. 
*/ -static void *iommu_dma_alloc_remap(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot, +static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev, + size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot, unsigned long attrs) { struct iommu_domain *domain = iommu_get_dma_domain(dev); @@ -687,11 +676,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; struct page **pages; - struct sg_table sgt; dma_addr_t iova; - void *vaddr; - - *dma_handle = DMA_MAPPING_ERROR; if (unlikely(iommu_dma_deferred_attach(dev, domain))) return NULL; @@ -717,38 +702,56 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, if (!iova) goto out_free_pages; - if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) + if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL)) goto out_free_iova; if (!(ioprot & IOMMU_CACHE)) { struct scatterlist *sg; int i; - for_each_sg(sgt.sgl, sg, sgt.orig_nents, i) + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) arch_dma_prep_coherent(sg_page(sg), sg->length); } - if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) + if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot) < size) goto out_free_sg; + sgt->sgl->dma_address = iova; + return pages; + +out_free_sg: + sg_free_table(sgt); +out_free_iova: + iommu_dma_free_iova(cookie, iova, size, NULL); +out_free_pages: + __iommu_dma_free_pages(pages, count); + return NULL; +} + +static void *iommu_dma_alloc_remap(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot, + unsigned long attrs) +{ + struct page **pages; + struct sg_table sgt; + void *vaddr; + + pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot, + attrs); + if (!pages) + return NULL; + *dma_handle = sgt.sgl->dma_address; 
+ sg_free_table(&sgt); vaddr = dma_common_pages_remap(pages, size, prot, __builtin_return_address(0)); if (!vaddr) goto out_unmap; - - *dma_handle = iova; - sg_free_table(&sgt); return vaddr; out_unmap: - __iommu_dma_unmap(dev, iova, size); -out_free_sg: - sg_free_table(&sgt); -out_free_iova: - iommu_dma_free_iova(cookie, iova, size, NULL); -out_free_pages: - __iommu_dma_free_pages(pages, count); + __iommu_dma_unmap(dev, *dma_handle, size); + __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); return NULL; } -- 2.29.2 _______________________________________________ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
next prev parent reply other threads:[~2021-02-02 9:54 UTC|newest] Thread overview: 51+ messages / expand[flat|nested] mbox.gz Atom feed top 2021-02-02 9:51 add a new dma_alloc_noncontiguous API v2 Christoph Hellwig 2021-02-02 9:51 ` Christoph Hellwig 2021-02-02 9:51 ` [PATCH 1/7] dma-mapping: remove the {alloc,free}_noncoherent methods Christoph Hellwig 2021-02-02 9:51 ` Christoph Hellwig 2021-02-02 9:51 ` [PATCH 2/7] dma-mapping: add a dma_mmap_pages helper Christoph Hellwig 2021-02-02 9:51 ` Christoph Hellwig 2021-02-02 9:51 ` [PATCH 3/7] dma-mapping: refactor dma_{alloc,free}_pages Christoph Hellwig 2021-02-02 9:51 ` Christoph Hellwig 2021-02-02 9:51 ` [PATCH 4/7] dma-mapping: add a dma_alloc_noncontiguous API Christoph Hellwig 2021-02-02 9:51 ` Christoph Hellwig 2021-02-16 18:55 ` Robin Murphy 2021-02-16 18:55 ` Robin Murphy 2021-03-01 8:09 ` Christoph Hellwig 2021-03-01 8:09 ` Christoph Hellwig 2021-02-02 9:51 ` Christoph Hellwig [this message] 2021-02-02 9:51 ` [PATCH 5/7] dma-iommu: refactor iommu_dma_alloc_remap Christoph Hellwig 2021-02-02 9:51 ` [PATCH 6/7] dma-iommu: implement ->alloc_noncontiguous Christoph Hellwig 2021-02-02 9:51 ` Christoph Hellwig 2021-02-16 8:14 ` Tomasz Figa 2021-02-16 8:14 ` Tomasz Figa 2021-02-16 8:49 ` Christoph Hellwig 2021-02-16 8:49 ` Christoph Hellwig 2021-03-01 7:17 ` Sergey Senozhatsky 2021-03-01 7:17 ` Sergey Senozhatsky 2021-03-01 7:21 ` Christoph Hellwig 2021-03-01 7:21 ` Christoph Hellwig 2021-03-01 8:02 ` Sergey Senozhatsky 2021-03-01 8:02 ` Sergey Senozhatsky 2021-03-01 8:11 ` Christoph Hellwig 2021-03-01 8:11 ` Christoph Hellwig 2021-02-02 9:51 ` [PATCH 7/7] media: uvcvideo: Use dma_alloc_noncontiguos API Christoph Hellwig 2021-02-02 9:51 ` Christoph Hellwig 2021-02-04 7:39 ` Hillf Danton 2021-02-07 18:48 ` add a new dma_alloc_noncontiguous API v2 Christoph Hellwig 2021-02-07 18:48 ` Christoph Hellwig 2021-02-08 11:33 ` Tomasz Figa 2021-02-08 11:33 ` Tomasz Figa 2021-02-09 8:22 ` Christoph Hellwig 2021-02-09 8:29 ` 
Ricardo Ribalda 2021-02-09 8:29 ` Ricardo Ribalda 2021-02-09 14:46 ` Ricardo Ribalda 2021-02-09 14:46 ` Ricardo Ribalda 2021-02-09 17:02 ` Christoph Hellwig 2021-02-11 9:08 ` Ricardo Ribalda 2021-02-11 9:08 ` Ricardo Ribalda 2021-02-11 13:06 ` Christoph Hellwig 2021-02-11 13:06 ` Christoph Hellwig 2021-02-11 13:20 ` Ricardo Ribalda 2021-02-11 13:20 ` Ricardo Ribalda 2021-02-11 13:55 ` Laurent Pinchart 2021-02-11 13:55 ` Laurent Pinchart
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20210202095110.1215346-6-hch@lst.de \ --to=hch@lst.de \ --cc=iommu@lists.linux-foundation.org \ --cc=linux-doc@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-media@vger.kernel.org \ --cc=m.szyprowski@samsung.com \ --cc=mchehab@kernel.org \ --cc=ribalda@chromium.org \ --cc=robin.murphy@arm.com \ --cc=senozhatsky@google.com \ --cc=tfiga@chromium.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.