From: Claire Chang <tientzu@chromium.org>
To: Rob Herring <robh+dt@kernel.org>,
mpe@ellerman.id.au, Joerg Roedel <joro@8bytes.org>,
Will Deacon <will@kernel.org>,
Frank Rowand <frowand.list@gmail.com>,
Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
boris.ostrovsky@oracle.com, jgross@suse.com,
Christoph Hellwig <hch@lst.de>,
Marek Szyprowski <m.szyprowski@samsung.com>
Cc: benh@kernel.crashing.org, paulus@samba.org,
"list@263.net:IOMMU DRIVERS" <iommu@lists.linux-foundation.org>,
sstabellini@kernel.org, Robin Murphy <robin.murphy@arm.com>,
grant.likely@arm.com, xypron.glpk@gmx.de,
Thierry Reding <treding@nvidia.com>,
mingo@kernel.org, bauerman@linux.ibm.com, peterz@infradead.org,
Greg KH <gregkh@linuxfoundation.org>,
Saravana Kannan <saravanak@google.com>,
"Rafael J . Wysocki" <rafael.j.wysocki@intel.com>,
heikki.krogerus@linux.intel.com,
Andy Shevchenko <andriy.shevchenko@linux.intel.com>,
Randy Dunlap <rdunlap@infradead.org>,
Dan Williams <dan.j.williams@intel.com>,
Bartosz Golaszewski <bgolaszewski@baylibre.com>,
linux-devicetree <devicetree@vger.kernel.org>,
lkml <linux-kernel@vger.kernel.org>,
linuxppc-dev@lists.ozlabs.org, xen-devel@lists.xenproject.org,
Nicolas Boichat <drinkcat@chromium.org>,
Jim Quinlan <james.quinlan@broadcom.com>,
Claire Chang <tientzu@chromium.org>
Subject: [PATCH v4 09/14] swiotlb: Refactor swiotlb_tbl_{map,unmap}_single
Date: Tue, 9 Feb 2021 14:21:26 +0800 [thread overview]
Message-ID: <20210209062131.2300005-10-tientzu@chromium.org> (raw)
In-Reply-To: <20210209062131.2300005-1-tientzu@chromium.org>
Split the slot allocation and release logic out of
swiotlb_tbl_{map,unmap}_single() into two new helpers,
swiotlb_tbl_find_free_region() and swiotlb_tbl_release_region(), so the
code can be reused by the upcoming dev_swiotlb_{alloc,free}().
swiotlb_tbl_find_free_region() returns a slot index on success and
-ENOMEM on failure, leaving the orig_addr bookkeeping and bouncing to
swiotlb_tbl_map_single().
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
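Note: below is a minimal sketch of how the two new helpers could back the
dev_swiotlb_{alloc,free} functions added later in this series.
dev_swiotlb_alloc/free are not part of this patch, so their names and
signatures here are assumptions; only swiotlb_tbl_find_free_region() and
swiotlb_tbl_release_region() come from this patch. Returning a slot index
(or -ENOMEM) instead of a phys_addr_t is what lets callers skip the
orig_addr bookkeeping and CPU syncs entirely:

/*
 * Hypothetical sketch, not part of this patch: a per-device allocator
 * reusing the helpers split out below (both live in swiotlb.c).
 */
static void *dev_swiotlb_alloc(struct device *hwdev, size_t size)
{
	struct swiotlb *swiotlb = get_swiotlb(hwdev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(hwdev, swiotlb->start);
	int index;

	/* Reuse the slot search; no orig_addr bookkeeping or bouncing. */
	index = swiotlb_tbl_find_free_region(hwdev, tbl_dma_addr, size,
					     DMA_ATTR_NO_WARN);
	if (index < 0)
		return NULL;

	return phys_to_virt(swiotlb->start + (index << IO_TLB_SHIFT));
}

static void dev_swiotlb_free(struct device *hwdev, void *vaddr, size_t size)
{
	struct swiotlb *swiotlb = get_swiotlb(hwdev);
	int index = (virt_to_phys(vaddr) - swiotlb->start) >> IO_TLB_SHIFT;

	swiotlb_tbl_release_region(hwdev, index, size);
}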
kernel/dma/swiotlb.c | 116 ++++++++++++++++++++++++++-----------------
1 file changed, 71 insertions(+), 45 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 6fdebde8fb1f..f64cbe6e84cc 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -509,14 +509,12 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
}
}
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
- size_t mapping_size, size_t alloc_size,
- enum dma_data_direction dir, unsigned long attrs)
+static int swiotlb_tbl_find_free_region(struct device *hwdev,
+ dma_addr_t tbl_dma_addr,
+ size_t alloc_size, unsigned long attrs)
{
struct swiotlb *swiotlb = get_swiotlb(hwdev);
- dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, swiotlb->start);
unsigned long flags;
- phys_addr_t tlb_addr;
unsigned int nslots, stride, index, wrap;
int i;
unsigned long mask;
@@ -531,15 +529,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
#endif
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
- if (mem_encrypt_active())
- pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
-
- if (mapping_size > alloc_size) {
- dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
- mapping_size, alloc_size);
- return (phys_addr_t)DMA_MAPPING_ERROR;
- }
-
mask = dma_get_seg_boundary(hwdev);
tbl_dma_addr &= mask;
@@ -601,7 +590,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
swiotlb->list[i] = 0;
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && swiotlb->list[i]; i--)
swiotlb->list[i] = ++count;
- tlb_addr = swiotlb->start + (index << IO_TLB_SHIFT);
/*
* Update the indices to avoid searching in the next
@@ -624,45 +612,20 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
alloc_size, swiotlb->nslabs, tmp_io_tlb_used);
- return (phys_addr_t)DMA_MAPPING_ERROR;
+ return -ENOMEM;
+
found:
swiotlb->used += nslots;
spin_unlock_irqrestore(&swiotlb->lock, flags);
- /*
- * Save away the mapping from the original address to the DMA address.
- * This is needed when we sync the memory. Then we sync the buffer if
- * needed.
- */
- for (i = 0; i < nslots; i++)
- swiotlb->orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
-
- return tlb_addr;
+ return index;
}
-/*
- * tlb_addr is the physical address of the bounce buffer to unmap.
- */
-void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
- size_t mapping_size, size_t alloc_size,
- enum dma_data_direction dir, unsigned long attrs)
+static void swiotlb_tbl_release_region(struct device *hwdev, int index, size_t size)
{
struct swiotlb *swiotlb = get_swiotlb(hwdev);
unsigned long flags;
- int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- int index = (tlb_addr - swiotlb->start) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = swiotlb->orig_addr[index];
-
- /*
- * First, sync the memory before unmapping the entry
- */
- if (orig_addr != INVALID_PHYS_ADDR &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
- swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
+ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
/*
* Return the buffer to the free list by setting the corresponding
@@ -694,6 +657,69 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
spin_unlock_irqrestore(&swiotlb->lock, flags);
}
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
+ size_t mapping_size, size_t alloc_size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct swiotlb *swiotlb = get_swiotlb(hwdev);
+ dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, swiotlb->start);
+ phys_addr_t tlb_addr;
+	unsigned int nslots;
+	int i, index;
+
+ if (mem_encrypt_active())
+ pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
+
+ if (mapping_size > alloc_size) {
+ dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
+ mapping_size, alloc_size);
+ return (phys_addr_t)DMA_MAPPING_ERROR;
+ }
+
+ index = swiotlb_tbl_find_free_region(hwdev, tbl_dma_addr, alloc_size, attrs);
+ if (index < 0)
+ return (phys_addr_t)DMA_MAPPING_ERROR;
+
+ tlb_addr = swiotlb->start + (index << IO_TLB_SHIFT);
+
+ /*
+ * Save away the mapping from the original address to the DMA address.
+ * This is needed when we sync the memory. Then we sync the buffer if
+ * needed.
+ */
+ nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+ for (i = 0; i < nslots; i++)
+ swiotlb->orig_addr[index + i] = orig_addr + (i << IO_TLB_SHIFT);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+ swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
+
+ return tlb_addr;
+}
+
+/*
+ * tlb_addr is the physical address of the bounce buffer to unmap.
+ */
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+ size_t mapping_size, size_t alloc_size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct swiotlb *swiotlb = get_swiotlb(hwdev);
+ int index = (tlb_addr - swiotlb->start) >> IO_TLB_SHIFT;
+ phys_addr_t orig_addr = swiotlb->orig_addr[index];
+
+ /*
+ * First, sync the memory before unmapping the entry
+ */
+ if (orig_addr != INVALID_PHYS_ADDR &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
+
+ swiotlb_tbl_release_region(hwdev, index, alloc_size);
+}
+
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target)
--
2.30.0.478.g8a0d178c01-goog