From: Srivatsa Vaddagiri <vatsa@codeaurora.org>
To: konrad.wilk@oracle.com, mst@redhat.com, jasowang@redhat.com,
jan.kiszka@siemens.com, will@kernel.org,
stefano.stabellini@xilinx.com
Cc: tsoni@codeaurora.org, virtio-dev@lists.oasis-open.org,
alex.bennee@linaro.org, vatsa@codeaurora.org,
christoffer.dall@arm.com,
virtualization@lists.linux-foundation.org,
iommu@lists.linux-foundation.org, pratikp@codeaurora.org,
linux-kernel@vger.kernel.org
Subject: [PATCH 3/5] swiotlb: Add alloc and free APIs
Date: Tue, 28 Apr 2020 17:09:16 +0530
Message-ID: <1588073958-1793-4-git-send-email-vatsa@codeaurora.org>
In-Reply-To: <1588073958-1793-1-git-send-email-vatsa@codeaurora.org>

Move the memory allocation and free portions of the swiotlb driver
into independent routines. They will be useful for drivers that
need the swiotlb driver to just allocate/free memory chunks and
not additionally bounce memory.

Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
---
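As an illustration (not part of this patch), a minimal sketch of the
intended usage of the two new APIs. 'pool', 'dev', 'size' and
'tbl_dma_addr' below are hypothetical caller-side values; the pool
pointer is assumed to come from the pool registration API added later
in this series:

	phys_addr_t tlb_addr;

	/* Carve a chunk out of the pool; no bounce state is set up. */
	tlb_addr = swiotlb_alloc(pool, size, tbl_dma_addr,
				 dma_get_seg_boundary(dev));
	if (tlb_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;

	/* ... hand the chunk to the device; no swiotlb_bounce() ... */

	swiotlb_free(pool, tlb_addr, size);
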
include/linux/swiotlb.h | 17 ++++++
kernel/dma/swiotlb.c | 151 ++++++++++++++++++++++++++++--------------------
2 files changed, 106 insertions(+), 62 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index c634b4d..957697e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -186,6 +186,10 @@ void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(void);
+extern phys_addr_t swiotlb_alloc(struct swiotlb_pool *pool, size_t alloc_size,
+ unsigned long tbl_dma_addr, unsigned long mask);
+extern void swiotlb_free(struct swiotlb_pool *pool,
+ phys_addr_t tlb_addr, size_t alloc_size);
#else
#define swiotlb_force SWIOTLB_NO_FORCE
@@ -219,6 +223,19 @@ static inline bool is_swiotlb_active(void)
{
return false;
}
+
+static inline phys_addr_t swiotlb_alloc(struct swiotlb_pool *pool,
+ size_t alloc_size, unsigned long tbl_dma_addr,
+ unsigned long mask)
+{
+ return DMA_MAPPING_ERROR;
+}
+
+static inline void swiotlb_free(struct swiotlb_pool *pool,
+ phys_addr_t tlb_addr, size_t alloc_size)
+{
+}
+
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 8cf0b57..7411ce5 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -444,37 +444,14 @@ static inline void *tlb_vaddr(struct swiotlb_pool *pool, phys_addr_t tlb_addr)
return pool->io_tlb_vstart + (tlb_addr - pool->io_tlb_start);
}
-phys_addr_t _swiotlb_tbl_map_single(struct swiotlb_pool *pool,
- struct device *hwdev,
- dma_addr_t tbl_dma_addr,
- phys_addr_t orig_addr,
- size_t mapping_size,
- size_t alloc_size,
- enum dma_data_direction dir,
- unsigned long attrs)
+phys_addr_t swiotlb_alloc(struct swiotlb_pool *pool, size_t alloc_size,
+ unsigned long tbl_dma_addr, unsigned long mask)
{
unsigned long flags;
phys_addr_t tlb_addr;
- unsigned int nslots, stride, index, wrap;
- int i;
- unsigned long mask;
+ unsigned int i, nslots, stride, index, wrap;
unsigned long offset_slots;
unsigned long max_slots;
- unsigned long tmp_io_tlb_used;
-
- if (pool->no_iotlb_memory)
- panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
-
- if (mem_encrypt_active())
- pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
-
- if (mapping_size > alloc_size) {
- dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
- mapping_size, alloc_size);
- return (phys_addr_t)DMA_MAPPING_ERROR;
- }
-
- mask = dma_get_seg_boundary(hwdev);
tbl_dma_addr &= mask;
@@ -555,54 +532,23 @@ phys_addr_t _swiotlb_tbl_map_single(struct swiotlb_pool *pool,
} while (index != wrap);
not_found:
- tmp_io_tlb_used = pool->io_tlb_used;
-
spin_unlock_irqrestore(&pool->io_tlb_lock, flags);
- if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
- dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
- alloc_size, pool->io_tlb_nslabs, tmp_io_tlb_used);
return (phys_addr_t)DMA_MAPPING_ERROR;
+
found:
pool->io_tlb_used += nslots;
spin_unlock_irqrestore(&pool->io_tlb_lock, flags);
- /*
- * Save away the mapping from the original address to the DMA address.
- * This is needed when we sync the memory. Then we sync the buffer if
- * needed.
- */
- for (i = 0; i < nslots; i++)
- pool->io_tlb_orig_addr[index+i] = orig_addr +
- (i << IO_TLB_SHIFT);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_vaddr(pool, tlb_addr),
- mapping_size, DMA_TO_DEVICE);
-
return tlb_addr;
}
-/*
- * tlb_addr is the physical address of the bounce buffer to unmap.
- */
-void _swiotlb_tbl_unmap_single(struct swiotlb_pool *pool,
- struct device *hwdev, phys_addr_t tlb_addr,
- size_t mapping_size, size_t alloc_size,
- enum dma_data_direction dir, unsigned long attrs)
+void swiotlb_free(struct swiotlb_pool *pool,
+ phys_addr_t tlb_addr, size_t alloc_size)
{
unsigned long flags;
- int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+ int i, count;
+ int nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (tlb_addr - pool->io_tlb_start) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = pool->io_tlb_orig_addr[index];
-
- /*
- * First, sync the memory before unmapping the entry
- */
- if (orig_addr != INVALID_PHYS_ADDR &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
- swiotlb_bounce(orig_addr, tlb_vaddr(pool, tlb_addr),
- mapping_size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
@@ -636,6 +582,87 @@ void _swiotlb_tbl_unmap_single(struct swiotlb_pool *pool,
spin_unlock_irqrestore(&pool->io_tlb_lock, flags);
}
+phys_addr_t _swiotlb_tbl_map_single(struct swiotlb_pool *pool,
+ struct device *hwdev,
+ dma_addr_t tbl_dma_addr,
+ phys_addr_t orig_addr,
+ size_t mapping_size,
+ size_t alloc_size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t tlb_addr;
+ unsigned int nslots, index;
+ int i;
+ unsigned long mask;
+
+ if (pool->no_iotlb_memory)
+ panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+
+ if (mem_encrypt_active())
+ pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
+
+ if (mapping_size > alloc_size) {
+ dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
+ mapping_size, alloc_size);
+ return (phys_addr_t)DMA_MAPPING_ERROR;
+ }
+
+ mask = dma_get_seg_boundary(hwdev);
+
+ tlb_addr = swiotlb_alloc(pool, alloc_size, tbl_dma_addr, mask);
+
+ if (tlb_addr == DMA_MAPPING_ERROR) {
+ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
+ dev_warn(hwdev, "swiotlb buffer is full (sz: %zd "
+ "bytes), total %lu (slots), used %lu (slots)\n",
+ alloc_size, pool->io_tlb_nslabs,
+ pool->io_tlb_used);
+ return (phys_addr_t)DMA_MAPPING_ERROR;
+ }
+
+ nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+ index = (tlb_addr - pool->io_tlb_start) >> IO_TLB_SHIFT;
+
+ /*
+ * Save away the mapping from the original address to the DMA address.
+ * This is needed when we sync the memory. Then we sync the buffer if
+ * needed.
+ */
+ for (i = 0; i < nslots; i++)
+ pool->io_tlb_orig_addr[index+i] = orig_addr +
+ (i << IO_TLB_SHIFT);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+ swiotlb_bounce(orig_addr, tlb_vaddr(pool, tlb_addr),
+ mapping_size, DMA_TO_DEVICE);
+
+ return tlb_addr;
+}
+
+/*
+ * tlb_addr is the physical address of the bounce buffer to unmap.
+ */
+void _swiotlb_tbl_unmap_single(struct swiotlb_pool *pool,
+ struct device *hwdev, phys_addr_t tlb_addr,
+ size_t mapping_size, size_t alloc_size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ int index = (tlb_addr - pool->io_tlb_start) >> IO_TLB_SHIFT;
+ phys_addr_t orig_addr = pool->io_tlb_orig_addr[index];
+
+ /*
+ * First, sync the memory before unmapping the entry
+ */
+ if (orig_addr != INVALID_PHYS_ADDR &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ swiotlb_bounce(orig_addr, tlb_vaddr(pool, tlb_addr),
+ mapping_size, DMA_FROM_DEVICE);
+
+ swiotlb_free(pool, tlb_addr, alloc_size);
+}
+
void _swiotlb_tbl_sync_single(struct swiotlb_pool *pool,
struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
--
2.7.4
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member
of Code Aurora Forum, hosted by The Linux Foundation