* [PATCH v3 1/4] dma-mapping: introduce new dma unmap and sync api variants
2019-11-13 12:24 [PATCH v3 0/4] dma-mapping: introduce new dma unmap and sync variants Laurentiu Tudor
@ 2019-11-13 12:24 ` Laurentiu Tudor
2019-11-13 12:24 ` [PATCH v3 2/4] iommu/dma: wire-up new dma map op .get_virt_addr Laurentiu Tudor
` (3 subsequent siblings)
4 siblings, 0 replies; 9+ messages in thread
From: Laurentiu Tudor @ 2019-11-13 12:24 UTC (permalink / raw)
To: hch, robin.murphy, joro, Ioana Ciocoi Radulescu, linux-kernel,
iommu, netdev, Ioana Ciornei
Cc: Madalin Bucur, Leo Li, Camelia Alexandra Groza, davem
From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Introduce a few new DMA unmap and sync variants that, on top of the
original variants, return the virtual address corresponding to the
input DMA address. Additionally, provide an API that can be used to
check at runtime whether these variants are actually available.
In order to implement them, a new DMA map op is added and used:
void *get_virt_addr(dev, dma_handle);
It performs the actual conversion of an input DMA address to the
corresponding virtual address.
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
---
include/linux/dma-mapping.h | 45 +++++++++++++++++++++++++++++++
kernel/dma/mapping.c | 53 +++++++++++++++++++++++++++++++++++++
2 files changed, 98 insertions(+)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4a1c4fca475a..0940bd75df8e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -132,6 +132,7 @@ struct dma_map_ops {
u64 (*get_required_mask)(struct device *dev);
size_t (*max_mapping_size)(struct device *dev);
unsigned long (*get_merge_boundary)(struct device *dev);
+ void *(*get_virt_addr)(struct device *dev, dma_addr_t dma_handle);
};
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
@@ -442,6 +443,13 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+static inline bool dma_can_unmap_by_dma_addr(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ return dma_is_direct(ops) || (ops && ops->get_virt_addr);
+}
+
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -458,6 +466,14 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
+void *dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+struct page *
+dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void *dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
@@ -534,6 +550,27 @@ static inline void dmam_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
}
+
+static inline void *
+dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return NULL;
+}
+
+static inline struct page *
+dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return NULL;
+}
+
+static inline void *
+dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ return NULL;
+}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
@@ -578,6 +615,11 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
return 0;
}
+
+static inline bool dma_can_unmap_by_dma_addr(struct device *dev)
+{
+ return false;
+}
#endif /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
@@ -610,10 +652,13 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single_desc(d, a, s, r) \
+ dma_unmap_single_attrs_desc(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+#define dma_unmap_page_desc(d, a, s, r) dma_unmap_page_attrs_desc(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index d9334f31a5af..2b6f245c9bb1 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -345,6 +345,59 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
EXPORT_SYMBOL(dma_free_attrs);
+struct page *
+dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *ptr = NULL;
+
+ if (dma_is_direct(ops))
+ ptr = phys_to_virt(dma_to_phys(dev, addr));
+ else if (ops && ops->get_virt_addr)
+ ptr = ops->get_virt_addr(dev, addr);
+
+ dma_unmap_page_attrs(dev, addr, size, dir, attrs);
+
+ return ptr ? virt_to_page(ptr) : NULL;
+}
+EXPORT_SYMBOL_GPL(dma_unmap_page_attrs_desc);
+
+void *dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *ptr = NULL;
+
+ if (dma_is_direct(ops))
+ ptr = phys_to_virt(dma_to_phys(dev, addr));
+ else if (ops && ops->get_virt_addr)
+ ptr = ops->get_virt_addr(dev, addr);
+
+ dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(dma_unmap_single_attrs_desc);
+
+void *dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *ptr = NULL;
+
+ if (dma_is_direct(ops))
+ ptr = phys_to_virt(dma_to_phys(dev, addr));
+ else if (ops && ops->get_virt_addr)
+ ptr = ops->get_virt_addr(dev, addr);
+
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(dma_sync_single_for_cpu_desc);
+
int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
--
2.17.1
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH v3 3/4] swiotlb: make new {unmap, sync}_desc dma apis work with swiotlb
2019-11-13 12:24 [PATCH v3 0/4] dma-mapping: introduce new dma unmap and sync variants Laurentiu Tudor
2019-11-13 12:24 ` [PATCH v3 1/4] dma-mapping: introduce new dma unmap and sync api variants Laurentiu Tudor
2019-11-13 12:24 ` [PATCH v3 2/4] iommu/dma: wire-up new dma map op .get_virt_addr Laurentiu Tudor
@ 2019-11-13 12:24 ` Laurentiu Tudor
2019-11-13 12:24 ` [PATCH v3 4/4] dpaa2_eth: use new unmap and sync dma api variants Laurentiu Tudor
2019-11-13 20:11 ` [PATCH v3 0/4] dma-mapping: introduce new dma unmap and sync variants David Miller
4 siblings, 0 replies; 9+ messages in thread
From: Laurentiu Tudor @ 2019-11-13 12:24 UTC (permalink / raw)
To: hch, robin.murphy, joro, Ioana Ciocoi Radulescu, linux-kernel,
iommu, netdev, Ioana Ciornei
Cc: Madalin Bucur, Leo Li, Camelia Alexandra Groza, davem
From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Add a new swiotlb helper to retrieve the original physical
address given a swiotlb physical address and use it in the new
dma_unmap_single_attrs_desc(), dma_sync_single_for_cpu_desc() and
dma_unmap_page_attrs_desc() APIs to make them work with swiotlb.
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
---
include/linux/swiotlb.h | 7 +++++++
kernel/dma/mapping.c | 43 ++++++++++++++++++++++++++++++++---------
kernel/dma/swiotlb.c | 8 ++++++++
3 files changed, 49 insertions(+), 9 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index cde3dc18e21a..7a6883a71649 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -73,6 +73,8 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
+phys_addr_t swiotlb_get_orig_phys(phys_addr_t tlb_addr);
+
bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void __init swiotlb_exit(void);
@@ -85,6 +87,11 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
return false;
}
+
+static inline phys_addr_t swiotlb_get_orig_phys(phys_addr_t tlb_addr)
+{
+ return PHYS_ADDR_MAX;
+}
static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
unsigned long attrs)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 2b6f245c9bb1..1a2d02727271 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
/*
* Managed DMA API
@@ -352,10 +353,18 @@ dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
const struct dma_map_ops *ops = get_dma_ops(dev);
void *ptr = NULL;
- if (dma_is_direct(ops))
- ptr = phys_to_virt(dma_to_phys(dev, addr));
- else if (ops && ops->get_virt_addr)
+ if (dma_is_direct(ops)) {
+ phys_addr_t phys = dma_to_phys(dev, addr);
+
+ if (is_swiotlb_buffer(phys)) {
+ phys = swiotlb_get_orig_phys(phys);
+ ptr = phys == PHYS_ADDR_MAX ? NULL : phys_to_virt(phys);
+ } else {
+ ptr = phys_to_virt(phys);
+ }
+ } else if (ops && ops->get_virt_addr) {
ptr = ops->get_virt_addr(dev, addr);
+ }
dma_unmap_page_attrs(dev, addr, size, dir, attrs);
@@ -370,10 +379,18 @@ void *dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
void *ptr = NULL;
- if (dma_is_direct(ops))
- ptr = phys_to_virt(dma_to_phys(dev, addr));
- else if (ops && ops->get_virt_addr)
+ if (dma_is_direct(ops)) {
+ phys_addr_t phys = dma_to_phys(dev, addr);
+
+ if (is_swiotlb_buffer(phys)) {
+ phys = swiotlb_get_orig_phys(phys);
+ ptr = phys == PHYS_ADDR_MAX ? NULL : phys_to_virt(phys);
+ } else {
+ ptr = phys_to_virt(phys);
+ }
+ } else if (ops && ops->get_virt_addr) {
ptr = ops->get_virt_addr(dev, addr);
+ }
dma_unmap_single_attrs(dev, addr, size, dir, attrs);
@@ -387,10 +404,18 @@ void *dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
void *ptr = NULL;
- if (dma_is_direct(ops))
- ptr = phys_to_virt(dma_to_phys(dev, addr));
- else if (ops && ops->get_virt_addr)
+ if (dma_is_direct(ops)) {
+ phys_addr_t phys = dma_to_phys(dev, addr);
+
+ if (is_swiotlb_buffer(phys)) {
+ phys = swiotlb_get_orig_phys(phys);
+ ptr = phys == PHYS_ADDR_MAX ? NULL : phys_to_virt(phys);
+ } else {
+ ptr = phys_to_virt(phys);
+ }
+ } else if (ops && ops->get_virt_addr) {
ptr = ops->get_virt_addr(dev, addr);
+ }
dma_sync_single_for_cpu(dev, addr, size, dir);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 673a2cdb2656..9b241cc0535b 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -701,6 +701,14 @@ bool is_swiotlb_active(void)
return io_tlb_end != 0;
}
+phys_addr_t swiotlb_get_orig_phys(phys_addr_t tlb_addr)
+{
+ int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+ phys_addr_t phys = io_tlb_orig_addr[index];
+
+ return phys == INVALID_PHYS_ADDR ? PHYS_ADDR_MAX : phys;
+}
+
#ifdef CONFIG_DEBUG_FS
static int __init swiotlb_create_debugfs(void)
--
2.17.1
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH v3 4/4] dpaa2_eth: use new unmap and sync dma api variants
2019-11-13 12:24 [PATCH v3 0/4] dma-mapping: introduce new dma unmap and sync variants Laurentiu Tudor
` (2 preceding siblings ...)
2019-11-13 12:24 ` [PATCH v3 3/4] swiotlb: make new {unmap, sync}_desc dma apis work with swiotlb Laurentiu Tudor
@ 2019-11-13 12:24 ` Laurentiu Tudor
2019-11-13 20:11 ` [PATCH v3 0/4] dma-mapping: introduce new dma unmap and sync variants David Miller
4 siblings, 0 replies; 9+ messages in thread
From: Laurentiu Tudor @ 2019-11-13 12:24 UTC (permalink / raw)
To: hch, robin.murphy, joro, Ioana Ciocoi Radulescu, linux-kernel,
iommu, netdev, Ioana Ciornei
Cc: Madalin Bucur, Leo Li, Camelia Alexandra Groza, davem
From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Convert this driver to use the newly introduced DMA unmap and
sync API variants. This gets rid of the unsupported direct usage
of the iommu_iova_to_phys() API.
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
---
.../net/ethernet/freescale/dpaa2/dpaa2-eth.c | 43 ++++++++-----------
.../net/ethernet/freescale/dpaa2/dpaa2-eth.h | 1 -
2 files changed, 18 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 19379bae0144..6b941b753106 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -29,16 +29,6 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
-static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
- dma_addr_t iova_addr)
-{
- phys_addr_t phys_addr;
-
- phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
-
- return phys_to_virt(phys_addr);
-}
-
static void validate_rx_csum(struct dpaa2_eth_priv *priv,
u32 fd_status,
struct sk_buff *skb)
@@ -85,9 +75,9 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
sgt = vaddr + dpaa2_fd_get_offset(fd);
for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
- sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ sg_vaddr = dma_unmap_single_desc(dev, addr,
+ DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
free_pages((unsigned long)sg_vaddr, 0);
if (dpaa2_sg_is_final(&sgt[i]))
@@ -143,9 +133,9 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge);
- sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
- dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ sg_vaddr = dma_unmap_single_desc(dev, sg_addr,
+ DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
@@ -210,9 +200,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
int i;
for (i = 0; i < count; i++) {
- vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ vaddr = dma_unmap_single_desc(dev, buf_array[i],
+ DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
free_pages((unsigned long)vaddr, 0);
}
}
@@ -369,9 +359,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
/* Tracing point */
trace_dpaa2_rx_fd(priv->net_dev, fd);
- vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ vaddr = dma_sync_single_for_cpu_desc(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
fas = dpaa2_get_fas(vaddr, false);
prefetch(fas);
@@ -682,7 +671,8 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
u32 fd_len = dpaa2_fd_get_len(fd);
fd_addr = dpaa2_fd_get_addr(fd);
- buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+ buffer_start = dma_sync_single_for_cpu_desc(dev, fd_addr, sizeof(*swa),
+ DMA_BIDIRECTIONAL);
swa = (struct dpaa2_eth_swa *)buffer_start;
if (fd_format == dpaa2_fd_single) {
@@ -3435,6 +3425,11 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dev = &dpni_dev->dev;
+ if (!dma_can_unmap_by_dma_addr(dev)) {
+ dev_err(dev, "required dma unmap/sync APIs not available\n");
+ return -ENOTSUPP;
+ }
+
/* Net device */
net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
if (!net_dev) {
@@ -3448,8 +3443,6 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv = netdev_priv(net_dev);
priv->net_dev = net_dev;
- priv->iommu_domain = iommu_get_domain_for_dev(dev);
-
/* Obtain a MC portal */
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 8a0e65b3267f..4e5183617ebd 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -374,7 +374,6 @@ struct dpaa2_eth_priv {
struct fsl_mc_device *dpbp_dev;
u16 bpid;
- struct iommu_domain *iommu_domain;
bool tx_tstamp; /* Tx timestamping enabled */
bool rx_tstamp; /* Rx timestamping enabled */
--
2.17.1
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu
^ permalink raw reply related [flat|nested] 9+ messages in thread