* [PATCH v8 01/15] swiotlb: Refactor swiotlb init functions
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 12:58 ` [PATCH v8 02/15] swiotlb: Refactor swiotlb_create_debugfs Claire Chang
` (10 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Add a new function, swiotlb_init_io_tlb_mem, for the io_tlb_mem struct
initialization to make the code reusable.
Note that we now also call set_memory_decrypted in swiotlb_init_with_tbl.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
kernel/dma/swiotlb.c | 51 ++++++++++++++++++++++----------------------
1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 8ca7d505d61c..d3232fc19385 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -168,9 +168,30 @@ void __init swiotlb_update_mem_attributes(void)
memset(vaddr, 0, bytes);
}
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
+ unsigned long nslabs, bool late_alloc)
{
+ void *vaddr = phys_to_virt(start);
unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
+
+ mem->nslabs = nslabs;
+ mem->start = start;
+ mem->end = mem->start + bytes;
+ mem->index = 0;
+ mem->late_alloc = late_alloc;
+ spin_lock_init(&mem->lock);
+ for (i = 0; i < mem->nslabs; i++) {
+ mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
+ mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
+ mem->slots[i].alloc_size = 0;
+ }
+
+ set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+ memset(vaddr, 0, bytes);
+}
+
+int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+{
struct io_tlb_mem *mem;
size_t alloc_size;
@@ -186,16 +207,8 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
if (!mem)
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
__func__, alloc_size, PAGE_SIZE);
- mem->nslabs = nslabs;
- mem->start = __pa(tlb);
- mem->end = mem->start + bytes;
- mem->index = 0;
- spin_lock_init(&mem->lock);
- for (i = 0; i < mem->nslabs; i++) {
- mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
- mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
- mem->slots[i].alloc_size = 0;
- }
+
+ swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
io_tlb_default_mem = mem;
if (verbose)
@@ -282,7 +295,6 @@ swiotlb_late_init_with_default_size(size_t default_size)
int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
- unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
struct io_tlb_mem *mem;
if (swiotlb_force == SWIOTLB_NO_FORCE)
@@ -297,20 +309,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
if (!mem)
return -ENOMEM;
- mem->nslabs = nslabs;
- mem->start = virt_to_phys(tlb);
- mem->end = mem->start + bytes;
- mem->index = 0;
- mem->late_alloc = 1;
- spin_lock_init(&mem->lock);
- for (i = 0; i < mem->nslabs; i++) {
- mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
- mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
- mem->slots[i].alloc_size = 0;
- }
-
- set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
- memset(tlb, 0, bytes);
+ swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
io_tlb_default_mem = mem;
swiotlb_print_info();
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v8 02/15] swiotlb: Refactor swiotlb_create_debugfs
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
2021-05-27 12:58 ` [PATCH v8 01/15] swiotlb: Refactor swiotlb init functions Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 12:58 ` [PATCH v8 03/15] swiotlb: Add DMA_RESTRICTED_POOL Claire Chang
` (9 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Split the debugfs creation to make the code reusable for supporting
different bounce buffer pools, e.g. restricted DMA pool.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
kernel/dma/swiotlb.c | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d3232fc19385..b849b01a446f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -64,6 +64,7 @@
enum swiotlb_force swiotlb_force;
struct io_tlb_mem *io_tlb_default_mem;
+static struct dentry *debugfs_dir;
/*
* Max segment that we can provide which (if pages are contingous) will
@@ -662,18 +663,30 @@ EXPORT_SYMBOL_GPL(is_swiotlb_active);
#ifdef CONFIG_DEBUG_FS
-static int __init swiotlb_create_debugfs(void)
+static void swiotlb_create_debugfs(struct io_tlb_mem *mem, const char *name)
{
- struct io_tlb_mem *mem = io_tlb_default_mem;
-
if (!mem)
- return 0;
- mem->debugfs = debugfs_create_dir("swiotlb", NULL);
+ return;
+
+ mem->debugfs = debugfs_create_dir(name, debugfs_dir);
debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
+}
+
+static int __init swiotlb_create_default_debugfs(void)
+{
+ struct io_tlb_mem *mem = io_tlb_default_mem;
+
+ if (mem) {
+ swiotlb_create_debugfs(mem, "swiotlb");
+ debugfs_dir = mem->debugfs;
+ } else {
+ debugfs_dir = debugfs_create_dir("swiotlb", NULL);
+ }
+
return 0;
}
-late_initcall(swiotlb_create_debugfs);
+late_initcall(swiotlb_create_default_debugfs);
#endif
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v8 03/15] swiotlb: Add DMA_RESTRICTED_POOL
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
2021-05-27 12:58 ` [PATCH v8 01/15] swiotlb: Refactor swiotlb init functions Claire Chang
2021-05-27 12:58 ` [PATCH v8 02/15] swiotlb: Refactor swiotlb_create_debugfs Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 12:58 ` [PATCH v8 04/15] swiotlb: Add restricted DMA pool initialization Claire Chang
` (8 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Add a new kconfig symbol, DMA_RESTRICTED_POOL, for restricted DMA pool.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
kernel/dma/Kconfig | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 77b405508743..3e961dc39634 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -80,6 +80,20 @@ config SWIOTLB
bool
select NEED_DMA_MAP_STATE
+config DMA_RESTRICTED_POOL
+ bool "DMA Restricted Pool"
+ depends on OF && OF_RESERVED_MEM
+ select SWIOTLB
+ help
+ This enables support for restricted DMA pools which provide a level of
+ DMA memory protection on systems with limited hardware protection
+ capabilities, such as those lacking an IOMMU.
+
+ For more information see
+ <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt>
+ and <kernel/dma/swiotlb.c>.
+ If unsure, say "n".
+
#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v8 04/15] swiotlb: Add restricted DMA pool initialization
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
` (2 preceding siblings ...)
2021-05-27 12:58 ` [PATCH v8 03/15] swiotlb: Add DMA_RESTRICTED_POOL Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 12:58 ` [PATCH v8 05/15] swiotlb: Add a new get_io_tlb_mem getter Claire Chang
` (7 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Add the initialization function to create restricted DMA pools from
matching reserved-memory nodes.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
include/linux/device.h | 4 +++
include/linux/swiotlb.h | 3 +-
kernel/dma/swiotlb.c | 76 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 82 insertions(+), 1 deletion(-)
diff --git a/include/linux/device.h b/include/linux/device.h
index 959cb9d2c9ab..e78e1ce0b1b1 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -416,6 +416,7 @@ struct dev_links_info {
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
+ * @dma_io_tlb_mem: Internal for swiotlb io_tlb_mem override.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -521,6 +522,9 @@ struct device {
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
+#endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+ struct io_tlb_mem *dma_io_tlb_mem;
#endif
/* arch specific additions */
struct dev_archdata archdata;
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 216854a5e513..03ad6e3b4056 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -72,7 +72,8 @@ extern enum swiotlb_force swiotlb_force;
* range check to see if the memory was in fact allocated by this
* API.
* @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
- * @end. This is command line adjustable via setup_io_tlb_npages.
+ * @end. For default swiotlb, this is command line adjustable via
+ * setup_io_tlb_npages.
* @used: The number of used IO TLB block.
* @list: The free list describing the number of free entries available
* from each index.
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index b849b01a446f..d99b403144a8 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -39,6 +39,13 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/slab.h>
+#endif
#include <asm/io.h>
#include <asm/dma.h>
@@ -690,3 +697,72 @@ static int __init swiotlb_create_default_debugfs(void)
late_initcall(swiotlb_create_default_debugfs);
#endif
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct io_tlb_mem *mem = rmem->priv;
+ unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
+
+ if (dev->dma_io_tlb_mem)
+ return 0;
+
+ /*
+ * Since multiple devices can share the same pool, the private data,
+ * io_tlb_mem struct, will be initialized by the first device attached
+ * to it.
+ */
+ if (!mem) {
+ mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
+
+ rmem->priv = mem;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ swiotlb_create_debugfs(mem, rmem->name);
+ }
+
+ dev->dma_io_tlb_mem = mem;
+
+ return 0;
+}
+
+static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ if (dev)
+ dev->dma_io_tlb_mem = NULL;
+}
+
+static const struct reserved_mem_ops rmem_swiotlb_ops = {
+ .device_init = rmem_swiotlb_device_init,
+ .device_release = rmem_swiotlb_device_release,
+};
+
+static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
+{
+ unsigned long node = rmem->fdt_node;
+
+ if (of_get_flat_dt_prop(node, "reusable", NULL) ||
+ of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
+ of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
+ of_get_flat_dt_prop(node, "no-map", NULL))
+ return -EINVAL;
+
+ if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
+ pr_err("Restricted DMA pool must be accessible within the linear mapping.");
+ return -EINVAL;
+ }
+
+ rmem->ops = &rmem_swiotlb_ops;
+ pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v8 05/15] swiotlb: Add a new get_io_tlb_mem getter
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
` (3 preceding siblings ...)
2021-05-27 12:58 ` [PATCH v8 04/15] swiotlb: Add restricted DMA pool initialization Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 12:58 ` [PATCH v8 06/15] swiotlb: Update is_swiotlb_buffer to add a struct device argument Claire Chang
` (6 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Add a new getter, get_io_tlb_mem, to help select the io_tlb_mem struct.
The restricted DMA pool is preferred if available.
The reason it was done this way instead of assigning the active pool to
dev->dma_io_tlb_mem was because directly using dev->dma_io_tlb_mem might
cause memory allocation issues for existing devices. The pool can't
support atomic coherent allocation so swiotlb_alloc needs to distinguish
it from the default swiotlb pool.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
include/linux/swiotlb.h | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 03ad6e3b4056..b469f04cca26 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H
+#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -102,6 +103,16 @@ struct io_tlb_mem {
};
extern struct io_tlb_mem *io_tlb_default_mem;
+static inline struct io_tlb_mem *get_io_tlb_mem(struct device *dev)
+{
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+ if (dev && dev->dma_io_tlb_mem)
+ return dev->dma_io_tlb_mem;
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
+
+ return io_tlb_default_mem;
+}
+
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
struct io_tlb_mem *mem = io_tlb_default_mem;
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v8 06/15] swiotlb: Update is_swiotlb_buffer to add a struct device argument
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
` (4 preceding siblings ...)
2021-05-27 12:58 ` [PATCH v8 05/15] swiotlb: Add a new get_io_tlb_mem getter Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 12:58 ` [PATCH v8 07/15] swiotlb: Update is_swiotlb_active " Claire Chang
` (5 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Update is_swiotlb_buffer to add a struct device argument. This will be
useful later to allow for the restricted DMA pool.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
drivers/iommu/dma-iommu.c | 12 ++++++------
drivers/xen/swiotlb-xen.c | 2 +-
include/linux/swiotlb.h | 6 +++---
kernel/dma/direct.c | 6 +++---
kernel/dma/direct.h | 6 +++---
5 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 7bcdd1205535..a5df35bfd150 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -504,7 +504,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
__iommu_dma_unmap(dev, dma_addr, size);
- if (unlikely(is_swiotlb_buffer(phys)))
+ if (unlikely(is_swiotlb_buffer(dev, phys)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
@@ -575,7 +575,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
}
iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
- if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+ if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
return iova;
}
@@ -781,7 +781,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir);
- if (is_swiotlb_buffer(phys))
+ if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}
@@ -794,7 +794,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- if (is_swiotlb_buffer(phys))
+ if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_device(dev, phys, size, dir);
if (!dev_is_dma_coherent(dev))
@@ -815,7 +815,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
- if (is_swiotlb_buffer(sg_phys(sg)))
+ if (is_swiotlb_buffer(dev, sg_phys(sg)))
swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
sg->length, dir);
}
@@ -832,7 +832,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i) {
- if (is_swiotlb_buffer(sg_phys(sg)))
+ if (is_swiotlb_buffer(dev, sg_phys(sg)))
swiotlb_sync_single_for_device(dev, sg_phys(sg),
sg->length, dir);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 24d11861ac7d..0c4fb34f11ab 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -100,7 +100,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
* in our domain. Therefore _only_ check address within our domain.
*/
if (pfn_valid(PFN_DOWN(paddr)))
- return is_swiotlb_buffer(paddr);
+ return is_swiotlb_buffer(dev, paddr);
return 0;
}
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b469f04cca26..2a6cca07540b 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -113,9 +113,9 @@ static inline struct io_tlb_mem *get_io_tlb_mem(struct device *dev)
return io_tlb_default_mem;
}
-static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
- struct io_tlb_mem *mem = io_tlb_default_mem;
+ struct io_tlb_mem *mem = get_io_tlb_mem(dev);
return mem && paddr >= mem->start && paddr < mem->end;
}
@@ -127,7 +127,7 @@ bool is_swiotlb_active(void);
void __init swiotlb_adjust_size(unsigned long size);
#else
#define swiotlb_force SWIOTLB_NO_FORCE
-static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
return false;
}
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f737e3347059..84c9feb5474a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -343,7 +343,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
for_each_sg(sgl, sg, nents, i) {
phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
- if (unlikely(is_swiotlb_buffer(paddr)))
+ if (unlikely(is_swiotlb_buffer(dev, paddr)))
swiotlb_sync_single_for_device(dev, paddr, sg->length,
dir);
@@ -369,7 +369,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(paddr, sg->length, dir);
- if (unlikely(is_swiotlb_buffer(paddr)))
+ if (unlikely(is_swiotlb_buffer(dev, paddr)))
swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
dir);
@@ -504,7 +504,7 @@ size_t dma_direct_max_mapping_size(struct device *dev)
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return !dev_is_dma_coherent(dev) ||
- is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
+ is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}
/**
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index 50afc05b6f1d..13e9e7158d94 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -56,7 +56,7 @@ static inline void dma_direct_sync_single_for_device(struct device *dev,
{
phys_addr_t paddr = dma_to_phys(dev, addr);
- if (unlikely(is_swiotlb_buffer(paddr)))
+ if (unlikely(is_swiotlb_buffer(dev, paddr)))
swiotlb_sync_single_for_device(dev, paddr, size, dir);
if (!dev_is_dma_coherent(dev))
@@ -73,7 +73,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
arch_sync_dma_for_cpu_all();
}
- if (unlikely(is_swiotlb_buffer(paddr)))
+ if (unlikely(is_swiotlb_buffer(dev, paddr)))
swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
if (dir == DMA_FROM_DEVICE)
@@ -113,7 +113,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
- if (unlikely(is_swiotlb_buffer(phys)))
+ if (unlikely(is_swiotlb_buffer(dev, phys)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
#endif /* _KERNEL_DMA_DIRECT_H */
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v8 07/15] swiotlb: Update is_swiotlb_active to add a struct device argument
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
` (5 preceding siblings ...)
2021-05-27 12:58 ` [PATCH v8 06/15] swiotlb: Update is_swiotlb_buffer to add a struct device argument Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 12:58 ` [PATCH v8 08/15] swiotlb: Bounce data from/to restricted DMA pool if available Claire Chang
` (4 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Update is_swiotlb_active to add a struct device argument. This will be
useful later to allow for the restricted DMA pool.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2 +-
drivers/gpu/drm/nouveau/nouveau_ttm.c | 2 +-
drivers/pci/xen-pcifront.c | 2 +-
include/linux/swiotlb.h | 4 ++--
kernel/dma/direct.c | 2 +-
kernel/dma/swiotlb.c | 4 ++--
6 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index ce6b664b10aa..7d48c433446b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
- if (is_swiotlb_active()) {
+ if (is_swiotlb_active(NULL)) {
unsigned int max_segment;
max_segment = swiotlb_max_segment();
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 65430912ff72..d0e998b9e2e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -270,7 +270,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- need_swiotlb = is_swiotlb_active();
+ need_swiotlb = is_swiotlb_active(NULL);
#endif
ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index b7a8f3a1921f..6d548ce53ce7 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -693,7 +693,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
spin_unlock(&pcifront_dev_lock);
- if (!err && !is_swiotlb_active()) {
+ if (!err && !is_swiotlb_active(NULL)) {
err = pci_xen_swiotlb_init_late();
if (err)
dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 2a6cca07540b..c530c976d18b 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -123,7 +123,7 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
-bool is_swiotlb_active(void);
+bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
#else
#define swiotlb_force SWIOTLB_NO_FORCE
@@ -143,7 +143,7 @@ static inline size_t swiotlb_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
-static inline bool is_swiotlb_active(void)
+static inline bool is_swiotlb_active(struct device *dev)
{
return false;
}
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 84c9feb5474a..7a88c34d0867 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -495,7 +495,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
size_t dma_direct_max_mapping_size(struct device *dev)
{
/* If SWIOTLB is active, use its maximum mapping size */
- if (is_swiotlb_active() &&
+ if (is_swiotlb_active(dev) &&
(dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
return swiotlb_max_mapping_size(dev);
return SIZE_MAX;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d99b403144a8..b2b6503ecd88 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -662,9 +662,9 @@ size_t swiotlb_max_mapping_size(struct device *dev)
return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
}
-bool is_swiotlb_active(void)
+bool is_swiotlb_active(struct device *dev)
{
- return io_tlb_default_mem != NULL;
+ return get_io_tlb_mem(dev) != NULL;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v8 08/15] swiotlb: Bounce data from/to restricted DMA pool if available
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
` (6 preceding siblings ...)
2021-05-27 12:58 ` [PATCH v8 07/15] swiotlb: Update is_swiotlb_active " Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 12:58 ` [PATCH v8 09/15] swiotlb: Move alloc_size to find_slots Claire Chang
` (3 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Regardless of swiotlb setting, the restricted DMA pool is preferred if
available.
The restricted DMA pools provide a basic level of protection against the
DMA overwriting buffer contents at unexpected times. However, to protect
against general data leakage and system memory corruption, the system
needs to provide a way to lock down the memory access, e.g., MPU.
Note that is_dev_swiotlb_force doesn't check if
swiotlb_force == SWIOTLB_FORCE. Otherwise the memory allocation behavior
with default swiotlb will be changed by the following patch
("dma-direct: Allocate memory from restricted DMA pool if available").
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
include/linux/swiotlb.h | 13 +++++++++++++
kernel/dma/direct.c | 3 ++-
kernel/dma/direct.h | 3 ++-
kernel/dma/swiotlb.c | 8 ++++----
4 files changed, 21 insertions(+), 6 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index c530c976d18b..0c5a18d9cf89 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -120,6 +120,15 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
return mem && paddr >= mem->start && paddr < mem->end;
}
+static inline bool is_dev_swiotlb_force(struct device *dev)
+{
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+ if (dev->dma_io_tlb_mem)
+ return true;
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
+ return false;
+}
+
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
@@ -131,6 +140,10 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
return false;
}
+static inline bool is_dev_swiotlb_force(struct device *dev)
+{
+ return false;
+}
static inline void swiotlb_exit(void)
{
}
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 7a88c34d0867..078f7087e466 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -496,7 +496,8 @@ size_t dma_direct_max_mapping_size(struct device *dev)
{
/* If SWIOTLB is active, use its maximum mapping size */
if (is_swiotlb_active(dev) &&
- (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
+ (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE ||
+ is_dev_swiotlb_force(dev)))
return swiotlb_max_mapping_size(dev);
return SIZE_MAX;
}
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index 13e9e7158d94..f94813674e23 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -87,7 +87,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
phys_addr_t phys = page_to_phys(page) + offset;
dma_addr_t dma_addr = phys_to_dma(dev, phys);
- if (unlikely(swiotlb_force == SWIOTLB_FORCE))
+ if (unlikely(swiotlb_force == SWIOTLB_FORCE) ||
+ is_dev_swiotlb_force(dev))
return swiotlb_map(dev, phys, size, dir, attrs);
if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index b2b6503ecd88..fa7f23fffc81 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -347,7 +347,7 @@ void __init swiotlb_exit(void)
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
enum dma_data_direction dir)
{
- struct io_tlb_mem *mem = io_tlb_default_mem;
+ struct io_tlb_mem *mem = get_io_tlb_mem(dev);
int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = mem->slots[index].orig_addr;
size_t alloc_size = mem->slots[index].alloc_size;
@@ -429,7 +429,7 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
static int find_slots(struct device *dev, phys_addr_t orig_addr,
size_t alloc_size)
{
- struct io_tlb_mem *mem = io_tlb_default_mem;
+ struct io_tlb_mem *mem = get_io_tlb_mem(dev);
unsigned long boundary_mask = dma_get_seg_boundary(dev);
dma_addr_t tbl_dma_addr =
phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
@@ -506,7 +506,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
size_t mapping_size, size_t alloc_size,
enum dma_data_direction dir, unsigned long attrs)
{
- struct io_tlb_mem *mem = io_tlb_default_mem;
+ struct io_tlb_mem *mem = get_io_tlb_mem(dev);
unsigned int offset = swiotlb_align_offset(dev, orig_addr);
unsigned int i;
int index;
@@ -557,7 +557,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t mapping_size, enum dma_data_direction dir,
unsigned long attrs)
{
- struct io_tlb_mem *mem = io_tlb_default_mem;
+ struct io_tlb_mem *mem = get_io_tlb_mem(hwdev);
unsigned long flags;
unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v8 09/15] swiotlb: Move alloc_size to find_slots
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
` (7 preceding siblings ...)
2021-05-27 12:58 ` [PATCH v8 08/15] swiotlb: Bounce data from/to restricted DMA pool if available Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 12:58 ` [PATCH v8 10/15] swiotlb: Refactor swiotlb_tbl_unmap_single Claire Chang
` (2 subsequent siblings)
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Move the maintenance of alloc_size to find_slots for better code
reusability later.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
kernel/dma/swiotlb.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index fa7f23fffc81..88b3471ac6a8 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -482,8 +482,11 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr,
return -1;
found:
- for (i = index; i < index + nslots; i++)
+ for (i = index; i < index + nslots; i++) {
mem->slots[i].list = 0;
+ mem->slots[i].alloc_size =
+ alloc_size - ((i - index) << IO_TLB_SHIFT);
+ }
for (i = index - 1;
io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
mem->slots[i].list; i--)
@@ -538,11 +541,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
- for (i = 0; i < nr_slots(alloc_size + offset); i++) {
+ for (i = 0; i < nr_slots(alloc_size + offset); i++)
mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
- mem->slots[index + i].alloc_size =
- alloc_size - (i << IO_TLB_SHIFT);
- }
tlb_addr = slot_addr(mem->start, index) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v8 10/15] swiotlb: Refactor swiotlb_tbl_unmap_single
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
` (8 preceding siblings ...)
2021-05-27 12:58 ` [PATCH v8 09/15] swiotlb: Move alloc_size to find_slots Claire Chang
@ 2021-05-27 12:58 ` Claire Chang
2021-05-27 13:32 ` [PATCH v8 00/15] Restricted DMA Christoph Hellwig
2021-06-04 17:48 ` Will Deacon
11 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-05-27 12:58 UTC (permalink / raw)
To: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski
Cc: benh, paulus, list@263.net:IOMMU DRIVERS, sstabellini,
Robin Murphy, grant.likely, xypron.glpk, Thierry Reding, mingo,
bauerman, peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, tientzu, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Add a new function, release_slots, to make the code reusable for supporting
different bounce buffer pools, e.g. restricted DMA pool.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
kernel/dma/swiotlb.c | 35 ++++++++++++++++++++---------------
1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 88b3471ac6a8..c4fc2e444e7a 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -550,27 +550,15 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
return tlb_addr;
}
-/*
- * tlb_addr is the physical address of the bounce buffer to unmap.
- */
-void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
- size_t mapping_size, enum dma_data_direction dir,
- unsigned long attrs)
+static void release_slots(struct device *dev, phys_addr_t tlb_addr)
{
- struct io_tlb_mem *mem = get_io_tlb_mem(hwdev);
+ struct io_tlb_mem *mem = get_io_tlb_mem(dev);
unsigned long flags;
- unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
+ unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
int nslots = nr_slots(mem->slots[index].alloc_size + offset);
int count, i;
- /*
- * First, sync the memory before unmapping the entry
- */
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(hwdev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
-
/*
* Return the buffer to the free list by setting the corresponding
* entries to indicate the number of contiguous entries available.
@@ -605,6 +593,23 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
spin_unlock_irqrestore(&mem->lock, flags);
}
+/*
+ * tlb_addr is the physical address of the bounce buffer to unmap.
+ */
+void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
+ size_t mapping_size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ /*
+ * First, sync the memory before unmapping the entry
+ */
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+ swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
+
+ release_slots(dev, tlb_addr);
+}
+
void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir)
{
--
2.31.1.818.g46aad6cb9e-goog
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCH v8 00/15] Restricted DMA
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
` (9 preceding siblings ...)
2021-05-27 12:58 ` [PATCH v8 10/15] swiotlb: Refactor swiotlb_tbl_unmap_single Claire Chang
@ 2021-05-27 13:32 ` Christoph Hellwig
2021-06-04 17:48 ` Will Deacon
11 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2021-05-27 13:32 UTC (permalink / raw)
To: Claire Chang
Cc: Rob Herring, mpe, Joerg Roedel, Will Deacon, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski, benh, paulus,
list@263.net:IOMMU DRIVERS, sstabellini, Robin Murphy,
grant.likely, xypron.glpk, Thierry Reding, mingo, bauerman,
peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
I just finished reviewing v7, sorry. Let me find some time to see what
difference this version makes.
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v8 00/15] Restricted DMA
2021-05-27 12:58 [PATCH v8 00/15] Restricted DMA Claire Chang
` (10 preceding siblings ...)
2021-05-27 13:32 ` [PATCH v8 00/15] Restricted DMA Christoph Hellwig
@ 2021-06-04 17:48 ` Will Deacon
2021-06-07 3:28 ` Claire Chang
11 siblings, 1 reply; 15+ messages in thread
From: Will Deacon @ 2021-06-04 17:48 UTC (permalink / raw)
To: Claire Chang
Cc: Rob Herring, mpe, Joerg Roedel, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski, benh, paulus,
list@263.net:IOMMU DRIVERS, sstabellini, Robin Murphy,
grant.likely, xypron.glpk, Thierry Reding, mingo, bauerman,
peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, tfiga, bskeggs,
bhelgaas, chris, daniel, airlied, dri-devel, intel-gfx,
jani.nikula, jxgao, joonas.lahtinen, linux-pci,
maarten.lankhorst, matthew.auld, rodrigo.vivi, thomas.hellstrom
Hi Claire,
On Thu, May 27, 2021 at 08:58:30PM +0800, Claire Chang wrote:
> This series implements mitigations for lack of DMA access control on
> systems without an IOMMU, which could result in the DMA accessing the
> system memory at unexpected times and/or unexpected addresses, possibly
> leading to data leakage or corruption.
>
> For example, we plan to use the PCI-e bus for Wi-Fi and that PCI-e bus is
> not behind an IOMMU. As PCI-e, by design, gives the device full access to
> system memory, a vulnerability in the Wi-Fi firmware could easily escalate
> to a full system exploit (remote wifi exploits: [1a], [1b] that shows a
> full chain of exploits; [2], [3]).
>
> To mitigate the security concerns, we introduce restricted DMA. Restricted
> DMA utilizes the existing swiotlb to bounce streaming DMA in and out of a
> specially allocated region and does memory allocation from the same region.
> The feature on its own provides a basic level of protection against the DMA
> overwriting buffer contents at unexpected times. However, to protect
> against general data leakage and system memory corruption, the system needs
> to provide a way to restrict the DMA to a predefined memory region (this is
> usually done at firmware level, e.g. MPU in ATF on some ARM platforms [4]).
>
> [1a] https://googleprojectzero.blogspot.com/2017/04/over-air-exploiting-broadcoms-wi-fi_4.html
> [1b] https://googleprojectzero.blogspot.com/2017/04/over-air-exploiting-broadcoms-wi-fi_11.html
> [2] https://blade.tencent.com/en/advisories/qualpwn/
> [3] https://www.bleepingcomputer.com/news/security/vulnerabilities-found-in-highly-popular-firmware-for-wifi-chips/
> [4] https://github.com/ARM-software/arm-trusted-firmware/blob/master/plat/mediatek/mt8183/drivers/emi_mpu/emi_mpu.c#L132
>
> v8:
> - Fix reserved-memory.txt and add the reg property in example.
> - Fix sizeof for of_property_count_elems_of_size in
> drivers/of/address.c#of_dma_set_restricted_buffer.
> - Apply Will's suggestion to try the OF node having DMA configuration in
> drivers/of/address.c#of_dma_set_restricted_buffer.
> - Fix typo in the comment of drivers/of/address.c#of_dma_set_restricted_buffer.
> - Add error message for PageHighMem in
> kernel/dma/swiotlb.c#rmem_swiotlb_device_init and move it to
> rmem_swiotlb_setup.
> - Fix the message string in rmem_swiotlb_setup.
Thanks for the v8. It works for me out of the box on arm64 under KVM, so:
Tested-by: Will Deacon <will@kernel.org>
Note that something seems to have gone wrong with the mail threading, so
the last 5 patches ended up as a separate thread for me. Probably worth
posting again with all the patches in one place, if you can.
Cheers,
Will
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v8 00/15] Restricted DMA
2021-06-04 17:48 ` Will Deacon
@ 2021-06-07 3:28 ` Claire Chang
2021-06-11 15:31 ` Claire Chang
0 siblings, 1 reply; 15+ messages in thread
From: Claire Chang @ 2021-06-07 3:28 UTC (permalink / raw)
To: Will Deacon
Cc: Rob Herring, mpe, Joerg Roedel, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski, benh, paulus,
list@263.net:IOMMU DRIVERS, sstabellini, Robin Murphy,
grant.likely, xypron.glpk, Thierry Reding, mingo, bauerman,
peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, Tomasz Figa, bskeggs,
Bjorn Helgaas, chris, Daniel Vetter, airlied, dri-devel,
intel-gfx, jani.nikula, Jianxiong Gao, joonas.lahtinen,
linux-pci, maarten.lankhorst, matthew.auld, rodrigo.vivi,
thomas.hellstrom
On Sat, Jun 5, 2021 at 1:48 AM Will Deacon <will@kernel.org> wrote:
>
> Hi Claire,
>
> On Thu, May 27, 2021 at 08:58:30PM +0800, Claire Chang wrote:
> > This series implements mitigations for lack of DMA access control on
> > systems without an IOMMU, which could result in the DMA accessing the
> > system memory at unexpected times and/or unexpected addresses, possibly
> > leading to data leakage or corruption.
> >
> > For example, we plan to use the PCI-e bus for Wi-Fi and that PCI-e bus is
> > not behind an IOMMU. As PCI-e, by design, gives the device full access to
> > system memory, a vulnerability in the Wi-Fi firmware could easily escalate
> > to a full system exploit (remote wifi exploits: [1a], [1b] that shows a
> > full chain of exploits; [2], [3]).
> >
> > To mitigate the security concerns, we introduce restricted DMA. Restricted
> > DMA utilizes the existing swiotlb to bounce streaming DMA in and out of a
> > specially allocated region and does memory allocation from the same region.
> > The feature on its own provides a basic level of protection against the DMA
> > overwriting buffer contents at unexpected times. However, to protect
> > against general data leakage and system memory corruption, the system needs
> > to provide a way to restrict the DMA to a predefined memory region (this is
> > usually done at firmware level, e.g. MPU in ATF on some ARM platforms [4]).
> >
> > [1a] https://googleprojectzero.blogspot.com/2017/04/over-air-exploiting-broadcoms-wi-fi_4.html
> > [1b] https://googleprojectzero.blogspot.com/2017/04/over-air-exploiting-broadcoms-wi-fi_11.html
> > [2] https://blade.tencent.com/en/advisories/qualpwn/
> > [3] https://www.bleepingcomputer.com/news/security/vulnerabilities-found-in-highly-popular-firmware-for-wifi-chips/
> > [4] https://github.com/ARM-software/arm-trusted-firmware/blob/master/plat/mediatek/mt8183/drivers/emi_mpu/emi_mpu.c#L132
> >
> > v8:
> > - Fix reserved-memory.txt and add the reg property in example.
> > - Fix sizeof for of_property_count_elems_of_size in
> > drivers/of/address.c#of_dma_set_restricted_buffer.
> > - Apply Will's suggestion to try the OF node having DMA configuration in
> > drivers/of/address.c#of_dma_set_restricted_buffer.
> > - Fix typo in the comment of drivers/of/address.c#of_dma_set_restricted_buffer.
> > - Add error message for PageHighMem in
> > kernel/dma/swiotlb.c#rmem_swiotlb_device_init and move it to
> > rmem_swiotlb_setup.
> > - Fix the message string in rmem_swiotlb_setup.
>
> Thanks for the v8. It works for me out of the box on arm64 under KVM, so:
>
> Tested-by: Will Deacon <will@kernel.org>
>
> Note that something seems to have gone wrong with the mail threading, so
> the last 5 patches ended up as a separate thread for me. Probably worth
> posting again with all the patches in one place, if you can.
Thanks for testing.
Christoph also added some comments in v7, so I'll prepare v9.
>
> Cheers,
>
> Will
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v8 00/15] Restricted DMA
2021-06-07 3:28 ` Claire Chang
@ 2021-06-11 15:31 ` Claire Chang
0 siblings, 0 replies; 15+ messages in thread
From: Claire Chang @ 2021-06-11 15:31 UTC (permalink / raw)
To: Will Deacon
Cc: Rob Herring, mpe, Joerg Roedel, Frank Rowand,
Konrad Rzeszutek Wilk, boris.ostrovsky, jgross,
Christoph Hellwig, Marek Szyprowski, benh, paulus,
list@263.net:IOMMU DRIVERS, sstabellini, Robin Murphy,
grant.likely, xypron.glpk, Thierry Reding, mingo, bauerman,
peterz, Greg KH, Saravana Kannan, Rafael J . Wysocki,
heikki.krogerus, Andy Shevchenko, Randy Dunlap, Dan Williams,
Bartosz Golaszewski, linux-devicetree, lkml, linuxppc-dev,
xen-devel, Nicolas Boichat, Jim Quinlan, Tomasz Figa, bskeggs,
Bjorn Helgaas, chris, Daniel Vetter, airlied, dri-devel,
intel-gfx, jani.nikula, Jianxiong Gao, joonas.lahtinen,
linux-pci, maarten.lankhorst, matthew.auld, rodrigo.vivi,
thomas.hellstrom
v9 here: https://lore.kernel.org/patchwork/cover/1445081/
On Mon, Jun 7, 2021 at 11:28 AM Claire Chang <tientzu@chromium.org> wrote:
>
> On Sat, Jun 5, 2021 at 1:48 AM Will Deacon <will@kernel.org> wrote:
> >
> > Hi Claire,
> >
> > On Thu, May 27, 2021 at 08:58:30PM +0800, Claire Chang wrote:
> > > This series implements mitigations for lack of DMA access control on
> > > systems without an IOMMU, which could result in the DMA accessing the
> > > system memory at unexpected times and/or unexpected addresses, possibly
> > > leading to data leakage or corruption.
> > >
> > > For example, we plan to use the PCI-e bus for Wi-Fi and that PCI-e bus is
> > > not behind an IOMMU. As PCI-e, by design, gives the device full access to
> > > system memory, a vulnerability in the Wi-Fi firmware could easily escalate
> > > to a full system exploit (remote wifi exploits: [1a], [1b] that shows a
> > > full chain of exploits; [2], [3]).
> > >
> > > To mitigate the security concerns, we introduce restricted DMA. Restricted
> > > DMA utilizes the existing swiotlb to bounce streaming DMA in and out of a
> > > specially allocated region and does memory allocation from the same region.
> > > The feature on its own provides a basic level of protection against the DMA
> > > overwriting buffer contents at unexpected times. However, to protect
> > > against general data leakage and system memory corruption, the system needs
> > > to provide a way to restrict the DMA to a predefined memory region (this is
> > > usually done at firmware level, e.g. MPU in ATF on some ARM platforms [4]).
> > >
> > > [1a] https://googleprojectzero.blogspot.com/2017/04/over-air-exploiting-broadcoms-wi-fi_4.html
> > > [1b] https://googleprojectzero.blogspot.com/2017/04/over-air-exploiting-broadcoms-wi-fi_11.html
> > > [2] https://blade.tencent.com/en/advisories/qualpwn/
> > > [3] https://www.bleepingcomputer.com/news/security/vulnerabilities-found-in-highly-popular-firmware-for-wifi-chips/
> > > [4] https://github.com/ARM-software/arm-trusted-firmware/blob/master/plat/mediatek/mt8183/drivers/emi_mpu/emi_mpu.c#L132
> > >
> > > v8:
> > > - Fix reserved-memory.txt and add the reg property in example.
> > > - Fix sizeof for of_property_count_elems_of_size in
> > > drivers/of/address.c#of_dma_set_restricted_buffer.
> > > - Apply Will's suggestion to try the OF node having DMA configuration in
> > > drivers/of/address.c#of_dma_set_restricted_buffer.
> > > - Fix typo in the comment of drivers/of/address.c#of_dma_set_restricted_buffer.
> > > - Add error message for PageHighMem in
> > > kernel/dma/swiotlb.c#rmem_swiotlb_device_init and move it to
> > > rmem_swiotlb_setup.
> > > - Fix the message string in rmem_swiotlb_setup.
> >
> > Thanks for the v8. It works for me out of the box on arm64 under KVM, so:
> >
> > Tested-by: Will Deacon <will@kernel.org>
> >
> > Note that something seems to have gone wrong with the mail threading, so
> > the last 5 patches ended up as a separate thread for me. Probably worth
> > posting again with all the patches in one place, if you can.
>
> Thanks for testing.
>
> Christoph also added some comments in v7, so I'll prepare v9.
>
> >
> > Cheers,
> >
> > Will
^ permalink raw reply [flat|nested] 15+ messages in thread