From: Claire Chang <tientzu@chromium.org>
To: Rob Herring <robh+dt@kernel.org>,
mpe@ellerman.id.au, Joerg Roedel <joro@8bytes.org>,
Will Deacon <will@kernel.org>,
Frank Rowand <frowand.list@gmail.com>,
Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
boris.ostrovsky@oracle.com, jgross@suse.com,
Christoph Hellwig <hch@lst.de>,
Marek Szyprowski <m.szyprowski@samsung.com>
Cc: benh@kernel.crashing.org, paulus@samba.org,
"list@263.net:IOMMU DRIVERS" <iommu@lists.linux-foundation.org>,
sstabellini@kernel.org, Robin Murphy <robin.murphy@arm.com>,
grant.likely@arm.com, xypron.glpk@gmx.de,
Thierry Reding <treding@nvidia.com>,
mingo@kernel.org, bauerman@linux.ibm.com, peterz@infradead.org,
Greg KH <gregkh@linuxfoundation.org>,
Saravana Kannan <saravanak@google.com>,
"Rafael J . Wysocki" <rafael.j.wysocki@intel.com>,
heikki.krogerus@linux.intel.com,
Andy Shevchenko <andriy.shevchenko@linux.intel.com>,
Randy Dunlap <rdunlap@infradead.org>,
Dan Williams <dan.j.williams@intel.com>,
Bartosz Golaszewski <bgolaszewski@baylibre.com>,
linux-devicetree <devicetree@vger.kernel.org>,
lkml <linux-kernel@vger.kernel.org>,
linuxppc-dev@lists.ozlabs.org, xen-devel@lists.xenproject.org,
Nicolas Boichat <drinkcat@chromium.org>,
Jim Quinlan <james.quinlan@broadcom.com>,
tfiga@chromium.org, bskeggs@redhat.com, bhelgaas@google.com,
chris@chris-wilson.co.uk, tientzu@chromium.org, daniel@ffwll.ch,
airlied@linux.ie, dri-devel@lists.freedesktop.org,
intel-gfx@lists.freedesktop.org, jani.nikula@linux.intel.com,
jxgao@google.com, joonas.lahtinen@linux.intel.com,
linux-pci@vger.kernel.org, maarten.lankhorst@linux.intel.com,
matthew.auld@intel.com, rodrigo.vivi@intel.com,
thomas.hellstrom@linux.intel.com
Subject: [PATCH v7 13/15] dma-direct: Allocate memory from restricted DMA pool if available
Date: Tue, 18 May 2021 14:42:13 +0800
Message-ID: <20210518064215.2856977-14-tientzu@chromium.org>
In-Reply-To: <20210518064215.2856977-1-tientzu@chromium.org>
The restricted DMA pool is preferred if available.
The restricted DMA pools provide a basic level of protection against DMA
overwriting buffer contents at unexpected times. However, to protect
against general data leakage and system memory corruption, the system
needs to provide a way to lock down the memory access, e.g., an MPU.
Note that since coherent allocation needs remapping, one must set up
another device coherent pool via the shared-dma-pool reserved-memory
binding and use dma_alloc_from_dev_coherent() instead for atomic coherent
allocations.
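For illustration only (not part of this patch), a minimal driver-side sketch
of that setup, assuming the device's memory-region also references a
"shared-dma-pool" reserved-memory node: the DMA core tries the per-device
coherent pool (dma_alloc_from_dev_coherent()) before it ever reaches
dma-direct, so the atomic allocation below never needs to remap memory.

#include <linux/dma-mapping.h>

/*
 * Hypothetical helper: "dev" is assumed to have both a restricted DMA pool
 * and a "shared-dma-pool" coherent pool bound via its memory-region.  The
 * per-device coherent pool satisfies this GFP_ATOMIC allocation through
 * dma_alloc_from_dev_coherent(), without any remapping in atomic context.
 */
static void *example_alloc_desc_ring(struct device *dev, size_t size,
				     dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC);
}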
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
kernel/dma/direct.c | 38 +++++++++++++++++++++++++++++---------
1 file changed, 29 insertions(+), 9 deletions(-)
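For reference (a condensed sketch, not the literal hunks below), the
allocation order this patch establishes in __dma_direct_alloc_pages() is:
try the device's restricted DMA pool first, check the result against the
device's coherent DMA mask, and only then fall back to the regular
contiguous allocator.

/*
 * Condensed sketch of the new allocation order; swiotlb_alloc() and
 * swiotlb_free() are the restricted-pool helpers added earlier in this
 * series.  See the real hunks below for the exact change.
 */
static struct page *alloc_order_sketch(struct device *dev, size_t size,
				       gfp_t gfp)
{
	struct page *page = NULL;

#ifdef CONFIG_DMA_RESTRICTED_POOL
	/* 1. Prefer the device's restricted DMA pool, if one is attached. */
	page = swiotlb_alloc(dev, size);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__dma_direct_free_pages(dev, page, size);
		page = NULL;
	}
#endif
	/* 2. Fall back to the normal contiguous allocation. */
	if (!page)
		page = dma_alloc_contiguous(dev, size, gfp);

	return page;
}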
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index eb4098323bbc..0d521f78c7b9 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -78,6 +78,10 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 		size_t size)
 {
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	if (swiotlb_free(dev, page, size))
+		return;
+#endif
 	dma_free_contiguous(dev, page, size);
 }
 
@@ -92,7 +96,17 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
-	page = dma_alloc_contiguous(dev, size, gfp);
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	page = swiotlb_alloc(dev, size);
+	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+		__dma_direct_free_pages(dev, page, size);
+		page = NULL;
+	}
+#endif
+
+	if (!page)
+		page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
 		page = NULL;
@@ -148,7 +162,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		gfp |= __GFP_NOWARN;
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_dev_swiotlb_force(dev)) {
 		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 		if (!page)
 			return NULL;
@@ -161,18 +175,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev))
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_dev_swiotlb_force(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
 	/*
 	 * Remapping or decrypting memory may block. If either is required and
 	 * we can't block, allocate the memory from the atomic pools.
+	 * If restricted DMA (i.e., is_dev_swiotlb_force) is required, one must
+	 * set up another device coherent pool by shared-dma-pool and use
+	 * dma_alloc_from_dev_coherent instead.
 	 */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    !gfpflags_allow_blocking(gfp) &&
 	    (force_dma_unencrypted(dev) ||
-	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	      !dev_is_dma_coherent(dev))) &&
+	    !is_dev_swiotlb_force(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
@@ -253,15 +272,15 @@ void dma_direct_free(struct device *dev, size_t size,
 	unsigned int page_order = get_order(size);
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_dev_swiotlb_force(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
 		dma_free_contiguous(dev, cpu_addr, size);
 		return;
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev)) {
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_dev_swiotlb_force(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
@@ -289,7 +308,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	void *ret;
 
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	    !is_dev_swiotlb_force(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
--
2.31.1.751.gd2f1c929bd-goog
Thread overview: 51+ messages
2021-05-18 6:42 [PATCH v7 00/15] Restricted DMA Claire Chang
2021-05-18 6:42 ` [PATCH v7 01/15] swiotlb: Refactor swiotlb init functions Claire Chang
2021-05-19 18:50 ` Florian Fainelli
2021-05-20 6:40 ` Claire Chang
2021-05-24 15:53 ` Konrad Rzeszutek Wilk
2021-05-25 3:08 ` Claire Chang
2021-05-27 13:02 ` Christoph Hellwig
2021-05-27 14:41 ` Tom Lendacky
2021-05-27 16:32 ` Tom Lendacky
2021-05-31 15:00 ` Claire Chang
2021-05-18 6:42 ` [PATCH v7 02/15] swiotlb: Refactor swiotlb_create_debugfs Claire Chang
2021-05-19 19:24 ` Florian Fainelli
2021-05-27 13:24 ` Christoph Hellwig
2021-05-18 6:42 ` [PATCH v7 03/15] swiotlb: Add DMA_RESTRICTED_POOL Claire Chang
2021-05-19 19:00 ` Florian Fainelli
2021-05-27 13:25 ` Christoph Hellwig
2021-05-18 6:42 ` [PATCH v7 04/15] swiotlb: Add restricted DMA pool initialization Claire Chang
2021-05-18 6:48 ` Claire Chang
2021-05-24 15:49 ` Konrad Rzeszutek Wilk
2021-05-25 3:08 ` Claire Chang
2021-05-27 13:27 ` Christoph Hellwig
2021-05-19 18:54 ` Florian Fainelli
2021-05-20 6:39 ` Claire Chang
2021-05-27 13:27 ` Christoph Hellwig
2021-05-18 6:42 ` [PATCH v7 05/15] swiotlb: Add a new get_io_tlb_mem getter Claire Chang
2021-05-18 6:51 ` Claire Chang
2021-05-24 15:51 ` Konrad Rzeszutek Wilk
2021-05-25 3:08 ` Claire Chang
2021-05-19 19:18 ` Florian Fainelli
2021-05-18 6:42 ` [PATCH v7 06/15] swiotlb: Update is_swiotlb_buffer to add a struct device argument Claire Chang
2021-05-19 19:19 ` Florian Fainelli
2021-05-18 6:42 ` [PATCH v7 07/15] swiotlb: Update is_swiotlb_active " Claire Chang
2021-05-19 19:20 ` Florian Fainelli
2021-05-27 13:28 ` Christoph Hellwig
2021-05-18 6:42 ` [PATCH v7 08/15] swiotlb: Bounce data from/to restricted DMA pool if available Claire Chang
2021-05-18 6:42 ` [PATCH v7 09/15] swiotlb: Move alloc_size to find_slots Claire Chang
2021-05-18 6:42 ` [PATCH v7 10/15] swiotlb: Refactor swiotlb_tbl_unmap_single Claire Chang
2021-05-18 6:42 ` [PATCH v7 11/15] dma-direct: Add a new wrapper __dma_direct_free_pages() Claire Chang
2021-05-19 18:59 ` Florian Fainelli
2021-05-18 6:42 ` [PATCH v7 12/15] swiotlb: Add restricted DMA alloc/free support Claire Chang
2021-05-18 6:42 ` Claire Chang [this message]
2021-05-27 13:30 ` [PATCH v7 13/15] dma-direct: Allocate memory from restricted DMA pool if available Christoph Hellwig
2021-05-18 6:42 ` [PATCH v7 14/15] dt-bindings: of: Add restricted DMA pool Claire Chang
2021-05-26 12:13 ` Will Deacon
2021-05-26 15:53 ` Will Deacon
2021-05-27 11:29 ` Claire Chang
2021-05-27 11:34 ` Will Deacon
2021-05-27 12:48 ` Claire Chang
2021-05-27 12:53 ` Will Deacon
2021-05-18 6:42 ` [PATCH v7 15/15] of: Add plumbing for " Claire Chang
2021-05-27 13:00 ` [PATCH v7 00/15] Restricted DMA Claire Chang