linux-kernel.vger.kernel.org archive mirror
From: Claire Chang <tientzu@chromium.org>
To: Joerg Roedel <joro@8bytes.org>, Will Deacon <will@kernel.org>,
	Frank Rowand <frowand.list@gmail.com>,
	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
	boris.ostrovsky@oracle.com, jgross@suse.com,
	Christoph Hellwig <hch@lst.de>,
	Marek Szyprowski <m.szyprowski@samsung.com>
Cc: benh@kernel.crashing.org, paulus@samba.org,
	"list@263.net:IOMMU DRIVERS" <iommu@lists.linux-foundation.org>,
	sstabellini@kernel.org, Robin Murphy <robin.murphy@arm.com>,
	grant.likely@arm.com, xypron.glpk@gmx.de,
	Thierry Reding <treding@nvidia.com>,
	mingo@kernel.org, bauerman@linux.ibm.com, peterz@infradead.org,
	Greg KH <gregkh@linuxfoundation.org>,
	Saravana Kannan <saravanak@google.com>,
	"Rafael J . Wysocki" <rafael.j.wysocki@intel.com>,
	heikki.krogerus@linux.intel.com,
	Andy Shevchenko <andriy.shevchenko@linux.intel.com>,
	Randy Dunlap <rdunlap@infradead.org>,
	Dan Williams <dan.j.williams@intel.com>,
	Bartosz Golaszewski <bgolaszewski@baylibre.com>,
	linux-devicetree <devicetree@vger.kernel.org>,
	lkml <linux-kernel@vger.kernel.org>,
	linuxppc-dev@lists.ozlabs.org, xen-devel@lists.xenproject.org,
	Nicolas Boichat <drinkcat@chromium.org>,
	Jim Quinlan <james.quinlan@broadcom.com>,
	tfiga@chromium.org, bskeggs@redhat.com, bhelgaas@google.com,
	chris@chris-wilson.co.uk, tientzu@chromium.org, daniel@ffwll.ch,
	airlied@linux.ie, dri-devel@lists.freedesktop.org,
	intel-gfx@lists.freedesktop.org, jani.nikula@linux.intel.com,
	jxgao@google.com, joonas.lahtinen@linux.intel.com,
	linux-pci@vger.kernel.org, maarten.lankhorst@linux.intel.com,
	matthew.auld@intel.com, nouveau@lists.freedesktop.org,
	rodrigo.vivi@intel.com, thomas.hellstrom@linux.intel.com
Subject: [PATCH v5 14/16] dma-direct: Allocate memory from restricted DMA pool if available
Date: Thu, 22 Apr 2021 16:15:06 +0800
Message-ID: <20210422081508.3942748-15-tientzu@chromium.org>
In-Reply-To: <20210422081508.3942748-1-tientzu@chromium.org>

When a restricted DMA pool is available, allocate memory from it in
preference to the regular allocators.

The restricted DMA pool provides a basic level of protection against
the device overwriting buffer contents at unexpected times. However, to
protect against general data leakage and system memory corruption, the
system still needs to provide a way to lock down the memory access,
e.g., an MPU. Devices forced to use their restricted pool
(is_dev_swiotlb_force()) therefore also bypass the
DMA_ATTR_NO_KERNEL_MAPPING shortcut, arch_dma_alloc(), and the atomic
coherent pools, so that every allocation really comes from the
restricted pool.
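
Concretely, the allocation path now tries the restricted pool first and
falls back to the regular contiguous allocator. A simplified sketch of
the logic this patch adds to __dma_direct_alloc_pages() (the wrapper
name restricted_alloc_sketch is hypothetical, for illustration only):

  static struct page *restricted_alloc_sketch(struct device *dev,
  					      size_t size, gfp_t gfp)
  {
  	struct page *page = NULL;

  #ifdef CONFIG_DMA_RESTRICTED_POOL
  	/* Prefer the device's restricted DMA pool, if it has one. */
  	page = swiotlb_alloc(dev, size);
  	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
  		/* The pool memory does not satisfy the coherent mask. */
  		__dma_direct_free_pages(dev, page, size);
  		page = NULL;
  	}
  #endif
  	/* Fall back to the regular contiguous allocator. */
  	if (!page)
  		page = dma_alloc_contiguous(dev, size, gfp);
  	return page;
  }

The free path is symmetric: __dma_direct_free_pages() first offers the
pages back to the restricted pool via swiotlb_free(), and only calls
dma_free_contiguous() when they did not come from the pool.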

Signed-off-by: Claire Chang <tientzu@chromium.org>
---
 kernel/dma/direct.c | 35 ++++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 7a27f0510fcc..29523d2a9845 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -78,6 +78,10 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 				    size_t size)
 {
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	if (swiotlb_free(dev, page, size))
+		return;
+#endif
 	dma_free_contiguous(dev, page, size);
 }
 
@@ -92,7 +96,17 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
-	page = dma_alloc_contiguous(dev, size, gfp);
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	page = swiotlb_alloc(dev, size);
+	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+		__dma_direct_free_pages(dev, page, size);
+		page = NULL;
+	}
+#endif
+
+	if (!page)
+		page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
 		page = NULL;
@@ -148,7 +162,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		gfp |= __GFP_NOWARN;
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_dev_swiotlb_force(dev)) {
 		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 		if (!page)
 			return NULL;
@@ -161,8 +175,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev))
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_dev_swiotlb_force(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
 	/*
@@ -172,7 +186,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    !gfpflags_allow_blocking(gfp) &&
 	    (force_dma_unencrypted(dev) ||
-	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	      !dev_is_dma_coherent(dev))) &&
+	    !is_dev_swiotlb_force(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
@@ -253,15 +269,15 @@ void dma_direct_free(struct device *dev, size_t size,
 	unsigned int page_order = get_order(size);
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_dev_swiotlb_force(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
 		dma_free_contiguous(dev, cpu_addr, size);
 		return;
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev)) {
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_dev_swiotlb_force(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
@@ -289,7 +305,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	void *ret;
 
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	    !is_dev_swiotlb_force(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
-- 
2.31.1.368.gbe11c130af-goog


Thread overview: 29+ messages
2021-04-22  8:14 [PATCH v5 00/16] Restricted DMA Claire Chang
2021-04-22  8:14 ` [PATCH v5 01/16] swiotlb: Fix the type of index Claire Chang
2021-04-23  7:11   ` Christoph Hellwig
2021-04-22  8:14 ` [PATCH v5 02/16] swiotlb: Refactor swiotlb init functions Claire Chang
2021-04-22  8:14 ` [PATCH v5 03/16] swiotlb: Refactor swiotlb_create_debugfs Claire Chang
2021-04-22  8:14 ` [PATCH v5 04/16] swiotlb: Add DMA_RESTRICTED_POOL Claire Chang
2021-04-22  8:14 ` [PATCH v5 05/16] swiotlb: Add restricted DMA pool initialization Claire Chang
2021-04-23 11:34   ` Steven Price
2021-04-26 16:37     ` Claire Chang
2021-04-28  9:50       ` Steven Price
2021-04-22  8:14 ` [PATCH v5 06/16] swiotlb: Add a new get_io_tlb_mem getter Claire Chang
2021-04-22  8:14 ` [PATCH v5 07/16] swiotlb: Update is_swiotlb_buffer to add a struct device argument Claire Chang
2021-04-22  8:15 ` [PATCH v5 08/16] swiotlb: Update is_swiotlb_active " Claire Chang
2021-04-23 13:31   ` Robin Murphy
2021-04-26 16:37     ` Claire Chang
2021-04-22  8:15 ` [PATCH v5 09/16] swiotlb: Bounce data from/to restricted DMA pool if available Claire Chang
2021-04-22  8:15 ` [PATCH v5 10/16] swiotlb: Move alloc_size to find_slots Claire Chang
2021-04-22  8:15 ` [PATCH v5 11/16] swiotlb: Refactor swiotlb_tbl_unmap_single Claire Chang
2021-04-22  8:15 ` [PATCH v5 12/16] dma-direct: Add a new wrapper __dma_direct_free_pages() Claire Chang
2021-04-22  8:15 ` [PATCH v5 13/16] swiotlb: Add restricted DMA alloc/free support Claire Chang
2021-04-22  8:15 ` Claire Chang [this message]
2021-04-23 13:46   ` [PATCH v5 14/16] dma-direct: Allocate memory from restricted DMA pool if available Robin Murphy
2021-05-03 14:26     ` Claire Chang
2021-04-22  8:15 ` [PATCH v5 15/16] dt-bindings: of: Add restricted DMA pool Claire Chang
2021-04-22  8:15 ` [PATCH v5 16/16] of: Add plumbing for " Claire Chang
2021-04-23  2:52   ` Claire Chang
2021-04-23 13:35   ` Robin Murphy
2021-04-26 16:38     ` Claire Chang
2021-05-10  9:53 ` [PATCH v5 00/16] Restricted DMA Claire Chang
