From: Yong Wu <yong.wu@mediatek.com>
To: Rob Herring <robh+dt@kernel.org>,
	Sumit Semwal <sumit.semwal@linaro.org>,
	<christian.koenig@amd.com>,
	Matthias Brugger <matthias.bgg@gmail.com>
Cc: Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>,
	Conor Dooley <conor+dt@kernel.org>,
	Benjamin Gaignard <benjamin.gaignard@collabora.com>,
	Brian Starkey <Brian.Starkey@arm.com>,
	John Stultz <jstultz@google.com>, <tjmercier@google.com>,
	AngeloGioacchino Del Regno 
	<angelogioacchino.delregno@collabora.com>,
	Yong Wu <yong.wu@mediatek.com>, <devicetree@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>, <linux-media@vger.kernel.org>,
	<dri-devel@lists.freedesktop.org>,
	<linaro-mm-sig@lists.linaro.org>,
	<linux-arm-kernel@lists.infradead.org>,
	<linux-mediatek@lists.infradead.org>,
	<jianjiao.zeng@mediatek.com>, <kuohong.wang@mediatek.com>,
	Vijayanand Jitta <quic_vjitta@quicinc.com>,
	Joakim Bech <joakim.bech@linaro.org>,
	Jeffrey Kardatzke <jkardatzke@google.com>,
	Nicolas Dufresne <nicolas@ndufresne.ca>,
	<ckoenig.leichtzumerken@gmail.com>
Subject: [PATCH v3 7/7] dma_buf: heaps: secure_heap_mtk: Add a new CMA heap
Date: Tue, 12 Dec 2023 10:46:07 +0800	[thread overview]
Message-ID: <20231212024607.3681-8-yong.wu@mediatek.com> (raw)
In-Reply-To: <20231212024607.3681-1-yong.wu@mediatek.com>

Create a new MediaTek CMA heap from the CMA reserved buffer.

In this heap, when a buffer is allocated for the first time, use
cma_alloc to reserve the whole CMA range, then send that range to the
TEE to protect and manage. For later allocations, we only increase
cma_used_size.

When SVP is done, cma_release releases the buffer so the kernel can
reuse it.

For the "CMA" secure heap, "struct cma *cma" is a common property, not
specific to MediaTek, so put it into "struct secure_heap" rather than
into our private data.
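
Not part of this patch, but for reference, a minimal userspace sketch of
allocating from this heap through the standard DMA-BUF heap ioctl could
look like the below. The /dev/dma_heap/secure_mtk_cma path is assumed
from the heap name registered in this patch, the 4 MiB size is only an
example, and error handling is trimmed:

/*
 * Sketch only: allocate a TEE-protected buffer from the new heap via
 * DMA_HEAP_IOCTL_ALLOC. The heap node path and buffer size below are
 * assumptions for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data alloc = {
		.len = 4 * 1024 * 1024,		/* requested buffer size */
		.fd_flags = O_RDWR | O_CLOEXEC,	/* flags for the returned dma-buf fd */
	};
	int heap_fd, ret;

	heap_fd = open("/dev/dma_heap/secure_mtk_cma", O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return 1;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc);
	if (ret < 0) {
		close(heap_fd);
		return 1;
	}

	/*
	 * alloc.fd is now a dma-buf backed by the TEE-protected CMA range.
	 * The CPU is not expected to map it; pass the fd to a secure-capable
	 * device (e.g. a video decoder) instead.
	 */
	printf("allocated secure dma-buf fd %d\n", alloc.fd);
	close(alloc.fd);
	close(heap_fd);
	return 0;
}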

Signed-off-by: Yong Wu <yong.wu@mediatek.com>
---
 drivers/dma-buf/heaps/Kconfig           |   2 +-
 drivers/dma-buf/heaps/secure_heap.h     |   4 +
 drivers/dma-buf/heaps/secure_heap_mtk.c | 119 +++++++++++++++++++++++-
 3 files changed, 122 insertions(+), 3 deletions(-)

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index 12962189878e..f117d91a0a9d 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -21,7 +21,7 @@ config DMABUF_HEAPS_SECURE
 
 config DMABUF_HEAPS_SECURE_MTK
 	bool "MediaTek DMA-BUF Secure Heap"
-	depends on DMABUF_HEAPS_SECURE && TEE=y
+	depends on DMABUF_HEAPS_SECURE && DMA_CMA && TEE=y
 	help
 	  Enable secure dma-buf heaps for MediaTek platform. This heap is backed by
 	  TEE client interfaces. If in doubt, say N.
diff --git a/drivers/dma-buf/heaps/secure_heap.h b/drivers/dma-buf/heaps/secure_heap.h
index 374fd276bdd7..9f80edcd3e1f 100644
--- a/drivers/dma-buf/heaps/secure_heap.h
+++ b/drivers/dma-buf/heaps/secure_heap.h
@@ -20,6 +20,10 @@ struct secure_heap {
 
 	const struct secure_heap_ops *ops;
 
+	struct cma		*cma;
+	unsigned long		cma_paddr;
+	unsigned long		cma_size;
+
 	void			*priv_data;
 };
 
diff --git a/drivers/dma-buf/heaps/secure_heap_mtk.c b/drivers/dma-buf/heaps/secure_heap_mtk.c
index 2c6aaeaf469f..58148120f4ed 100644
--- a/drivers/dma-buf/heaps/secure_heap_mtk.c
+++ b/drivers/dma-buf/heaps/secure_heap_mtk.c
@@ -4,9 +4,11 @@
  *
  * Copyright (C) 2023 MediaTek Inc.
  */
+#include <linux/cma.h>
 #include <linux/dma-buf.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include <linux/uuid.h>
@@ -23,6 +25,13 @@ enum mtk_secure_mem_type {
 	 * management is inside the TEE.
 	 */
 	MTK_SECURE_MEMORY_TYPE_CM_TZ	= 1,
+	/*
+	 * MediaTek dynamic chunk memory carved out from CMA.
+	 * Normally the kernel can use this CMA range; when SVP starts, we
+	 * allocate the whole CMA range and pass its PA and size into the TEE
+	 * to protect it, so the detailed memory management is also in the TEE.
+	 */
+	MTK_SECURE_MEMORY_TYPE_CM_CMA	= 2,
 };
 
 enum mtk_secure_buffer_tee_cmd {
@@ -32,6 +41,8 @@ enum mtk_secure_buffer_tee_cmd {
 	 * [in]  value[0].a: The buffer size.
 	 *       value[0].b: alignment.
 	 * [in]  value[1].a: enum mtk_secure_mem_type.
+	 * [in]  value[2].a: The CMA PA base in the CMA case.
+	 *       value[2].b: The CMA size in the CMA case.
 	 * [out] value[3].a: The secure handle.
 	 */
 	MTK_TZCMD_SECMEM_ZALLOC	= 0x10000, /* MTK TEE Command ID Base */
@@ -52,6 +63,9 @@ struct mtk_secure_heap_data {
 
 	const enum mtk_secure_mem_type mem_type;
 
+	struct page		*cma_page;
+	unsigned long		cma_used_size;
+	struct mutex		lock; /* lock for cma_used_size */
 };
 
 static int mtk_tee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
@@ -126,6 +140,10 @@ static int mtk_tee_secure_memory(struct secure_heap *sec_heap, struct secure_buf
 	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
 	params[1].u.value.a = data->mem_type;
 	params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	if (sec_heap->cma && data->mem_type == MTK_SECURE_MEMORY_TYPE_CM_CMA) {
+		params[2].u.value.a = sec_heap->cma_paddr;
+		params[2].u.value.b = sec_heap->cma_size;
+	}
 	params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
 	ret = mtk_tee_service_call(data->tee_ctx, data->tee_session,
 				   MTK_TZCMD_SECMEM_ZALLOC, params);
@@ -162,6 +180,48 @@ static void mtk_secure_memory_free(struct secure_heap *sec_heap, struct secure_b
 {
 }
 
+static int mtk_secure_memory_cma_allocate(struct secure_heap *sec_heap,
+					  struct secure_buffer *sec_buf)
+{
+	struct mtk_secure_heap_data *data = sec_heap->priv_data;
+	int ret = 0;
+	/*
+	 * Allocate from CMA only when a buffer is allocated for the first time,
+	 * and just increase cma_used_size for later allocations; the actual
+	 * memory allocation is done inside the TEE.
+	 */
+	mutex_lock(&data->lock);
+	if (!data->cma_used_size) {
+		data->cma_page = cma_alloc(sec_heap->cma, sec_heap->cma_size >> PAGE_SHIFT,
+					   get_order(PAGE_SIZE), false);
+		if (!data->cma_page) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+	} else if (data->cma_used_size + sec_buf->size > sec_heap->cma_size) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	data->cma_used_size += sec_buf->size;
+
+out_unlock:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+static void mtk_secure_memory_cma_free(struct secure_heap *sec_heap,
+				       struct secure_buffer *sec_buf)
+{
+	struct mtk_secure_heap_data *data = sec_heap->priv_data;
+
+	mutex_lock(&data->lock);
+	data->cma_used_size -= sec_buf->size;
+	if (!data->cma_used_size)
+		cma_release(sec_heap->cma, data->cma_page,
+			    sec_heap->cma_size >> PAGE_SHIFT);
+	mutex_unlock(&data->lock);
+}
+
 static int mtk_secure_heap_init(struct secure_heap *sec_heap)
 {
 	struct mtk_secure_heap_data *data = sec_heap->priv_data;
@@ -183,21 +243,76 @@ static struct mtk_secure_heap_data mtk_sec_heap_data = {
 	.mem_type = MTK_SECURE_MEMORY_TYPE_CM_TZ,
 };
 
+static const struct secure_heap_ops mtk_sec_mem_ops_cma = {
+	.heap_init		= mtk_secure_heap_init,
+	.memory_alloc		= mtk_secure_memory_cma_allocate,
+	.memory_free		= mtk_secure_memory_cma_free,
+	.secure_the_memory	= mtk_tee_secure_memory,
+	.unsecure_the_memory	= mtk_tee_unsecure_memory,
+};
+
+static struct mtk_secure_heap_data mtk_sec_heap_data_cma = {
+	.mem_type = MTK_SECURE_MEMORY_TYPE_CM_CMA,
+};
+
 static struct secure_heap mtk_secure_heaps[] = {
 	{
 		.name		= "secure_mtk_cm",
 		.ops		= &mtk_sec_mem_ops,
 		.priv_data	= &mtk_sec_heap_data,
 	},
+	{
+		.name		= "secure_mtk_cma",
+		.ops		= &mtk_sec_mem_ops_cma,
+		.priv_data	= &mtk_sec_heap_data_cma,
+	},
 };
 
+static int __init mtk_secure_cma_init(struct reserved_mem *rmem)
+{
+	struct mtk_secure_heap_data *data;
+	struct secure_heap *sec_heap = mtk_secure_heaps, *sec_heap_cma = NULL;
+	struct cma *sec_cma;
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(mtk_secure_heaps); i++, sec_heap++) {
+		data = sec_heap->priv_data;
+		if (data->mem_type == MTK_SECURE_MEMORY_TYPE_CM_CMA) {
+			sec_heap_cma = sec_heap;
+			break;
+		}
+	}
+	if (!sec_heap_cma)
+		return -EINVAL;
+
+	ret = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name,
+				    &sec_cma);
+	if (ret) {
+		pr_err("%s: %s set up CMA fail\n", __func__, rmem->name);
+		return ret;
+	}
+
+	sec_heap_cma->cma = sec_cma;
+	sec_heap_cma->cma_paddr = rmem->base;
+	sec_heap_cma->cma_size = rmem->size;
+	return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(secure_cma, "mediatek,dynamic-secure-region", mtk_secure_cma_init);
+
 static int mtk_sec_heap_init(void)
 {
 	struct secure_heap *sec_heap = mtk_secure_heaps;
+	struct mtk_secure_heap_data *data;
 	unsigned int i;
 
-	for (i = 0; i < ARRAY_SIZE(mtk_secure_heaps); i++, sec_heap++)
-		secure_heap_add(sec_heap);
+	for (i = 0; i < ARRAY_SIZE(mtk_secure_heaps); i++, sec_heap++) {
+		data = sec_heap->priv_data;
+		if (data->mem_type == MTK_SECURE_MEMORY_TYPE_CM_CMA && !sec_heap->cma)
+			continue;
+		if (!secure_heap_add(sec_heap))
+			mutex_init(&data->lock);
+	}
 	return 0;
 }
 
-- 
2.25.1
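
Not part of this patch, but as a rough illustration of the
reserved-memory node this heap is wired to via RESERVEDMEM_OF_DECLARE:
the sketch below assumes the "mediatek,dynamic-secure-region" binding
added in patch 1/7 of this series; the node name, address, size and the
reusable property are placeholders, and the binding itself is the
authoritative reference.

/* Sketch only: addresses, size and extra properties are illustrative. */
reserved-memory {
	#address-cells = <2>;
	#size-cells = <2>;
	ranges;

	secure_cma: secure-region@c0000000 {
		compatible = "mediatek,dynamic-secure-region";
		reg = <0 0xc0000000 0 0x10000000>;	/* e.g. a 256 MiB carve-out */
		reusable;	/* assumed: the kernel may reuse the range until SVP starts */
	};
};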


Thread overview: 57+ messages in thread

2023-12-12  2:46 [PATCH v3 0/7] dma-buf: heaps: Add secure heap Yong Wu
2023-12-12  2:46 ` [PATCH v3 1/7] dt-bindings: reserved-memory: Add mediatek,dynamic-secure-region Yong Wu
2023-12-12  2:46 ` [PATCH v3 2/7] dma-buf: heaps: Initialize a secure heap Yong Wu
2023-12-12  2:46 ` [PATCH v3 3/7] dma-buf: heaps: secure_heap: Add private heap ops Yong Wu
2023-12-12  2:46 ` [PATCH v3 4/7] dma-buf: heaps: secure_heap: Add dma_ops Yong Wu
2023-12-12  2:46 ` [PATCH v3 5/7] dma-buf: heaps: secure_heap: Add MediaTek secure heap and heap_init Yong Wu
2023-12-12  2:46 ` [PATCH v3 6/7] dma-buf: heaps: secure_heap_mtk: Add tee memory service call Yong Wu
2023-12-12  2:46 ` [PATCH v3 7/7] dma_buf: heaps: secure_heap_mtk: Add a new CMA heap Yong Wu [this message]
2023-12-12 16:36 ` [PATCH v3 0/7] dma-buf: heaps: Add secure heap Simon Ser
2023-12-13  9:05   ` Pekka Paalanen
2023-12-13 10:15     ` Joakim Bech
2023-12-13 11:38       ` Pekka Paalanen
2023-12-13 13:22         ` Joakim Bech
2023-12-13 13:59           ` Christian König
2023-12-13 14:16           ` Pekka Paalanen
2023-12-22  9:40             ` Simon Ser
2024-01-04 19:50               ` Jeffrey Kardatzke
2024-01-05  9:35                 ` Christian König
2024-01-09  3:07                   ` Yong Wu (吴勇)
