From: Sudeep Holla <sudeep.holla@arm.com>
To: linux-arm-kernel@lists.infradead.org, devicetree@vger.kernel.org
Cc: Sudeep Holla <sudeep.holla@arm.com>,
	Trilok Soni <tsoni@codeaurora.org>,
	arve@android.com, Andrew Walbran <qwandor@google.com>,
	David Hartley <dhh@qti.qualcomm.com>,
	Achin Gupta <Achin.Gupta@arm.com>,
	Jens Wiklander <jens.wiklander@linaro.org>,
	Arunachalam Ganapathy <arunachalam.ganapathy@arm.com>
Subject: [PATCH v3 7/7] firmware: arm_ffa: Add support for MEM_* interfaces
Date: Fri,  4 Dec 2020 12:11:37 +0000
Message-ID: <20201204121137.2966778-8-sudeep.holla@arm.com>
In-Reply-To: <20201204121137.2966778-1-sudeep.holla@arm.com>

Most of the MEM_* APIs take the same set of parameters, so they can be
generalised behind a common implementation. Currently only MEM_SHARE is
implemented; the user-space interface for it is not added yet.
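
As a purely hypothetical illustration (not introduced by this patch),
an in-kernel user could drive the new ops roughly as below; the device,
partition ID 0x8001 and scatterlist setup are assumptions, and the
scatterlist entries are taken to be multiples of FFA_PAGE_SIZE:

	static int example_share(struct ffa_device *dev, struct scatterlist *sg)
	{
		const struct ffa_dev_ops *ops = ffa_dev_ops_get(dev);
		struct ffa_mem_region_attributes mem_attr = {
			.receiver = 0x8001,	/* hypothetical SP partition ID */
			.attrs = FFA_MEM_RW,
		};
		u64 handle;
		struct ffa_mem_ops_args args = {
			.use_txbuf = true,	/* share via the driver TX buffer */
			.attrs = &mem_attr,
			.nattrs = 1,
			.sg = sg,
			.g_handle = &handle,
		};
		int ret;

		if (!ops || !ops->memory_share)	/* assuming NULL when FF-A is absent */
			return -ENODEV;

		ret = ops->memory_share(&args);	/* FFA_MEM_SHARE */
		if (ret)
			return ret;

		/* ... receiver retrieves and later relinquishes the region ... */

		return ops->memory_reclaim(handle, 0);	/* FFA_MEM_RECLAIM */
	}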

Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
---
 drivers/firmware/arm_ffa/driver.c | 180 ++++++++++++++++++++++++++++++
 include/linux/arm_ffa.h           | 149 +++++++++++++++++++++++++
 2 files changed, 329 insertions(+)

diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 3e4ba841dbf8..92a0bf542f18 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -28,7 +28,9 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mm.h>
 #include <linux/of.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/uuid.h>
 
@@ -306,6 +308,177 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id,
 	return 0;
 }
 
+static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
+			      u32 frag_len, u32 len, u64 *handle)
+{
+	ffa_res_t ret;
+
+	ret = invoke_ffa_fn(func_id, len, frag_len, buf, buf_sz, 0, 0, 0);
+
+	while (ret.a0 == FFA_MEM_OP_PAUSE)
+		ret = invoke_ffa_fn(FFA_MEM_OP_RESUME, ret.a1, ret.a2,
+				    0, 0, 0, 0, 0);
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+
+	if (ret.a0 != FFA_SUCCESS)
+		return -EOPNOTSUPP;
+
+	if (handle)
+		*handle = PACK_HANDLE(ret.a2, ret.a3);
+
+	return frag_len;
+}
+
+static int ffa_mem_next_frag(u64 handle, u32 frag_len)
+{
+	ffa_res_t ret;
+
+	ret = invoke_ffa_fn(FFA_MEM_FRAG_TX, HANDLE_LOW(handle),
+			    HANDLE_HIGH(handle), frag_len, 0, 0, 0, 0);
+
+	while (ret.a0 == FFA_MEM_OP_PAUSE)
+		ret = invoke_ffa_fn(FFA_MEM_OP_RESUME, ret.a1, ret.a2,
+				    0, 0, 0, 0, 0);
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+
+	if (ret.a0 != FFA_MEM_FRAG_RX)
+		return -EOPNOTSUPP;
+
+	return ret.a3;
+}
+
+static int
+ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
+		      u32 len, u64 *handle, bool first)
+{
+	if (!first)
+		return ffa_mem_next_frag(*handle, frag_len);
+
+	return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len,
+				  len, handle);
+}
+
+static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
+{
+	u32 num_pages = 0;
+
+	do {
+		num_pages += sg->length / FFA_PAGE_SIZE;
+	} while ((sg = sg_next(sg)));
+
+	return num_pages;
+}
+
+static int
+ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+		       struct ffa_mem_ops_args *args)
+{
+	int rc = 0;
+	bool first = true;
+	phys_addr_t addr = 0;
+	struct ffa_composite_mem_region *composite;
+	struct ffa_mem_region_addr_range *constituents;
+	struct ffa_mem_region_attributes *ep_mem_access;
+	struct ffa_mem_region *mem_region = buffer;
+	u32 idx, frag_len, length, num_entries = sg_nents(args->sg);
+	u32 buf_sz = args->use_txbuf ? 0 : max_fragsize / FFA_PAGE_SIZE;
+
+	mem_region->tag = args->tag;
+	mem_region->flags = args->flags;
+	mem_region->sender_id = drv_info->vm_id;
+	mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
+				 FFA_MEM_INNER_SHAREABLE;
+	ep_mem_access = &mem_region->ep_mem_access[0];
+
+	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+		ep_mem_access->receiver = args->attrs[idx].receiver;
+		ep_mem_access->attrs = args->attrs[idx].attrs;
+		ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
+	}
+	mem_region->ep_count = args->nattrs;
+
+	composite = buffer + COMPOSITE_OFFSET(args->nattrs);
+	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
+	composite->addr_range_cnt = num_entries;
+
+	length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
+	frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
+	if (frag_len > max_fragsize)
+		return -ENXIO;
+
+	if (!args->use_txbuf)
+		addr = virt_to_phys(buffer);
+
+	constituents = buffer + frag_len;
+	idx = 0;
+	do {
+		if (frag_len == max_fragsize) {
+			rc = ffa_transmit_fragment(func_id, addr, buf_sz,
+						   frag_len, length,
+						   args->g_handle, first);
+			if (rc < 0)
+				return -ENXIO;
+
+			first = false;
+			idx = 0;
+			frag_len = 0;
+			constituents = buffer;
+		}
+
+		if ((void *)constituents - buffer > max_fragsize) {
+			pr_err("Memory Region Fragment > Tx Buffer size\n");
+			return -EFAULT;
+		}
+
+		constituents->address = sg_phys(args->sg);
+		constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
+		constituents++;
+		frag_len += sizeof(struct ffa_mem_region_addr_range);
+	} while ((args->sg = sg_next(args->sg)));
+
+	return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
+				     length, args->g_handle, first);
+}
+
+static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
+{
+	int ret;
+	void *buffer;
+
+	if (!args->use_txbuf) {
+		buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+		if (!buffer)
+			return -ENOMEM;
+	} else {
+		buffer = drv_info->tx_buffer;
+		mutex_lock(&drv_info->tx_lock);
+	}
+
+	ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);
+
+	if (args->use_txbuf)
+		mutex_unlock(&drv_info->tx_lock);
+	else
+		free_pages_exact(buffer, RXTX_BUFFER_SIZE);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int ffa_memory_reclaim(u64 g_handle, u32 flags)
+{
+	ffa_res_t ret;
+
+	ret = invoke_ffa_fn(FFA_MEM_RECLAIM, HANDLE_LOW(g_handle),
+			    HANDLE_HIGH(g_handle), flags, 0, 0, 0, 0);
+
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+
+	return 0;
+}
+
 static u32 ffa_api_version_get(void)
 {
 	return drv_info->version;
@@ -331,11 +504,18 @@ static int ffa_sync_send_receive(struct ffa_device *dev, u16 ep,
 	return ffa_msg_send_direct_req(dev->vm_id, ep, data);
 }
 
+static int ffa_memory_share(struct ffa_mem_ops_args *args)
+{
+	return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
+}
+
 static const struct ffa_dev_ops ffa_ops = {
 	.api_version_get = ffa_api_version_get,
 	.partition_id_get = ffa_partition_id_get,
 	.partition_info_get = ffa_partition_info_get,
 	.sync_send_receive = ffa_sync_send_receive,
+	.memory_reclaim = ffa_memory_reclaim,
+	.memory_share = ffa_memory_share,
 };
 
 const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 8604c48289ce..67e3180e7097 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -109,6 +109,153 @@ struct ffa_send_direct_data {
 	unsigned long data4;
 };
 
+struct ffa_mem_region_addr_range {
+	/* The base IPA of the constituent memory region, aligned to 4 KiB */
+	u64 address;
+	/* The number of 4 KiB pages in the constituent memory region. */
+	u32 pg_cnt;
+	u32 reserved;
+};
+
+struct ffa_composite_mem_region {
+	/*
+	 * The total number of 4 KiB pages included in this memory region. This
+	 * must be equal to the sum of page counts specified in each
+	 * `struct ffa_mem_region_addr_range`.
+	 */
+	u32 total_pg_cnt;
+	/* The number of constituents included in this memory region range */
+	u32 addr_range_cnt;
+	u64 reserved;
+	/* An array of `addr_range_cnt` memory region constituents. */
+	struct ffa_mem_region_addr_range constituents[];
+};
+
+struct ffa_mem_region_attributes {
+	/* The ID of the VM to which the memory is being given or shared. */
+	u16 receiver;
+	/*
+	 * The permissions with which the memory region should be mapped in the
+	 * receiver's page table.
+	 */
+#define FFA_MEM_EXEC	BIT(3)
+#define FFA_MEM_NO_EXEC	BIT(2)
+#define FFA_MEM_RW		BIT(1)
+#define FFA_MEM_RO		BIT(0)
+	u8 attrs;
+	/*
+	 * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
+	 * for memory regions with multiple borrowers.
+	 */
+#define FFA_MEM_RETRIEVE_SELF_BORROWER	BIT(0)
+	u8 flag;
+	/*
+	 * Offset in bytes from the start of the outer `struct ffa_mem_region`
+	 * to the `struct ffa_composite_mem_region` describing the memory.
+	 */
+	u32 composite_off;
+	u64 reserved;
+};
+
+struct ffa_mem_region {
+	/* The ID of the VM/owner which originally sent the memory region */
+	u16 sender_id;
+#define FFA_MEM_NORMAL		BIT(5)
+#define FFA_MEM_DEVICE		BIT(4)
+
+#define FFA_MEM_WRITE_BACK	(3 << 2)
+#define FFA_MEM_NON_CACHEABLE	(1 << 2)
+
+#define FFA_DEV_nGnRnE		(0 << 2)
+#define FFA_DEV_nGnRE		(1 << 2)
+#define FFA_DEV_nGRE		(2 << 2)
+#define FFA_DEV_GRE		(3 << 2)
+
+#define FFA_MEM_NON_SHAREABLE	(0)
+#define FFA_MEM_OUTER_SHAREABLE	(2)
+#define FFA_MEM_INNER_SHAREABLE	(3)
+	u8 attributes;
+	u8 reserved_0;
+/*
+ * Clear memory region contents after unmapping it from the sender and
+ * before mapping it for any receiver.
+ */
+#define FFA_MEM_CLEAR			BIT(0)
+/*
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define FFA_TIME_SLICE_ENABLE		BIT(1)
+
+/*
+ * Whether the hypervisor should clear the memory region before the receiver
+ * relinquishes it or is aborted.
+ */
+#define FFA_MEM_CLEAR_BEFORE_RELINQUISH	BIT(0)
+/*
+ * Whether the hypervisor should clear the memory region after the receiver
+ * relinquishes it or is aborted.
+ */
+#define FFA_MEM_CLEAR_AFTER_RELINQUISH	BIT(2)
+
+#define FFA_MEM_RETRIEVE_TYPE_IN_RESP	(0 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_SHARE	(1 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_LEND	(2 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_DONATE	(3 << 3)
+
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT	BIT(9)
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x)		((x) << 5)
+	/* Flags to control behaviour of the transaction. */
+	u32 flags;
+#define HANDLE_LOW_MASK		GENMASK_ULL(31, 0)
+#define HANDLE_HIGH_MASK	GENMASK_ULL(63, 32)
+#define HANDLE_LOW(x)		((u32)FIELD_GET(HANDLE_LOW_MASK, (x)))
+#define HANDLE_HIGH(x)		((u32)FIELD_GET(HANDLE_HIGH_MASK, (x)))
+
+#define PACK_HANDLE(l, h)		\
+	(FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
+	/*
+	 * A globally-unique ID assigned by the hypervisor for a region
+	 * of memory being sent between VMs.
+	 */
+	u64 handle;
+	/*
+	 * An implementation-defined value associated with the receiver and the
+	 * memory region.
+	 */
+	u64 tag;
+	u32 reserved_1;
+	/*
+	 * The number of `ffa_mem_region_attributes` entries included in this
+	 * transaction.
+	 */
+	u32 ep_count;
+	/*
+	 * An array of endpoint memory access descriptors.
+	 * Each one specifies a memory region offset, an endpoint and the
+	 * attributes with which this memory region should be mapped in that
+	 * endpoint's page table.
+	 */
+	struct ffa_mem_region_attributes ep_mem_access[];
+};
+
+#define COMPOSITE_OFFSET(x)	\
+	(offsetof(struct ffa_mem_region, ep_mem_access[x]))
+#define CONSTITUENTS_OFFSET(x)	\
+	(offsetof(struct ffa_composite_mem_region, constituents[x]))
+#define COMPOSITE_CONSTITUENTS_OFFSET(x, y)	\
+	(COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))
+
+struct ffa_mem_ops_args {
+	bool use_txbuf;
+	u64 tag;
+	u32 flags;
+	struct ffa_mem_region_attributes *attrs;
+	u32 nattrs;
+	struct scatterlist *sg;
+	u64 *g_handle;
+};
+
 struct ffa_dev_ops {
 	u32 (*api_version_get)(void);
 	u16 (*partition_id_get)(struct ffa_device *dev);
@@ -116,6 +263,8 @@ struct ffa_dev_ops {
 				  struct ffa_partition_info *buffer);
 	int (*sync_send_receive)(struct ffa_device *dev, u16 ep,
 				 struct ffa_send_direct_data *data);
+	int (*memory_reclaim)(u64 g_handle, u32 flags);
+	int (*memory_share)(struct ffa_mem_ops_args *args);
 };
 
 #endif /* _LINUX_ARM_FFA_H */
-- 
2.25.1
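
For reference, the 64-bit global memory handle used by these interfaces
crosses the SMC boundary as two 32-bit register values and is
reassembled on return. A minimal sketch of that round trip, assuming
only the HANDLE_LOW/HANDLE_HIGH/PACK_HANDLE macros added to
include/linux/arm_ffa.h above:

	u64 handle = 0x1122334455667788ULL;
	u32 lo = HANDLE_LOW(handle);	/* 0x55667788: bits [31:0] */
	u32 hi = HANDLE_HIGH(handle);	/* 0x11223344: bits [63:32] */

	/* FFA_MEM_RECLAIM passes lo/hi as separate arguments ... */
	WARN_ON(PACK_HANDLE(lo, hi) != handle);	/* ... and they pack back losslessly */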


Thread overview: 48+ messages

2020-12-04 12:11 [PATCH v3 0/7] firmware: Add initial support for Arm FF-A Sudeep Holla
2020-12-04 12:11 ` [PATCH v3 1/7] dt-bindings: Arm: Add Firmware Framework for Armv8-A (FF-A) binding Sudeep Holla
2020-12-14 22:01   ` Rob Herring
2020-12-16 12:24     ` Sudeep Holla
2020-12-16 13:46       ` Jens Wiklander
2021-01-13 10:00       ` Sudeep Holla
2020-12-04 12:11 ` [PATCH v3 2/7] arm64: smccc: Add support for SMCCCv1.2 input/output registers Sudeep Holla
2020-12-04 12:11 ` [PATCH v3 3/7] firmware: arm_ffa: Add initial FFA bus support for device enumeration Sudeep Holla
2020-12-04 12:11 ` [PATCH v3 4/7] firmware: arm_ffa: Add initial Arm FFA driver support Sudeep Holla
2020-12-04 12:11 ` [PATCH v3 5/7] firmware: arm_ffa: Add support for SMCCC as transport to FFA driver Sudeep Holla
2020-12-04 12:11 ` [PATCH v3 6/7] firmware: arm_ffa: Setup in-kernel users of FFA partitions Sudeep Holla
2020-12-07 12:30   ` Jens Wiklander
2021-01-13  9:22     ` Sudeep Holla
2020-12-11 10:45   ` Jens Wiklander
2020-12-11 10:59     ` Jens Wiklander
2021-01-13  9:44       ` Sudeep Holla
2021-01-13 12:30         ` Jens Wiklander
2021-01-13 13:58           ` Achin Gupta
2021-01-13 17:20             ` Sudeep Holla
2021-01-14  6:48               ` Jens Wiklander
2021-01-12 18:04     ` Sudeep Holla
2021-01-13  7:10       ` Jens Wiklander
2020-12-04 12:11 ` [PATCH v3 7/7] firmware: arm_ffa: Add support for MEM_* interfaces Sudeep Holla [this message]
2020-12-11 10:54   ` Jens Wiklander
