From: Jens Wiklander <jens.wiklander@linaro.org>
To: xen-devel@lists.xenproject.org
Cc: Stefano Stabellini <sstabellini@kernel.org>,
Julien Grall <julien@xen.org>,
Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>,
Bertrand.Marquis@arm.com,
Anthony PERARD <anthony.perard@citrix.com>,
Juergen Gross <jgross@suse.com>, Wei Liu <wl@xen.org>,
Marc Bonnici <marc.bonnici@arm.com>,
Achin Gupta <achin.gupta@arm.com>,
Jens Wiklander <jens.wiklander@linaro.org>
Subject: [PATCH v6 7/9] xen/arm: ffa: support guest FFA_PARTITION_INFO_GET
Date: Mon, 19 Sep 2022 11:12:36 +0200
Message-ID: <20220919091238.2068052-8-jens.wiklander@linaro.org>
In-Reply-To: <20220919091238.2068052-1-jens.wiklander@linaro.org>

Add support in the mediator for handling FFA_PARTITION_INFO_GET requests
from a guest. Requests are forwarded to the SPMC, and the response is
translated to match the FF-A version in use by the guest.
A successful FFA_PARTITION_INFO_GET transfers ownership of the RX buffer
to the caller (the guest in this case), so once the guest is done with
the buffer it must release it with FFA_RX_RELEASE before another call
that needs the buffer can be made.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
---
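Not part of the patch, for review convenience only: a self-contained
sketch of the v1.1 -> v1.0 descriptor translation that
handle_partition_info_get() below performs for v1.0 guests. The uuid
member and the descriptor sizes are assumptions taken from the FF-A
v1.1 spec; the hunk below only shows the first three fields of
struct ffa_partition_info_1_1.

/* Illustrative only -- mirrors the translation done in
 * handle_partition_info_get() for FF-A v1.0 guests.
 */
#include <stdint.h>
#include <stddef.h>

struct info_1_0 {
    uint16_t id;
    uint16_t execution_context;
    uint32_t partition_properties;
};                              /* 8 bytes per entry */

struct info_1_1 {
    uint16_t id;
    uint16_t execution_context;
    uint32_t partition_properties;
    uint8_t uuid[16];           /* assumed, per the FF-A v1.1 spec */
};                              /* 24 bytes per entry */

/* Drop the UUIDs so a v1.0 guest sees the layout it expects. */
void truncate_to_1_0(struct info_1_0 *dst, const struct info_1_1 *src,
                     size_t count)
{
    size_t n;

    for ( n = 0; n < count; n++ )
    {
        dst[n].id = src[n].id;
        dst[n].execution_context = src[n].execution_context;
        dst[n].partition_properties = src[n].partition_properties;
    }
}

This is also why the v1.0 path below checks the buffer size against
sizeof(*dst): under the assumed sizes, a given RX buffer holds three
times as many v1.0 entries as v1.1 entries.
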
xen/arch/arm/ffa.c | 126 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 124 insertions(+), 2 deletions(-)
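
Likewise not part of the patch: a sketch of the sequence a guest is
expected to follow against this mediator. ffa_call() is a hypothetical
SMCCC conduit wrapper, not a real API; the function IDs are from the
FF-A spec, and the result/count register convention matches what
set_regs_success() produces in the hunks below.

#include <stdint.h>

#define FFA_ERROR               0x84000060U
#define FFA_RX_RELEASE          0x84000065U
#define FFA_PARTITION_INFO_GET  0x84000068U

/* Hypothetical SMCCC conduit: issues the call and fills res[0..7]
 * with w0..w7 of the response.
 */
void ffa_call(uint32_t fid, uint32_t w1, uint32_t w2, uint32_t w3,
              uint32_t w4, uint32_t w5, uint32_t res[8]);

void list_partitions(void *rx_buf)
{
    uint32_t res[8];

    /* Nil UUID in w1-w4 asks for all partitions; flags in w5 are
     * zero, so the descriptors are written to our RX buffer.
     */
    ffa_call(FFA_PARTITION_INFO_GET, 0, 0, 0, 0, 0, res);
    if ( res[0] == FFA_ERROR )
        return;

    /* res[2] holds the descriptor count and rx_buf is now owned by
     * this caller; parse the entries here ...
     */
    (void)rx_buf;

    /* ... then hand the buffer back before the next call that
     * delivers a result through it.
     */
    ffa_call(FFA_RX_RELEASE, 0, 0, 0, 0, 0, res);
}
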
diff --git a/xen/arch/arm/ffa.c b/xen/arch/arm/ffa.c
index 60157a696a9a..4c88da9d6d3b 100644
--- a/xen/arch/arm/ffa.c
+++ b/xen/arch/arm/ffa.c
@@ -188,6 +188,12 @@
 #define FFA_MSG_POLL 0x8400006AU
 
 /* Partition information descriptor */
+struct ffa_partition_info_1_0 {
+    uint16_t id;
+    uint16_t execution_context;
+    uint32_t partition_properties;
+};
+
 struct ffa_partition_info_1_1 {
     uint16_t id;
     uint16_t execution_context;
@@ -204,9 +210,8 @@ struct ffa_ctx {
     uint32_t guest_vers;
     bool tx_is_mine;
     bool interrupted;
+    spinlock_t lock;
 };
-
-
 
 /* Negotiated FF-A version to use with the SPMC */
 static uint32_t ffa_version __ro_after_init;
@@ -220,10 +225,16 @@ static unsigned int subscr_vm_destroyed_count __read_mostly;
  * Our rx/tx buffers shared with the SPMC.
  *
  * ffa_page_count is the number of pages used in each of these buffers.
+ *
+ * The RX buffer is protected from concurrent usage with ffa_rx_buffer_lock.
+ * Note that the SPMC is also tracking the ownership of our RX buffer so
+ * for calls which use our RX buffer to deliver a result we must call
+ * ffa_rx_release() to let the SPMC know that we're done with the buffer.
  */
 static void *ffa_rx __read_mostly;
 static void *ffa_tx __read_mostly;
 static unsigned int ffa_page_count __read_mostly;
+static DEFINE_SPINLOCK(ffa_rx_buffer_lock);
 
 static bool ffa_get_version(uint32_t *vers)
 {
@@ -510,6 +521,98 @@ static uint32_t handle_rxtx_unmap(void)
     return FFA_RET_OK;
 }
 
+static uint32_t handle_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
+                                          uint32_t w4, uint32_t w5,
+                                          uint32_t *count)
+{
+    bool query_count_only = w5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
+    uint32_t w5_mask = 0;
+    uint32_t ret = FFA_RET_DENIED;
+    struct domain *d = current->domain;
+    struct ffa_ctx *ctx = d->arch.ffa;
+
+    /*
+     * FF-A v1.0 has w5 MBZ while v1.1 allows
+     * FFA_PARTITION_INFO_GET_COUNT_FLAG to be non-zero.
+     */
+    if ( ctx->guest_vers == FFA_VERSION_1_1 )
+        w5_mask = FFA_PARTITION_INFO_GET_COUNT_FLAG;
+    if ( w5 & ~w5_mask )
+        return FFA_RET_INVALID_PARAMETERS;
+
+    if ( query_count_only )
+        return ffa_partition_info_get(w1, w2, w3, w4, w5, count);
+
+    if ( !ffa_page_count )
+        return FFA_RET_DENIED;
+
+    spin_lock(&ctx->lock);
+    spin_lock(&ffa_rx_buffer_lock);
+    if ( !ctx->page_count || !ctx->tx_is_mine )
+        goto out;
+    ret = ffa_partition_info_get(w1, w2, w3, w4, w5, count);
+    if ( ret )
+        goto out;
+
+    if ( ctx->guest_vers == FFA_VERSION_1_0 )
+    {
+        size_t n;
+        struct ffa_partition_info_1_1 *src = ffa_rx;
+        struct ffa_partition_info_1_0 *dst = ctx->rx;
+
+        if ( ctx->page_count * FFA_PAGE_SIZE < *count * sizeof(*dst) )
+        {
+            ret = FFA_RET_NO_MEMORY;
+            goto out_rx_release;
+        }
+
+        for ( n = 0; n < *count; n++ )
+        {
+            dst[n].id = src[n].id;
+            dst[n].execution_context = src[n].execution_context;
+            dst[n].partition_properties = src[n].partition_properties;
+        }
+    }
+    else
+    {
+        size_t sz = *count * sizeof(struct ffa_partition_info_1_1);
+
+        if ( ctx->page_count * FFA_PAGE_SIZE < sz )
+        {
+            ret = FFA_RET_NO_MEMORY;
+            goto out_rx_release;
+        }
+
+
+        memcpy(ctx->rx, ffa_rx, sz);
+    }
+    ctx->tx_is_mine = false;
+out_rx_release:
+    ffa_rx_release();
+out:
+    spin_unlock(&ffa_rx_buffer_lock);
+    spin_unlock(&ctx->lock);
+
+    return ret;
+}
+
+static uint32_t handle_rx_release(void)
+{
+    uint32_t ret = FFA_RET_DENIED;
+    struct domain *d = current->domain;
+    struct ffa_ctx *ctx = d->arch.ffa;
+
+    spin_lock(&ctx->lock);
+    if ( !ctx->page_count || ctx->tx_is_mine )
+        goto out;
+    ret = FFA_RET_OK;
+    ctx->tx_is_mine = true;
+out:
+    spin_unlock(&ctx->lock);
+
+    return ret;
+}
+
 static void handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid)
 {
     struct arm_smccc_1_2_regs arg = { .a0 = fid, };
@@ -574,6 +677,7 @@ bool ffa_handle_call(struct cpu_user_regs *regs, uint32_t fid)
 {
     struct domain *d = current->domain;
     struct ffa_ctx *ctx = d->arch.ffa;
+    uint32_t count;
     int e;
 
     if ( !ctx )
@@ -605,6 +709,24 @@ bool ffa_handle_call(struct cpu_user_regs *regs, uint32_t fid)
         else
             set_regs_success(regs, 0, 0);
         return true;
+    case FFA_PARTITION_INFO_GET:
+        e = handle_partition_info_get(get_user_reg(regs, 1),
+                                      get_user_reg(regs, 2),
+                                      get_user_reg(regs, 3),
+                                      get_user_reg(regs, 4),
+                                      get_user_reg(regs, 5), &count);
+        if ( e )
+            set_regs_error(regs, e);
+        else
+            set_regs_success(regs, count, 0);
+        return true;
+    case FFA_RX_RELEASE:
+        e = handle_rx_release();
+        if ( e )
+            set_regs_error(regs, e);
+        else
+            set_regs_success(regs, 0, 0);
+        return true;
     case FFA_MSG_SEND_DIRECT_REQ_32:
 #ifdef CONFIG_ARM_64
     case FFA_MSG_SEND_DIRECT_REQ_64:
--
2.31.1