From: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "tee-dev@lists.linaro.org" <tee-dev@lists.linaro.org>,
	Julien Grall <julien.grall@arm.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
Subject: [Xen-devel] [PATCH v6 06/10] xen/arm: optee: add support for RPC SHM buffers
Date: Tue, 11 Jun 2019 18:46:36 +0000
Message-ID: <20190611184541.7281-7-volodymyr_babchuk@epam.com> (raw)
In-Reply-To: <20190611184541.7281-1-volodymyr_babchuk@epam.com>

OP-TEE uses the same idea as with command buffers (see the previous
commit) to issue RPC requests. The problem is that initially it has
no buffer in which to write a request, so the first RPC request it
makes is special: it asks the normal world (NW) to allocate a shared
buffer for the other RPC requests. Usually this buffer is allocated
only once per OP-TEE thread and remains allocated until the guest
shuts down. The guest can ask OP-TEE to disable RPC buffer caching;
in that case OP-TEE will ask the guest to allocate/free a buffer for
each RPC.
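
At the SMC level the exchange is simple; a minimal sketch of how the
mediator decodes the guest's answer to that first allocation request
(register layout as used by handle_rpc_func_alloc() in the hunk
below):

    /* Guest resumed the call: buffer address in r1/r2, cookie in r4/r5 */
    paddr_t ptr = regpair_to_uint64(get_user_reg(regs, 1),
                                    get_user_reg(regs, 2));
    uint64_t cookie = regpair_to_uint64(get_user_reg(regs, 4),
                                        get_user_reg(regs, 5));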

The mediator needs to pin this buffer to make sure that the page will
not be freed while it is shared with OP-TEE.
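
The pinning boils down to taking a reference on the guest page and
dropping it when the buffer is released; a minimal sketch, using
get_domain_ram_page() (a helper introduced earlier in this series
that also checks the guest memory type) and the standard put_page():

    struct page_info *page = get_domain_ram_page(gfn); /* takes a reference */
    if ( !page )
        return -EINVAL;    /* not normal guest RAM */
    /* ... the page cannot be freed while this reference is held ... */
    put_page(page);        /* dropped once OP-TEE frees the buffer */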

The life cycle of this buffer is controlled by OP-TEE: it asks the
guest to create the buffer and it asks the guest to free it. So there
is not much sense in limiting the number of those buffers: we already
limit the number of concurrent standard calls, and refusing to
allocate RPC buffers would impair OP-TEE functionality.
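
The indirect limit comes from the standard call accounting that is
already in place; the relevant check in allocate_std_call() (shown in
the hunk below) is:

    /* At most max_optee_threads standard calls per domain, hence at most
     * that many RPC SHM buffers at any point in time. */
    count = atomic_add_unless(&ctx->call_count, 1, max_optee_threads);
    if ( count == max_optee_threads )
        return ERR_PTR(-ENOSPC);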

Those buffers can be freed in two ways: either OP-TEE issues an
OPTEE_SMC_RPC_FUNC_FREE RPC request, or the guest disables buffer
caching by calling the OPTEE_SMC_DISABLE_SHM_CACHE function. In the
latter case OP-TEE returns the cookie of the SHM buffer it has just
freed.
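
Both paths end up in the same free_shm_rpc() helper; roughly (both
fragments appear in the hunks below):

    /* OPTEE_SMC_RPC_FUNC_FREE: cookie comes from the saved RPC parameters */
    free_shm_rpc(ctx, regpair_to_uint64(call->rpc_params[0],
                                        call->rpc_params[1]));

    /* OPTEE_SMC_DISABLE_SHM_CACHE: OP-TEE returns the freed cookie in a1/a2 */
    if ( resp.a0 == OPTEE_SMC_RETURN_OK )
        free_shm_rpc(ctx, regpair_to_uint64(resp.a1, resp.a2));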

OP-TEE expects this RPC buffer to have a size of
OPTEE_MSG_NONCONTIG_PAGE_SIZE (4096 bytes) and to be aligned to that
size. So, basically, it expects one 4K page from the guest, which is
the same as Xen's PAGE_SIZE.
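
A sketch of the corresponding checks; the BUILD_BUG_ON() is only a
hypothetical illustration of the PAGE_SIZE assumption, while the
run-time alignment check is the one actually used in
handle_rpc_func_alloc() below:

    /* Hypothetical: one OP-TEE RPC page is exactly one Xen page */
    BUILD_BUG_ON(OPTEE_MSG_NONCONTIG_PAGE_SIZE != PAGE_SIZE);

    /* Actual run-time check: the guest address must be 4K-aligned */
    if ( ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1) )
        ptr = 0;    /* report an invalid buffer back to OP-TEE */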

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
Acked-by: Julien Grall <julien.grall@arm.com>

---
 All the patches to optee.c should be merged together. They were
 split to ease review, but they depend heavily on each other.

 Changes from v4:
  - handle_rpc_func_alloc() now calls do_call_with_arg() directly

 Changes from v3:
  - Removed the MAX_RPC_SHMS constant. Now this value depends on the
    number of OP-TEE threads
  - Various formatting fixes
  - Added checks for guest memory type

 Changes from v2:
  - Added a check to ensure that a guest does not return two SHM
    buffers with the same cookie
  - Fixed coding style
  - Store RPC parameters during the RPC return to make sure that the
    guest will not change them during call continuation
---
 xen/arch/arm/tee/optee.c | 149 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 145 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
index f092492849..175789fb00 100644
--- a/xen/arch/arm/tee/optee.c
+++ b/xen/arch/arm/tee/optee.c
@@ -81,9 +81,17 @@ struct optee_std_call {
     register_t rpc_params[2];
 };
 
+/* Pre-allocated SHM buffer for RPC commands */
+struct shm_rpc {
+    struct list_head list;
+    struct page_info *guest_page;
+    uint64_t cookie;
+};
+
 /* Domain context */
 struct optee_domain {
     struct list_head call_list;
+    struct list_head shm_rpc_list;
     atomic_t call_count;
     spinlock_t lock;
 };
@@ -158,6 +166,7 @@ static int optee_domain_init(struct domain *d)
     }
 
     INIT_LIST_HEAD(&ctx->call_list);
+    INIT_LIST_HEAD(&ctx->shm_rpc_list);
     atomic_set(&ctx->call_count, 0);
     spin_lock_init(&ctx->lock);
 
@@ -199,7 +208,11 @@ static struct optee_std_call *allocate_std_call(struct optee_domain *ctx)
     struct optee_std_call *call;
     int count;
 
-    /* Make sure that guest does not execute more than max_optee_threads */
+    /*
+     * Make sure that the guest does not execute more than max_optee_threads
+     * calls. This also indirectly limits the number of RPC SHM buffers,
+     * because OP-TEE allocates one such buffer per standard call.
+     */
     count = atomic_add_unless(&ctx->call_count, 1, max_optee_threads);
     if ( count == max_optee_threads )
         return ERR_PTR(-ENOSPC);
@@ -294,10 +307,80 @@ static void put_std_call(struct optee_domain *ctx, struct optee_std_call *call)
     spin_unlock(&ctx->lock);
 }
 
+static struct shm_rpc *allocate_and_pin_shm_rpc(struct optee_domain *ctx,
+                                                gfn_t gfn, uint64_t cookie)
+{
+    struct shm_rpc *shm_rpc, *shm_rpc_tmp;
+
+    shm_rpc = xzalloc(struct shm_rpc);
+    if ( !shm_rpc )
+        return ERR_PTR(-ENOMEM);
+
+    /* This page will be shared with OP-TEE, so we need to pin it. */
+    shm_rpc->guest_page = get_domain_ram_page(gfn);
+    if ( !shm_rpc->guest_page )
+        goto err;
+
+    shm_rpc->cookie = cookie;
+
+    spin_lock(&ctx->lock);
+    /* Check if there is existing SHM with the same cookie. */
+    list_for_each_entry( shm_rpc_tmp, &ctx->shm_rpc_list, list )
+    {
+        if ( shm_rpc_tmp->cookie == cookie )
+        {
+            spin_unlock(&ctx->lock);
+            gdprintk(XENLOG_WARNING, "Guest tries to use the same RPC SHM cookie %lx\n",
+                     cookie);
+            goto err;
+        }
+    }
+
+    list_add_tail(&shm_rpc->list, &ctx->shm_rpc_list);
+    spin_unlock(&ctx->lock);
+
+    return shm_rpc;
+
+err:
+    if ( shm_rpc->guest_page )
+        put_page(shm_rpc->guest_page);
+    xfree(shm_rpc);
+
+    return ERR_PTR(-EINVAL);
+}
+
+static void free_shm_rpc(struct optee_domain *ctx, uint64_t cookie)
+{
+    struct shm_rpc *shm_rpc;
+    bool found = false;
+
+    spin_lock(&ctx->lock);
+
+    list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list )
+    {
+        if ( shm_rpc->cookie == cookie )
+        {
+            found = true;
+            list_del(&shm_rpc->list);
+            break;
+        }
+    }
+    spin_unlock(&ctx->lock);
+
+    if ( !found )
+        return;
+
+    ASSERT(shm_rpc->guest_page);
+    put_page(shm_rpc->guest_page);
+
+    xfree(shm_rpc);
+}
+
 static int optee_relinquish_resources(struct domain *d)
 {
     struct arm_smccc_res resp;
     struct optee_std_call *call, *call_tmp;
+    struct shm_rpc *shm_rpc, *shm_rpc_tmp;
     struct optee_domain *ctx = d->arch.tee;
 
     if ( !ctx )
@@ -314,6 +397,16 @@ static int optee_relinquish_resources(struct domain *d)
     if ( hypercall_preempt_check() )
         return -ERESTART;
 
+    /*
+     * The number of these buffers also depends on max_optee_threads;
+     * see the comment above.
+     */
+    list_for_each_entry_safe( shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list )
+        free_shm_rpc(ctx, shm_rpc->cookie);
+
+    if ( hypercall_preempt_check() )
+        return -ERESTART;
+
     /*
      * Inform OP-TEE that domain is shutting down. This is
      * also a fast SMC call, like OPTEE_SMC_VM_CREATED, so
@@ -328,6 +421,7 @@ static int optee_relinquish_resources(struct domain *d)
 
     ASSERT(!spin_is_locked(&ctx->lock));
     ASSERT(!atomic_read(&ctx->call_count));
+    ASSERT(list_empty(&ctx->shm_rpc_list));
 
     XFREE(d->arch.tee);
 
@@ -587,6 +681,48 @@ err:
  * request from OP-TEE and wished to resume the interrupted standard
  * call.
  */
+static void handle_rpc_func_alloc(struct optee_domain *ctx,
+                                  struct cpu_user_regs *regs,
+                                  struct optee_std_call *call)
+{
+    struct shm_rpc *shm_rpc;
+    register_t r1, r2;
+    paddr_t ptr = regpair_to_uint64(get_user_reg(regs, 1),
+                                    get_user_reg(regs, 2));
+    uint64_t cookie = regpair_to_uint64(get_user_reg(regs, 4),
+                                        get_user_reg(regs, 5));
+
+    if ( ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1) )
+    {
+        gdprintk(XENLOG_WARNING, "Domain returned invalid RPC command buffer\n");
+        /*
+         * OP-TEE is waiting for a response to the RPC. We can't just
+         * return error to the guest. We need to provide some invalid
+         * value to OP-TEE, so it can handle error on its side.
+         */
+        ptr = 0;
+        goto out;
+    }
+
+    shm_rpc = allocate_and_pin_shm_rpc(ctx, gaddr_to_gfn(ptr), cookie);
+    if ( IS_ERR(shm_rpc) )
+    {
+        gdprintk(XENLOG_WARNING, "Failed to allocate shm_rpc object: %ld\n",
+                 PTR_ERR(shm_rpc));
+        ptr = 0;
+    }
+    else
+        ptr = page_to_maddr(shm_rpc->guest_page);
+
+out:
+    uint64_to_regpair(&r1, &r2, ptr);
+
+    do_call_with_arg(ctx, call, regs, OPTEE_SMC_CALL_RETURN_FROM_RPC, r1, r2,
+                     get_user_reg(regs, 3),
+                     get_user_reg(regs, 4),
+                     get_user_reg(regs, 5));
+}
+
 static void handle_rpc(struct optee_domain *ctx, struct cpu_user_regs *regs)
 {
     struct optee_std_call *call;
@@ -610,11 +746,15 @@ static void handle_rpc(struct optee_domain *ctx, struct cpu_user_regs *regs)
     switch ( call->rpc_op )
     {
     case OPTEE_SMC_RPC_FUNC_ALLOC:
-        /* TODO: Add handling */
-        break;
+        handle_rpc_func_alloc(ctx, regs, call);
+        return;
     case OPTEE_SMC_RPC_FUNC_FREE:
-        /* TODO: Add handling */
+    {
+        uint64_t cookie = regpair_to_uint64(call->rpc_params[0],
+                                            call->rpc_params[1]);
+        free_shm_rpc(ctx, cookie);
         break;
+    }
     case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
         break;
     case OPTEE_SMC_RPC_FUNC_CMD:
@@ -720,6 +860,7 @@ static bool optee_handle_call(struct cpu_user_regs *regs)
                       OPTEE_CLIENT_ID(current->domain), &resp);
         set_user_reg(regs, 0, resp.a0);
         if ( resp.a0 == OPTEE_SMC_RETURN_OK ) {
+            free_shm_rpc(ctx, regpair_to_uint64(resp.a1, resp.a2));
             set_user_reg(regs, 1, resp.a1);
             set_user_reg(regs, 2, resp.a2);
         }
-- 
2.21.0

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
