From: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
To: xen-devel@lists.xenproject.org, xen-devel@lists.xen.org
Cc: tee-dev@lists.linaro.org, Julien Grall <julien.grall@arm.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Volodymyr Babchuk <volodymyr_babchuk@epam.com>
Subject: [PATCH v2 10/13] optee: add support for RPC commands
Date: Mon,  3 Sep 2018 19:54:34 +0300	[thread overview]
Message-ID: <1535993677-20816-11-git-send-email-volodymyr_babchuk@epam.com> (raw)
In-Reply-To: <1535993677-20816-1-git-send-email-volodymyr_babchuk@epam.com>

OP-TEE can issue multiple RPC requests. We are mostly interested in
the requests that ask NW to allocate/free shared memory for OP-TEE's
needs, because the mediator needs to do address translation in the
same way as it is done for shared buffers registered by NW.

As the mediator now accesses the shared command buffer, we need to
shadow it in the same way as we shadow request buffers for STD calls.
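
In rough pseudocode, the extra handling for OPTEE_SMC_RPC_FUNC_CMD
looks like this (a simplified sketch of the logic in the diff below,
with error and size checks omitted):

    /* RPC return from OP-TEE: expose the RPC command that OP-TEE
     * prepared in the shadowed buffer to the guest, so the guest
     * can service the request. */
    memcpy(shm_rpc->guest_arg, shm_rpc->xen_arg,
           OPTEE_MSG_GET_ARG_SIZE(shm_rpc->xen_arg->num_params));

    /* Guest resumes the STD call: copy its answer back into the
     * shadow buffer before it is handed to OP-TEE again; for
     * OPTEE_MSG_RPC_CMD_SHM_ALLOC the non-contiguous buffer
     * provided by the guest is translated as well. */
    memcpy(shm_rpc->xen_arg, shm_rpc->guest_arg,
           OPTEE_MSG_GET_ARG_SIZE(shm_rpc->guest_arg->num_params));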

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
---
 xen/arch/arm/tee/optee.c | 137 +++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 126 insertions(+), 11 deletions(-)

diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
index 8bfcfdc..b2d795e 100644
--- a/xen/arch/arm/tee/optee.c
+++ b/xen/arch/arm/tee/optee.c
@@ -45,6 +45,7 @@ struct std_call_ctx {
 struct shm_rpc {
     struct list_head list;
     struct optee_msg_arg *guest_arg;
+    struct optee_msg_arg *xen_arg;
     struct page *guest_page;
     mfn_t guest_mfn;
     uint64_t cookie;
@@ -303,6 +304,10 @@ static struct shm_rpc *allocate_and_map_shm_rpc(struct domain_ctx *ctx, paddr_t
     if ( !shm_rpc )
         goto err;
 
+    shm_rpc->xen_arg = alloc_xenheap_page();
+    if ( !shm_rpc->xen_arg )
+        goto err;
+
     shm_rpc->guest_mfn = lookup_and_pin_guest_ram_addr(gaddr, NULL);
 
     if ( mfn_eq(shm_rpc->guest_mfn, INVALID_MFN) )
@@ -324,6 +329,10 @@ static struct shm_rpc *allocate_and_map_shm_rpc(struct domain_ctx *ctx, paddr_t
 
 err:
     atomic_dec(&ctx->shm_rpc_count);
+
+    if ( shm_rpc->xen_arg )
+        free_xenheap_page(shm_rpc->xen_arg);
+
     xfree(shm_rpc);
     return NULL;
 }
@@ -346,9 +355,10 @@ static void free_shm_rpc(struct domain_ctx *ctx, uint64_t cookie)
     }
     spin_unlock(&ctx->lock);
 
-    if ( !found ) {
+    if ( !found )
         return;
-    }
+
+    free_xenheap_page(shm_rpc->xen_arg);
 
     if ( shm_rpc->guest_arg ) {
         unpin_guest_ram_addr(shm_rpc->guest_mfn);
@@ -358,6 +368,24 @@ static void free_shm_rpc(struct domain_ctx *ctx, uint64_t cookie)
     xfree(shm_rpc);
 }
 
+static struct shm_rpc *find_shm_rpc(struct domain_ctx *ctx, uint64_t cookie)
+{
+    struct shm_rpc *shm_rpc;
+
+    spin_lock(&ctx->lock);
+    list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list )
+    {
+        if ( shm_rpc->cookie == cookie )
+        {
+                spin_unlock(&ctx->lock);
+                return shm_rpc;
+        }
+    }
+    spin_unlock(&ctx->lock);
+
+    return NULL;
+}
+
 static struct shm_buf *allocate_shm_buf(struct domain_ctx *ctx,
                                         uint64_t cookie,
                                         int pages_cnt)
@@ -704,6 +732,28 @@ static bool copy_std_request_back(struct domain_ctx *ctx,
     return true;
 }
 
+static void handle_rpc_return(struct domain_ctx *ctx,
+                             struct cpu_user_regs *regs,
+                             struct std_call_ctx *call)
+{
+    call->optee_thread_id = get_user_reg(regs, 3);
+    call->rpc_op = OPTEE_SMC_RETURN_GET_RPC_FUNC(get_user_reg(regs, 0));
+
+    if ( call->rpc_op == OPTEE_SMC_RPC_FUNC_CMD )
+    {
+        /* Copy RPC request from shadowed buffer to guest */
+        uint64_t cookie = get_user_reg(regs, 1) << 32 | get_user_reg(regs, 2);
+        struct shm_rpc *shm_rpc = find_shm_rpc(ctx, cookie);
+        if ( !shm_rpc )
+        {
+            gprintk(XENLOG_ERR, "Can't find SHM-RPC with cookie %lx\n", cookie);
+            return;
+        }
+        memcpy(shm_rpc->guest_arg, shm_rpc->xen_arg,
+               OPTEE_MSG_GET_ARG_SIZE(shm_rpc->xen_arg->num_params));
+    }
+}
+
 static bool execute_std_call(struct domain_ctx *ctx,
                              struct cpu_user_regs *regs,
                              struct std_call_ctx *call)
@@ -715,8 +765,7 @@ static bool execute_std_call(struct domain_ctx *ctx,
     optee_ret = get_user_reg(regs, 0);
     if ( OPTEE_SMC_RETURN_IS_RPC(optee_ret) )
     {
-        call->optee_thread_id = get_user_reg(regs, 3);
-        call->rpc_op = OPTEE_SMC_RETURN_GET_RPC_FUNC(optee_ret);
+        handle_rpc_return(ctx, regs, call);
         return true;
     }
 
@@ -783,6 +832,74 @@ out:
     return ret;
 }
 
+
+static void handle_rpc_cmd_alloc(struct domain_ctx *ctx,
+                                 struct cpu_user_regs *regs,
+                                 struct std_call_ctx *call,
+                                 struct shm_rpc *shm_rpc)
+{
+    if ( shm_rpc->xen_arg->params[0].attr != (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+                                            OPTEE_MSG_ATTR_NONCONTIG) )
+    {
+        gprintk(XENLOG_WARNING, "Invalid attrs for shared mem buffer\n");
+        return;
+    }
+
+    /* Last entry in non_contig array is used to hold RPC-allocated buffer */
+    if ( call->non_contig[MAX_NONCONTIG_ENTRIES - 1] )
+    {
+        free_xenheap_pages(call->non_contig[MAX_NONCONTIG_ENTRIES - 1],
+                           call->non_contig_order[MAX_NONCONTIG_ENTRIES - 1]);
+        call->non_contig[MAX_NONCONTIG_ENTRIES - 1] = NULL;
+    }
+    translate_noncontig(ctx, call, shm_rpc->xen_arg->params + 0,
+                        MAX_NONCONTIG_ENTRIES - 1);
+}
+
+static void handle_rpc_cmd(struct domain_ctx *ctx, struct cpu_user_regs *regs,
+                           struct std_call_ctx *call)
+{
+    struct shm_rpc *shm_rpc;
+    uint64_t cookie;
+    int num_params;
+
+    cookie = get_user_reg(regs, 1) << 32 | get_user_reg(regs, 2);
+
+    shm_rpc = find_shm_rpc(ctx, cookie);
+
+    if ( !shm_rpc )
+    {
+        gprintk(XENLOG_ERR, "Can't find SHM-RPC with cookie %lx\n", cookie);
+        return;
+    }
+
+    num_params = shm_rpc->guest_arg->num_params;
+
+    barrier(); /* Ensure that num_params is read once */
+    if ( OPTEE_MSG_GET_ARG_SIZE(num_params) > OPTEE_MSG_NONCONTIG_PAGE_SIZE )
+        return;
+
+    memcpy(shm_rpc->xen_arg, shm_rpc->guest_arg,
+           OPTEE_MSG_GET_ARG_SIZE(num_params));
+
+    switch (shm_rpc->xen_arg->cmd) {
+    case OPTEE_MSG_RPC_CMD_GET_TIME:
+        break;
+    case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
+        break;
+    case OPTEE_MSG_RPC_CMD_SUSPEND:
+        break;
+    case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
+        handle_rpc_cmd_alloc(ctx, regs, call, shm_rpc);
+        break;
+    case OPTEE_MSG_RPC_CMD_SHM_FREE:
+        free_shm_buf(ctx, shm_rpc->xen_arg->params[0].u.value.b);
+        break;
+    default:
+        break;
+    }
+}
+
 static void handle_rpc_func_alloc(struct domain_ctx *ctx,
                                   struct cpu_user_regs *regs)
 {
@@ -796,13 +913,11 @@ static void handle_rpc_func_alloc(struct domain_ctx *ctx,
         struct shm_rpc *shm_rpc;
 
         shm_rpc = allocate_and_map_shm_rpc(ctx, ptr, cookie);
-        if ( !shm_rpc )
-        {
+        if ( !shm_rpc ) {
             gprintk(XENLOG_WARNING, "Failed to allocate shm_rpc object\n");
-            ptr = 0;
-        }
-        else
-            ptr = mfn_to_maddr(shm_rpc->guest_mfn);
+            ptr = ~0;
+        } else
+            ptr = virt_to_maddr(shm_rpc->xen_arg);
 
         set_user_reg(regs, 1, ptr >> 32);
         set_user_reg(regs, 2, ptr & 0xFFFFFFFF);
@@ -833,7 +948,7 @@ static bool handle_rpc(struct domain_ctx *ctx, struct cpu_user_regs *regs)
     case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
         break;
     case OPTEE_SMC_RPC_FUNC_CMD:
-        /* TODO: Add handling */
+        handle_rpc_cmd(ctx, regs, call);
         break;
     }
 
-- 
2.7.4

