From: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
To: xen-devel@lists.xenproject.org, xen-devel@lists.xen.org
Cc: tee-dev@lists.linaro.org, Julien Grall <julien.grall@arm.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Volodymyr Babchuk <volodymyr_babchuk@epam.com>
Subject: [PATCH v2 08/13] optee: add support for RPC SHM buffers
Date: Mon,  3 Sep 2018 19:54:32 +0300
Message-ID: <1535993677-20816-9-git-send-email-volodymyr_babchuk@epam.com>
In-Reply-To: <1535993677-20816-1-git-send-email-volodymyr_babchuk@epam.com>

OP-TEE uses the same idea as with command buffers (see the previous
commit) to issue RPC requests. The problem is that initially it has no
buffer into which it can write a request, so the first RPC request it
makes is special: it asks the normal world (NW) to allocate a shared
buffer for subsequent RPC requests. This buffer is usually allocated
only once per OP-TEE thread and remains allocated until shutdown.

The mediator needs to pin such buffers to make sure that the domain
cannot transfer them to someone else. They also need to be mapped into
Xen's address space, because the mediator needs to check responses
from guests.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
---
 xen/arch/arm/tee/optee.c | 121 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 119 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
index 1008eba..6d6b51d 100644
--- a/xen/arch/arm/tee/optee.c
+++ b/xen/arch/arm/tee/optee.c
@@ -21,6 +21,7 @@
 #include <asm/tee/optee_smc.h>
 
 #define MAX_STD_CALLS   16
+#define MAX_RPC_SHMS    16
 
 /*
  * Call context. OP-TEE can issue multiple RPC returns during one call.
@@ -35,11 +36,22 @@ struct std_call_ctx {
     int rpc_op;
 };
 
+/* Pre-allocated SHM buffer for RPC commands */
+struct shm_rpc {
+    struct list_head list;
+    struct optee_msg_arg *guest_arg;
+    struct page *guest_page;
+    mfn_t guest_mfn;
+    uint64_t cookie;
+};
+
 struct domain_ctx {
     struct list_head list;
     struct list_head call_ctx_list;
+    struct list_head shm_rpc_list;
     struct domain *domain;
     atomic_t call_ctx_count;
+    atomic_t shm_rpc_count;
     spinlock_t lock;
 };
 
@@ -145,8 +157,10 @@ static int optee_enable(struct domain *d)
 
     ctx->domain = d;
     INIT_LIST_HEAD(&ctx->call_ctx_list);
+    INIT_LIST_HEAD(&ctx->shm_rpc_list);
 
     atomic_set(&ctx->call_ctx_count, 0);
+    atomic_set(&ctx->shm_rpc_count, 0);
     spin_lock_init(&ctx->lock);
 
     spin_lock(&domain_ctx_list_lock);
@@ -256,11 +270,81 @@ static struct std_call_ctx *find_call_ctx(struct domain_ctx *ctx, int thread_id)
     return NULL;
 }
 
+static struct shm_rpc *allocate_and_map_shm_rpc(struct domain_ctx *ctx, paddr_t gaddr,
+                                        uint64_t cookie)
+{
+    struct shm_rpc *shm_rpc;
+    int count;
+
+    count = atomic_add_unless(&ctx->shm_rpc_count, 1, MAX_RPC_SHMS);
+    if ( count == MAX_RPC_SHMS )
+        return NULL;
+
+    shm_rpc = xzalloc(struct shm_rpc);
+    if ( !shm_rpc )
+        goto err;
+
+    shm_rpc->guest_mfn = lookup_and_pin_guest_ram_addr(gaddr, NULL);
+
+    if ( mfn_eq(shm_rpc->guest_mfn, INVALID_MFN) )
+        goto err;
+
+    shm_rpc->guest_arg = map_domain_page_global(shm_rpc->guest_mfn);
+    if ( !shm_rpc->guest_arg )
+    {
+        gprintk(XENLOG_INFO, "Could not map domain page\n");
+        goto err;
+    }
+    shm_rpc->cookie = cookie;
+
+    spin_lock(&ctx->lock);
+    list_add_tail(&shm_rpc->list, &ctx->shm_rpc_list);
+    spin_unlock(&ctx->lock);
+
+    return shm_rpc;
+
+err:
+    atomic_dec(&ctx->shm_rpc_count);
+    xfree(shm_rpc);
+    return NULL;
+}
+
+static void free_shm_rpc(struct domain_ctx *ctx, uint64_t cookie)
+{
+    struct shm_rpc *shm_rpc;
+    bool found = false;
+
+    spin_lock(&ctx->lock);
+
+    list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list )
+    {
+        if ( shm_rpc->cookie == cookie )
+        {
+            found = true;
+            list_del(&shm_rpc->list);
+            break;
+        }
+    }
+    spin_unlock(&ctx->lock);
+
+    if ( !found ) {
+        return;
+    }
+
+    if ( shm_rpc->guest_arg ) {
+        unpin_guest_ram_addr(shm_rpc->guest_mfn);
+        unmap_domain_page_global(shm_rpc->guest_arg);
+    }
+
+    xfree(shm_rpc);
+}
+
 static void optee_domain_destroy(struct domain *d)
 {
     struct arm_smccc_res resp;
     struct domain_ctx *ctx;
     struct std_call_ctx *call, *call_tmp;
+    struct shm_rpc *shm_rpc, *shm_rpc_tmp;
     bool found = false;
 
     /* At this time all domain VCPUs should be stopped */
@@ -290,7 +374,11 @@ static void optee_domain_destroy(struct domain *d)
     list_for_each_entry_safe( call, call_tmp, &ctx->call_ctx_list, list )
         free_std_call_ctx(ctx, call);
 
+    list_for_each_entry_safe( shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list )
+        free_shm_rpc(ctx, shm_rpc->cookie);
+
     ASSERT(!atomic_read(&ctx->call_ctx_count));
+    ASSERT(!atomic_read(&ctx->shm_rpc_count));
 
     xfree(ctx);
 }
@@ -452,6 +540,32 @@ out:
     return ret;
 }
 
+static void handle_rpc_func_alloc(struct domain_ctx *ctx,
+                                  struct cpu_user_regs *regs)
+{
+    paddr_t ptr = get_user_reg(regs, 1) << 32 | get_user_reg(regs, 2);
+
+    if ( ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1) )
+        gprintk(XENLOG_WARNING, "Domain returned invalid RPC command buffer\n");
+
+    if ( ptr ) {
+        uint64_t cookie = get_user_reg(regs, 4) << 32 | get_user_reg(regs, 5);
+        struct shm_rpc *shm_rpc;
+
+        shm_rpc = allocate_and_map_shm_rpc(ctx, ptr, cookie);
+        if ( !shm_rpc )
+        {
+            gprintk(XENLOG_WARNING, "Failed to allocate shm_rpc object\n");
+            ptr = 0;
+        }
+        else
+            ptr = mfn_to_maddr(shm_rpc->guest_mfn);
+
+        set_user_reg(regs, 1, ptr >> 32);
+        set_user_reg(regs, 2, ptr & 0xFFFFFFFF);
+    }
+}
+
 static bool handle_rpc(struct domain_ctx *ctx, struct cpu_user_regs *regs)
 {
     struct std_call_ctx *call;
@@ -465,11 +579,14 @@ static bool handle_rpc(struct domain_ctx *ctx, struct cpu_user_regs *regs)
 
     switch ( call->rpc_op ) {
     case OPTEE_SMC_RPC_FUNC_ALLOC:
-        /* TODO: Add handling */
+        handle_rpc_func_alloc(ctx, regs);
         break;
     case OPTEE_SMC_RPC_FUNC_FREE:
-        /* TODO: Add handling */
+    {
+        uint64_t cookie = get_user_reg(regs, 1) << 32 | get_user_reg(regs, 2);
+        free_shm_rpc(ctx, cookie);
         break;
+    }
     case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
         break;
     case OPTEE_SMC_RPC_FUNC_CMD:
-- 
2.7.4

