@@ -81,9 +81,17 @@ struct optee_std_call {
register_t rpc_params[2];
};
+/* Pre-allocated SHM buffer for RPC commands */
+struct shm_rpc {
+ struct list_head list;
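+ /* Page shared with OP-TEE; pinned for the lifetime of this buffer */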
+ struct page_info *guest_page;
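+ /* Cookie chosen by the guest to identify this buffer in RPC requests */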
+ uint64_t cookie;
+};
+
/* Domain context */
struct optee_domain {
struct list_head call_list;
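+ /* List of RPC SHM buffers shared by the guest with OP-TEE */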
+ struct list_head shm_rpc_list;
atomic_t call_count;
spinlock_t lock;
};
@@ -158,6 +166,7 @@ static int optee_domain_init(struct domain *d)
}
INIT_LIST_HEAD(&ctx->call_list);
+ INIT_LIST_HEAD(&ctx->shm_rpc_list);
atomic_set(&ctx->call_count, 0);
spin_lock_init(&ctx->lock);
@@ -199,7 +208,11 @@ static struct optee_std_call *allocate_std_call(struct optee_domain *ctx)
struct optee_std_call *call;
int count;
- /* Make sure that guest does not execute more than max_optee_threads */
+ /*
+ * Make sure that the guest does not execute more than max_optee_threads
+ * standard calls at once. This also indirectly limits the number of RPC
+ * SHM buffers, because OP-TEE allocates one such buffer per standard call.
+ */
count = atomic_add_unless(&ctx->call_count, 1, max_optee_threads);
if ( count == max_optee_threads )
return ERR_PTR(-ENOSPC);
@@ -294,10 +307,80 @@ static void put_std_call(struct optee_domain *ctx, struct optee_std_call *call)
spin_unlock(&ctx->lock);
}
+static struct shm_rpc *allocate_and_pin_shm_rpc(struct optee_domain *ctx,
+ gfn_t gfn, uint64_t cookie)
+{
+ struct shm_rpc *shm_rpc, *shm_rpc_tmp;
+
+ shm_rpc = xzalloc(struct shm_rpc);
+ if ( !shm_rpc )
+ return ERR_PTR(-ENOMEM);
+
+ /* This page will be shared with OP-TEE, so we need to pin it. */
+ shm_rpc->guest_page = get_domain_ram_page(gfn);
+ if ( !shm_rpc->guest_page )
+ goto err;
+
+ shm_rpc->cookie = cookie;
+
+ spin_lock(&ctx->lock);
+ /* Check if there is an existing SHM buffer with the same cookie. */
+ list_for_each_entry( shm_rpc_tmp, &ctx->shm_rpc_list, list )
+ {
+ if ( shm_rpc_tmp->cookie == cookie )
+ {
+ spin_unlock(&ctx->lock);
+ gdprintk(XENLOG_WARNING, "Guest tries to use the same RPC SHM cookie %lx\n",
+ cookie);
+ goto err;
+ }
+ }
+
+ list_add_tail(&shm_rpc->list, &ctx->shm_rpc_list);
+ spin_unlock(&ctx->lock);
+
+ return shm_rpc;
+
+err:
+ if ( shm_rpc->guest_page )
+ put_page(shm_rpc->guest_page);
+ xfree(shm_rpc);
+
+ return ERR_PTR(-EINVAL);
+}
+
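+/*
+ * Find the buffer with the given cookie, unpin its page and free it.
+ * It is safe to call this with a cookie that has no matching buffer.
+ */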
+static void free_shm_rpc(struct optee_domain *ctx, uint64_t cookie)
+{
+ struct shm_rpc *shm_rpc;
+ bool found = false;
+
+ spin_lock(&ctx->lock);
+
+ list_for_each_entry( shm_rpc, &ctx->shm_rpc_list, list )
+ {
+ if ( shm_rpc->cookie == cookie )
+ {
+ found = true;
+ list_del(&shm_rpc->list);
+ break;
+ }
+ }
+ spin_unlock(&ctx->lock);
+
+ if ( !found )
+ return;
+
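+ /* The page was pinned in allocate_and_pin_shm_rpc(), release it now. */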
+ ASSERT(shm_rpc->guest_page);
+ put_page(shm_rpc->guest_page);
+
+ xfree(shm_rpc);
+}
+
static int optee_relinquish_resources(struct domain *d)
{
struct arm_smccc_res resp;
struct optee_std_call *call, *call_tmp;
+ struct shm_rpc *shm_rpc, *shm_rpc_tmp;
struct optee_domain *ctx = d->arch.tee;
if ( !ctx )
@@ -314,6 +397,16 @@ static int optee_relinquish_resources(struct domain *d)
if ( hypercall_preempt_check() )
return -ERESTART;
+ /*
+ * The number of these buffers is also limited by max_optee_threads;
+ * see the comment in allocate_std_call().
+ */
+ list_for_each_entry_safe( shm_rpc, shm_rpc_tmp, &ctx->shm_rpc_list, list )
+ free_shm_rpc(ctx, shm_rpc->cookie);
+
+ if ( hypercall_preempt_check() )
+ return -ERESTART;
+
/*
* Inform OP-TEE that domain is shutting down. This is
* also a fast SMC call, like OPTEE_SMC_VM_CREATED, so
@@ -328,6 +421,7 @@ static int optee_relinquish_resources(struct domain *d)
ASSERT(!spin_is_locked(&ctx->lock));
ASSERT(!atomic_read(&ctx->call_count));
+ ASSERT(list_empty(&ctx->shm_rpc_list));
XFREE(d->arch.tee);
@@ -587,6 +681,48 @@ err:
* request from OP-TEE and wished to resume the interrupted standard
* call.
*/
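+/*
+ * Handle OPTEE_SMC_RPC_FUNC_ALLOC: the guest responds with the address of
+ * the allocated buffer in r1/r2 and a cookie for it in r4/r5.
+ */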
+static void handle_rpc_func_alloc(struct optee_domain *ctx,
+ struct cpu_user_regs *regs,
+ struct optee_std_call *call)
+{
+ struct shm_rpc *shm_rpc;
+ register_t r1, r2;
+ paddr_t ptr = regpair_to_uint64(get_user_reg(regs, 1),
+ get_user_reg(regs, 2));
+ uint64_t cookie = regpair_to_uint64(get_user_reg(regs, 4),
+ get_user_reg(regs, 5));
+
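+ /* The buffer address must be aligned to OP-TEE's page size. */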
+ if ( ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1) )
+ {
+ gdprintk(XENLOG_WARNING, "Domain returned invalid RPC command buffer\n");
+ /*
+ * OP-TEE is waiting for a response to the RPC. We can't just
+ * return error to the guest. We need to provide some invalid
+ * value to OP-TEE, so it can handle error on its side.
+ */
+ ptr = 0;
+ goto out;
+ }
+
+ shm_rpc = allocate_and_pin_shm_rpc(ctx, gaddr_to_gfn(ptr), cookie);
+ if ( IS_ERR(shm_rpc) )
+ {
+ gdprintk(XENLOG_WARNING, "Failed to allocate shm_rpc object: %ld\n",
+ PTR_ERR(shm_rpc));
+ ptr = 0;
+ }
+ else
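+ /* Provide OP-TEE with the machine address of the pinned page. */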
+ ptr = page_to_maddr(shm_rpc->guest_page);
+
+out:
+ uint64_to_regpair(&r1, &r2, ptr);
+
+ do_call_with_arg(ctx, call, regs, OPTEE_SMC_CALL_RETURN_FROM_RPC, r1, r2,
+ get_user_reg(regs, 3),
+ get_user_reg(regs, 4),
+ get_user_reg(regs, 5));
+}
+
static void handle_rpc(struct optee_domain *ctx, struct cpu_user_regs *regs)
{
struct optee_std_call *call;
@@ -610,11 +746,15 @@ static void handle_rpc(struct optee_domain *ctx, struct cpu_user_regs *regs)
switch ( call->rpc_op )
{
case OPTEE_SMC_RPC_FUNC_ALLOC:
- /* TODO: Add handling */
- break;
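+ /* handle_rpc_func_alloc() returns into OP-TEE itself, hence no break. */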
+ handle_rpc_func_alloc(ctx, regs, call);
+ return;
case OPTEE_SMC_RPC_FUNC_FREE:
- /* TODO: Add handling */
+ {
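+ /* OP-TEE passed the cookie of the buffer to free in rpc_params. */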
+ uint64_t cookie = regpair_to_uint64(call->rpc_params[0],
+ call->rpc_params[1]);
+ free_shm_rpc(ctx, cookie);
break;
+ }
case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
break;
case OPTEE_SMC_RPC_FUNC_CMD:
@@ -720,6 +860,7 @@ static bool optee_handle_call(struct cpu_user_regs *regs)
OPTEE_CLIENT_ID(current->domain), &resp);
set_user_reg(regs, 0, resp.a0);
if ( resp.a0 == OPTEE_SMC_RETURN_OK ) {
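+ /*
+ * OP-TEE returns the cookie of a buffer it no longer uses in
+ * a1/a2, so drop the matching shm_rpc object as well.
+ */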
+ free_shm_rpc(ctx, regpair_to_uint64(resp.a1, resp.a2));
set_user_reg(regs, 1, resp.a1);
set_user_reg(regs, 2, resp.a2);
}