Diffstat (limited to 'drivers/tee/optee')
-rw-r--r-- | drivers/tee/optee/Kconfig          |  16
-rw-r--r-- | drivers/tee/optee/Makefile         |   7
-rw-r--r-- | drivers/tee/optee/call.c           | 693
-rw-r--r-- | drivers/tee/optee/core.c           | 729
-rw-r--r-- | drivers/tee/optee/optee_msg.h      | 444
-rw-r--r-- | drivers/tee/optee/optee_private.h  | 200
-rw-r--r-- | drivers/tee/optee/optee_smc.h      | 465
-rw-r--r-- | drivers/tee/optee/rpc.c            | 452
-rw-r--r-- | drivers/tee/optee/shm_pool.c       |  75
-rw-r--r-- | drivers/tee/optee/shm_pool.h       |  23
-rw-r--r-- | drivers/tee/optee/supp.c           | 383
11 files changed, 3487 insertions(+), 0 deletions(-)
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig new file mode 100644 index 000000000..3c1ec4e9e --- /dev/null +++ b/drivers/tee/optee/Kconfig @@ -0,0 +1,16 @@ +# OP-TEE Trusted Execution Environment Configuration +config OPTEE + tristate "OP-TEE" + depends on HAVE_ARM_SMCCC + depends on MMU + help + This implements the OP-TEE Trusted Execution Environment (TEE) + driver. + +config OPTEE_SHM_NUM_PRIV_PAGES + int "Private Shared Memory Pages" + default 1 + depends on OPTEE + help + This sets the number of private shared memory pages to be + used by OP-TEE TEE driver. diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile new file mode 100644 index 000000000..48d262ae2 --- /dev/null +++ b/drivers/tee/optee/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_OPTEE) += optee.o +optee-objs += core.o +optee-objs += call.o +optee-objs += rpc.o +optee-objs += supp.o +optee-objs += shm_pool.o diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c new file mode 100644 index 000000000..4f4a7e2b1 --- /dev/null +++ b/drivers/tee/optee/call.c @@ -0,0 +1,693 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/arm-smccc.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include "optee_private.h" +#include "optee_smc.h" + +struct optee_call_waiter { + struct list_head list_node; + struct completion c; +}; + +static void optee_cq_wait_init(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're preparing to make a call to secure world. In case we can't + * allocate a thread in secure world we'll end up waiting in + * optee_cq_wait_for_completion(). + * + * Normally if there's no contention in secure world the call will + * complete and we can cleanup directly with optee_cq_wait_final(). + */ + mutex_lock(&cq->mutex); + + /* + * We add ourselves to the queue, but we don't wait. This + * guarantees that we don't lose a completion if secure world + * returns busy and another thread just exited and try to complete + * someone. + */ + init_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_wait_for_completion(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + wait_for_completion(&w->c); + + mutex_lock(&cq->mutex); + + /* Move to end of list to get out of the way for other waiters */ + list_del(&w->list_node); + reinit_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_complete_one(struct optee_call_queue *cq) +{ + struct optee_call_waiter *w; + + list_for_each_entry(w, &cq->waiters, list_node) { + if (!completion_done(&w->c)) { + complete(&w->c); + break; + } + } +} + +static void optee_cq_wait_final(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're done with the call to secure world. 
The thread in secure + * world that was used for this call is now available for some + * other task to use. + */ + mutex_lock(&cq->mutex); + + /* Get out of the list */ + list_del(&w->list_node); + + /* Wake up one eventual waiting task */ + optee_cq_complete_one(cq); + + /* + * If we're completed we've got a completion from another task that + * was just done with its call to secure world. Since yet another + * thread now is available in secure world wake up another eventual + * waiting task. + */ + if (completion_done(&w->c)) + optee_cq_complete_one(cq); + + mutex_unlock(&cq->mutex); +} + +/* Requires the filpstate mutex to be held */ +static struct optee_session *find_session(struct optee_context_data *ctxdata, + u32 session_id) +{ + struct optee_session *sess; + + list_for_each_entry(sess, &ctxdata->sess_list, list_node) + if (sess->session_id == session_id) + return sess; + + return NULL; +} + +/** + * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world + * @ctx: calling context + * @parg: physical address of message to pass to secure world + * + * Does and SMC to OP-TEE in secure world and handles eventual resulting + * Remote Procedure Calls (RPC) from OP-TEE. + * + * Returns return code from secure world, 0 is OK + */ +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) +{ + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_call_waiter w; + struct optee_rpc_param param = { }; + struct optee_call_ctx call_ctx = { }; + u32 ret; + + param.a0 = OPTEE_SMC_CALL_WITH_ARG; + reg_pair_from_64(¶m.a1, ¶m.a2, parg); + /* Initialize waiter */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(param.a0, param.a1, param.a2, param.a3, + param.a4, param.a5, param.a6, param.a7, + &res); + + if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) { + /* + * Out of threads in secure world, wait for a thread + * become available. + */ + optee_cq_wait_for_completion(&optee->call_queue, &w); + } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { + param.a0 = res.a0; + param.a1 = res.a1; + param.a2 = res.a2; + param.a3 = res.a3; + optee_handle_rpc(ctx, ¶m, &call_ctx); + } else { + ret = res.a0; + break; + } + } + + optee_rpc_finalize_call(&call_ctx); + /* + * We're done with our thread in secure world, if there's any + * thread waiters wake up one. 
+ */ + optee_cq_wait_final(&optee->call_queue, &w); + + return ret; +} + +static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, + struct optee_msg_arg **msg_arg, + phys_addr_t *msg_parg) +{ + int rc; + struct tee_shm *shm; + struct optee_msg_arg *ma; + + shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params), + TEE_SHM_MAPPED); + if (IS_ERR(shm)) + return shm; + + ma = tee_shm_get_va(shm, 0); + if (IS_ERR(ma)) { + rc = PTR_ERR(ma); + goto out; + } + + rc = tee_shm_get_pa(shm, 0, msg_parg); + if (rc) + goto out; + + memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params)); + ma->num_params = num_params; + *msg_arg = ma; +out: + if (rc) { + tee_shm_free(shm); + return ERR_PTR(rc); + } + + return shm; +} + +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + int rc; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess = NULL; + + /* +2 for the meta parameters added below */ + shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION; + msg_arg->cancel_id = arg->cancel_id; + + /* + * Initialize and add the meta parameters needed when opening a + * session. + */ + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid)); + memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid)); + msg_arg->params[1].u.value.c = arg->clnt_login; + + rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); + if (rc) + goto out; + + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) { + rc = -ENOMEM; + goto out; + } + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (msg_arg->ret == TEEC_SUCCESS) { + /* A new session has been created, add it to the list. 
*/ + sess->session_id = msg_arg->session; + mutex_lock(&ctxdata->mutex); + list_add(&sess->list_node, &ctxdata->sess_list); + mutex_unlock(&ctxdata->mutex); + } else { + kfree(sess); + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) { + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + /* Close session again to avoid leakage */ + optee_close_session(ctx, msg_arg->session); + } else { + arg->session = msg_arg->session; + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; + } +out: + tee_shm_free(shm); + + return rc; +} + +int optee_close_session(struct tee_context *ctx, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid and remove it from the list */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + if (sess) + list_del(&sess->list_node); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + kfree(sess); + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + msg_arg->session = session; + optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + int rc; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, arg->session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; + msg_arg->func = arg->func; + msg_arg->session = arg->session; + msg_arg->cancel_id = arg->cancel_id; + + rc = optee_to_msg_param(msg_arg->params, arg->num_params, param); + if (rc) + goto out; + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; +out: + tee_shm_free(shm); + return rc; +} + +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CANCEL; + msg_arg->session = session; + msg_arg->cancel_id = cancel_id; + optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +/** + * optee_enable_shm_cache() - Enables caching of some shared memory allocation + * in OP-TEE + * @optee: main service struct + */ +void optee_enable_shm_cache(struct optee *optee) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. 
*/ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res); + if (res.a0 == OPTEE_SMC_RETURN_OK) + break; + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + optee_cq_wait_final(&optee->call_queue, &w); +} + +/** + * __optee_disable_shm_cache() - Disables caching of some shared memory + * allocation in OP-TEE + * @optee: main service struct + * @is_mapped: true if the cached shared memory addresses were mapped by this + * kernel, are safe to dereference, and should be freed + */ +static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + union { + struct arm_smccc_res smccc; + struct optee_smc_disable_shm_cache_result result; + } res; + + optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res.smccc); + if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL) + break; /* All shm's freed */ + if (res.result.status == OPTEE_SMC_RETURN_OK) { + struct tee_shm *shm; + + /* + * Shared memory references that were not mapped by + * this kernel must be ignored to prevent a crash. + */ + if (!is_mapped) + continue; + + shm = reg_pair_to_ptr(res.result.shm_upper32, + res.result.shm_lower32); + tee_shm_free(shm); + } else { + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + } + optee_cq_wait_final(&optee->call_queue, &w); +} + +/** + * optee_disable_shm_cache() - Disables caching of mapped shared memory + * allocations in OP-TEE + * @optee: main service struct + */ +void optee_disable_shm_cache(struct optee *optee) +{ + return __optee_disable_shm_cache(optee, true); +} + +/** + * optee_disable_unmapped_shm_cache() - Disables caching of shared memory + * allocations in OP-TEE which are not + * currently mapped + * @optee: main service struct + */ +void optee_disable_unmapped_shm_cache(struct optee *optee) +{ + return __optee_disable_shm_cache(optee, false); +} + +#define PAGELIST_ENTRIES_PER_PAGE \ + ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1) + +/** + * optee_fill_pages_list() - write list of user pages to given shared + * buffer. + * + * @dst: page-aligned buffer where list of pages will be stored + * @pages: array of pages that represents shared buffer + * @num_pages: number of entries in @pages + * @page_offset: offset of user buffer from page start + * + * @dst should be big enough to hold list of user page addresses and + * links to the next pages of buffer + */ +void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, + size_t page_offset) +{ + int n = 0; + phys_addr_t optee_page; + /* + * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h + * for details. + */ + struct { + u64 pages_list[PAGELIST_ENTRIES_PER_PAGE]; + u64 next_page_data; + } *pages_data; + + /* + * Currently OP-TEE uses 4k page size and it does not looks + * like this will change in the future. On other hand, there are + * no know ARM architectures with page size < 4k. + * Thus the next built assert looks redundant. But the following + * code heavily relies on this assumption, so it is better be + * safe than sorry. + */ + BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE); + + pages_data = (void *)dst; + /* + * If linux page is bigger than 4k, and user buffer offset is + * larger than 4k/8k/12k/etc this will skip first 4k pages, + * because they bear no value data for OP-TEE. 
+ */ + optee_page = page_to_phys(*pages) + + round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE); + + while (true) { + pages_data->pages_list[n++] = optee_page; + + if (n == PAGELIST_ENTRIES_PER_PAGE) { + pages_data->next_page_data = + virt_to_phys(pages_data + 1); + pages_data++; + n = 0; + } + + optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE; + if (!(optee_page & ~PAGE_MASK)) { + if (!--num_pages) + break; + pages++; + optee_page = page_to_phys(*pages); + } + } +} + +/* + * The final entry in each pagelist page is a pointer to the next + * pagelist page. + */ +static size_t get_pages_list_size(size_t num_entries) +{ + int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE); + + return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE; +} + +u64 *optee_allocate_pages_list(size_t num_entries) +{ + return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL); +} + +void optee_free_pages_list(void *list, size_t num_entries) +{ + free_pages_exact(list, get_pages_list_size(num_entries)); +} + +static bool is_normal_memory(pgprot_t p) +{ +#if defined(CONFIG_ARM) + return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) || + ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK)); +#elif defined(CONFIG_ARM64) + return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL); +#else +#error "Unuspported architecture" +#endif +} + +static int __check_mem_type(struct vm_area_struct *vma, unsigned long end) +{ + while (vma && is_normal_memory(vma->vm_page_prot)) { + if (vma->vm_end >= end) + return 0; + vma = vma->vm_next; + } + + return -EINVAL; +} + +static int check_mem_type(unsigned long start, size_t num_pages) +{ + struct mm_struct *mm = current->mm; + int rc; + + down_read(&mm->mmap_sem); + rc = __check_mem_type(find_vma(mm, start), + start + num_pages * PAGE_SIZE); + up_read(&mm->mmap_sem); + + return rc; +} + +int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, + struct page **pages, size_t num_pages, + unsigned long start) +{ + struct tee_shm *shm_arg = NULL; + struct optee_msg_arg *msg_arg; + u64 *pages_list; + phys_addr_t msg_parg; + int rc; + + if (!num_pages) + return -EINVAL; + + rc = check_mem_type(start, num_pages); + if (rc) + return rc; + + pages_list = optee_allocate_pages_list(num_pages); + if (!pages_list) + return -ENOMEM; + + shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg); + if (IS_ERR(shm_arg)) { + rc = PTR_ERR(shm_arg); + goto out; + } + + optee_fill_pages_list(pages_list, pages, num_pages, + tee_shm_get_page_offset(shm)); + + msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM; + msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | + OPTEE_MSG_ATTR_NONCONTIG; + msg_arg->params->u.tmem.shm_ref = (unsigned long)shm; + msg_arg->params->u.tmem.size = tee_shm_get_size(shm); + /* + * In the least bits of msg_arg->params->u.tmem.buf_ptr we + * store buffer offset from 4k page, as described in OP-TEE ABI. 
+ */ + msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) | + (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); + + if (optee_do_call_with_arg(ctx, msg_parg) || + msg_arg->ret != TEEC_SUCCESS) + rc = -EINVAL; + + tee_shm_free(shm_arg); +out: + optee_free_pages_list(pages_list, num_pages); + return rc; +} + +int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) +{ + struct tee_shm *shm_arg; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + int rc = 0; + + shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg); + if (IS_ERR(shm_arg)) + return PTR_ERR(shm_arg); + + msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM; + + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; + msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm; + + if (optee_do_call_with_arg(ctx, msg_parg) || + msg_arg->ret != TEEC_SUCCESS) + rc = -EINVAL; + tee_shm_free(shm_arg); + return rc; +} + +int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm, + struct page **pages, size_t num_pages, + unsigned long start) +{ + /* + * We don't want to register supplicant memory in OP-TEE. + * Instead information about it will be passed in RPC code. + */ + return check_mem_type(start, num_pages); +} + +int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm) +{ + return 0; +} diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c new file mode 100644 index 000000000..473981e3a --- /dev/null +++ b/drivers/tee/optee/core.c @@ -0,0 +1,729 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/arm-smccc.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include "optee_private.h" +#include "optee_smc.h" +#include "shm_pool.h" + +#define DRIVER_NAME "optee" + +#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES + +/** + * optee_from_msg_param() - convert from OPTEE_MSG parameters to + * struct tee_param + * @params: subsystem internal parameter representation + * @num_params: number of elements in the parameter arrays + * @msg_params: OPTEE_MSG parameters + * Returns 0 on success or <0 on failure + */ +int optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params) +{ + int rc; + size_t n; + struct tee_shm *shm; + phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; + const struct optee_msg_param *mp = msg_params + n; + u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; + + switch (attr) { + case OPTEE_MSG_ATTR_TYPE_NONE: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&p->u, 0, sizeof(p->u)); + break; + case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; + p->u.value.a = mp->u.value.a; + p->u.value.b = mp->u.value.b; + p->u.value.c = mp->u.value.c; + break; + case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; + p->u.memref.size = mp->u.tmem.size; + shm = (struct tee_shm *)(unsigned long) + mp->u.tmem.shm_ref; + if (!shm) { + p->u.memref.shm_offs = 0; + p->u.memref.shm = NULL; + break; + } + rc = tee_shm_get_pa(shm, 0, &pa); + if (rc) + return rc; + p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; + p->u.memref.shm = shm; + break; + case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; + p->u.memref.size = mp->u.rmem.size; + shm = (struct tee_shm *)(unsigned long) + mp->u.rmem.shm_ref; + + if (!shm) { + p->u.memref.shm_offs = 0; + p->u.memref.shm = NULL; + break; + } + p->u.memref.shm_offs = mp->u.rmem.offs; + p->u.memref.shm = shm; + + break; + + default: + return -EINVAL; + } + } + return 0; +} + +static int to_msg_param_tmp_mem(struct optee_msg_param *mp, + const struct tee_param *p) +{ + int rc; + phys_addr_t pa; + + mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + + mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm; + mp->u.tmem.size = p->u.memref.size; + + if (!p->u.memref.shm) { + mp->u.tmem.buf_ptr = 0; + return 0; + } + + rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa); + if (rc) + return rc; + + mp->u.tmem.buf_ptr = pa; + mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED << + OPTEE_MSG_ATTR_CACHE_SHIFT; + + return 0; +} + +static int to_msg_param_reg_mem(struct optee_msg_param *mp, + const struct tee_param *p) +{ + mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + + 
mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm; + mp->u.rmem.size = p->u.memref.size; + mp->u.rmem.offs = p->u.memref.shm_offs; + return 0; +} + +/** + * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters + * @msg_params: OPTEE_MSG parameters + * @num_params: number of elements in the parameter arrays + * @params: subsystem itnernal parameter representation + * Returns 0 on success or <0 on failure + */ +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params) +{ + int rc; + size_t n; + + for (n = 0; n < num_params; n++) { + const struct tee_param *p = params + n; + struct optee_msg_param *mp = msg_params + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&mp->u, 0, sizeof(mp->u)); + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + mp->u.value.a = p->u.value.a; + mp->u.value.b = p->u.value.b; + mp->u.value.c = p->u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + if (tee_shm_is_registered(p->u.memref.shm)) + rc = to_msg_param_reg_mem(mp, p); + else + rc = to_msg_param_tmp_mem(mp, p); + if (rc) + return rc; + break; + default: + return -EINVAL; + } + } + return 0; +} + +static void optee_get_version(struct tee_device *teedev, + struct tee_ioctl_version_data *vers) +{ + struct tee_ioctl_version_data v = { + .impl_id = TEE_IMPL_ID_OPTEE, + .impl_caps = TEE_OPTEE_CAP_TZ, + .gen_caps = TEE_GEN_CAP_GP, + }; + struct optee *optee = tee_get_drvdata(teedev); + + if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) + v.gen_caps |= TEE_GEN_CAP_REG_MEM; + *vers = v; +} + +static int optee_open(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + + ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL); + if (!ctxdata) + return -ENOMEM; + + if (teedev == optee->supp_teedev) { + bool busy = true; + + mutex_lock(&optee->supp.mutex); + if (!optee->supp.ctx) { + busy = false; + optee->supp.ctx = ctx; + } + mutex_unlock(&optee->supp.mutex); + if (busy) { + kfree(ctxdata); + return -EBUSY; + } + } + + mutex_init(&ctxdata->mutex); + INIT_LIST_HEAD(&ctxdata->sess_list); + + ctx->data = ctxdata; + return 0; +} + +static void optee_release(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + struct optee_msg_arg *arg = NULL; + phys_addr_t parg; + struct optee_session *sess; + struct optee_session *sess_tmp; + + if (!ctxdata) + return; + + shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED); + if (!IS_ERR(shm)) { + arg = tee_shm_get_va(shm, 0); + /* + * If va2pa fails for some reason, we can't call into + * secure world, only free the memory. Secure OS will leak + * sessions and finally refuse more sessions, but we will + * at least let normal world reclaim its memory. 
+ */ + if (!IS_ERR(arg)) + if (tee_shm_va2pa(shm, arg, &parg)) + arg = NULL; /* prevent usage of parg below */ + } + + list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list, + list_node) { + list_del(&sess->list_node); + if (!IS_ERR_OR_NULL(arg)) { + memset(arg, 0, sizeof(*arg)); + arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + arg->session = sess->session_id; + optee_do_call_with_arg(ctx, parg); + } + kfree(sess); + } + kfree(ctxdata); + + if (!IS_ERR(shm)) + tee_shm_free(shm); + + ctx->data = NULL; + + if (teedev == optee->supp_teedev) + optee_supp_release(&optee->supp); +} + +static const struct tee_driver_ops optee_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .open_session = optee_open_session, + .close_session = optee_close_session, + .invoke_func = optee_invoke_func, + .cancel_req = optee_cancel_req, + .shm_register = optee_shm_register, + .shm_unregister = optee_shm_unregister, +}; + +static const struct tee_desc optee_desc = { + .name = DRIVER_NAME "-clnt", + .ops = &optee_ops, + .owner = THIS_MODULE, +}; + +static const struct tee_driver_ops optee_supp_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .supp_recv = optee_supp_recv, + .supp_send = optee_supp_send, + .shm_register = optee_shm_register_supp, + .shm_unregister = optee_shm_unregister_supp, +}; + +static const struct tee_desc optee_supp_desc = { + .name = DRIVER_NAME "-supp", + .ops = &optee_supp_ops, + .owner = THIS_MODULE, + .flags = TEE_DESC_PRIVILEGED, +}; + +static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) +{ + struct arm_smccc_res res; + + invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res); + + if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 && + res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3) + return true; + return false; +} + +static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_call_get_os_revision_result result; + } res = { + .result = { + .build_id = 0 + } + }; + + invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0, + &res.smccc); + + if (res.result.build_id) + pr_info("revision %lu.%lu (%08lx)", res.result.major, + res.result.minor, res.result.build_id); + else + pr_info("revision %lu.%lu", res.result.major, res.result.minor); +} + +static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_calls_revision_result result; + } res; + + invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + + if (res.result.major == OPTEE_MSG_REVISION_MAJOR && + (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR) + return true; + return false; +} + +static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, + u32 *sec_caps) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_exchange_capabilities_result result; + } res; + u32 a1 = 0; + + /* + * TODO This isn't enough to tell if it's UP system (from kernel + * point of view) or not, is_smp() returns the the information + * needed, but can't be called directly from here. 
+ */ + if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1) + a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR; + + invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0, + &res.smccc); + + if (res.result.status != OPTEE_SMC_RETURN_OK) + return false; + + *sec_caps = res.result.capabilities; + return true; +} + +static struct tee_shm_pool * +optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm, + u32 sec_caps) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_get_shm_config_result result; + } res; + unsigned long vaddr; + phys_addr_t paddr; + size_t size; + phys_addr_t begin; + phys_addr_t end; + void *va; + struct tee_shm_pool_mgr *priv_mgr; + struct tee_shm_pool_mgr *dmabuf_mgr; + void *rc; + + invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + if (res.result.status != OPTEE_SMC_RETURN_OK) { + pr_info("shm service not available\n"); + return ERR_PTR(-ENOENT); + } + + if (res.result.settings != OPTEE_SMC_SHM_CACHED) { + pr_err("only normal cached shared memory supported\n"); + return ERR_PTR(-EINVAL); + } + + begin = roundup(res.result.start, PAGE_SIZE); + end = rounddown(res.result.start + res.result.size, PAGE_SIZE); + paddr = begin; + size = end - begin; + + if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) { + pr_err("too small shared memory area\n"); + return ERR_PTR(-EINVAL); + } + + va = memremap(paddr, size, MEMREMAP_WB); + if (!va) { + pr_err("shared memory ioremap failed\n"); + return ERR_PTR(-EINVAL); + } + vaddr = (unsigned long)va; + + /* + * If OP-TEE can work with unregistered SHM, we will use own pool + * for private shm + */ + if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) { + rc = optee_shm_pool_alloc_pages(); + if (IS_ERR(rc)) + goto err_memunmap; + priv_mgr = rc; + } else { + const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + + rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz, + 3 /* 8 bytes aligned */); + if (IS_ERR(rc)) + goto err_memunmap; + priv_mgr = rc; + + vaddr += sz; + paddr += sz; + size -= sz; + } + + rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT); + if (IS_ERR(rc)) + goto err_free_priv_mgr; + dmabuf_mgr = rc; + + rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); + if (IS_ERR(rc)) + goto err_free_dmabuf_mgr; + + *memremaped_shm = va; + + return rc; + +err_free_dmabuf_mgr: + tee_shm_pool_mgr_destroy(dmabuf_mgr); +err_free_priv_mgr: + tee_shm_pool_mgr_destroy(priv_mgr); +err_memunmap: + memunmap(va); + return rc; +} + +/* Simple wrapper functions to be able to use a function pointer */ +static void optee_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static void optee_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static optee_invoke_fn *get_invoke_func(struct device_node *np) +{ + const char *method; + + pr_info("probing for conduit method from DT.\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warn("missing \"method\" property\n"); + return ERR_PTR(-ENXIO); + } + + if (!strcmp("hvc", method)) + return optee_smccc_hvc; + else if (!strcmp("smc", method)) + return optee_smccc_smc; + + pr_warn("invalid \"method\" property: %s\n", method); + return 
ERR_PTR(-EINVAL); +} + +static struct optee *optee_probe(struct device_node *np) +{ + optee_invoke_fn *invoke_fn; + struct tee_shm_pool *pool; + struct optee *optee = NULL; + void *memremaped_shm = NULL; + struct tee_device *teedev; + u32 sec_caps; + int rc; + + invoke_fn = get_invoke_func(np); + if (IS_ERR(invoke_fn)) + return (void *)invoke_fn; + + if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { + pr_warn("api uid mismatch\n"); + return ERR_PTR(-EINVAL); + } + + optee_msg_get_os_revision(invoke_fn); + + if (!optee_msg_api_revision_is_compatible(invoke_fn)) { + pr_warn("api revision mismatch\n"); + return ERR_PTR(-EINVAL); + } + + if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { + pr_warn("capabilities mismatch\n"); + return ERR_PTR(-EINVAL); + } + + /* + * We have no other option for shared memory, if secure world + * doesn't have any reserved memory we can use we can't continue. + */ + if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) + return ERR_PTR(-EINVAL); + + pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps); + if (IS_ERR(pool)) + return (void *)pool; + + optee = kzalloc(sizeof(*optee), GFP_KERNEL); + if (!optee) { + rc = -ENOMEM; + goto err; + } + + optee->invoke_fn = invoke_fn; + optee->sec_caps = sec_caps; + + teedev = tee_device_alloc(&optee_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->teedev = teedev; + + teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->supp_teedev = teedev; + + rc = tee_device_register(optee->teedev); + if (rc) + goto err; + + rc = tee_device_register(optee->supp_teedev); + if (rc) + goto err; + + mutex_init(&optee->call_queue.mutex); + INIT_LIST_HEAD(&optee->call_queue.waiters); + optee_wait_queue_init(&optee->wait_queue); + optee_supp_init(&optee->supp); + optee->memremaped_shm = memremaped_shm; + optee->pool = pool; + + /* + * Ensure that there are no pre-existing shm objects before enabling + * the shm cache so that there's no chance of receiving an invalid + * address during shutdown. This could occur, for example, if we're + * kexec booting from an older kernel that did not properly cleanup the + * shm cache. + */ + optee_disable_unmapped_shm_cache(optee); + + optee_enable_shm_cache(optee); + + pr_info("initialized driver\n"); + return optee; +err: + if (optee) { + /* + * tee_device_unregister() is safe to call even if the + * devices hasn't been registered with + * tee_device_register() yet. + */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + kfree(optee); + } + if (pool) + tee_shm_pool_free(pool); + if (memremaped_shm) + memunmap(memremaped_shm); + return ERR_PTR(rc); +} + +static void optee_remove(struct optee *optee) +{ + /* + * Ask OP-TEE to free all cached shared memory objects to decrease + * reference counters and also avoid wild pointers in secure world + * into the old shared memory range. + */ + optee_disable_shm_cache(optee); + + /* + * The two devices has to be unregistered before we can free the + * other resources. 
+ */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + + tee_shm_pool_free(optee->pool); + if (optee->memremaped_shm) + memunmap(optee->memremaped_shm); + optee_wait_queue_exit(&optee->wait_queue); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + + kfree(optee); +} + +static const struct of_device_id optee_match[] = { + { .compatible = "linaro,optee-tz" }, + {}, +}; + +static struct optee *optee_svc; + +static int __init optee_driver_init(void) +{ + struct device_node *fw_np; + struct device_node *np; + struct optee *optee; + + /* Node is supposed to be below /firmware */ + fw_np = of_find_node_by_name(NULL, "firmware"); + if (!fw_np) + return -ENODEV; + + np = of_find_matching_node(fw_np, optee_match); + if (!np || !of_device_is_available(np)) { + of_node_put(np); + return -ENODEV; + } + + optee = optee_probe(np); + of_node_put(np); + + if (IS_ERR(optee)) + return PTR_ERR(optee); + + optee_svc = optee; + + return 0; +} +module_init(optee_driver_init); + +static void __exit optee_driver_exit(void) +{ + struct optee *optee = optee_svc; + + optee_svc = NULL; + if (optee) + optee_remove(optee); +} +module_exit(optee_driver_exit); + +MODULE_AUTHOR("Linaro"); +MODULE_DESCRIPTION("OP-TEE driver"); +MODULE_SUPPORTED_DEVICE(""); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h new file mode 100644 index 000000000..30504901b --- /dev/null +++ b/drivers/tee/optee/optee_msg.h @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _OPTEE_MSG_H +#define _OPTEE_MSG_H + +#include <linux/bitops.h> +#include <linux/types.h> + +/* + * This file defines the OP-TEE message protocol used to communicate + * with an instance of OP-TEE running in secure world. + * + * This file is divided into three sections. + * 1. Formatting of messages. + * 2. Requests from normal world + * 3. Requests from secure world, Remote Procedure Call (RPC), handled by + * tee-supplicant. 
+ */ + +/***************************************************************************** + * Part 1 - formatting of messages + *****************************************************************************/ + +#define OPTEE_MSG_ATTR_TYPE_NONE 0x0 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1 +#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5 +#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7 +#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 +#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa +#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb + +#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0) + +/* + * Meta parameter to be absorbed by the Secure OS and not passed + * to the Trusted Application. + * + * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION. + */ +#define OPTEE_MSG_ATTR_META BIT(8) + +/* + * Pointer to a list of pages used to register user-defined SHM buffer. + * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*. + * buf_ptr should point to the beginning of the buffer. Buffer will contain + * list of page addresses. OP-TEE core can reconstruct contiguous buffer from + * that page addresses list. Page addresses are stored as 64 bit values. + * Last entry on a page should point to the next page of buffer. + * Every entry in buffer should point to a 4k page beginning (12 least + * significant bits must be equal to zero). + * + * 12 least significant bints of optee_msg_param.u.tmem.buf_ptr should hold page + * offset of the user buffer. + * + * So, entries should be placed like members of this structure: + * + * struct page_data { + * uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1]; + * uint64_t next_page_data; + * }; + * + * Structure is designed to exactly fit into the page size + * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page. + * + * The size of 4KB is chosen because this is the smallest page size for ARM + * architectures. If REE uses larger pages, it should divide them to 4KB ones. + */ +#define OPTEE_MSG_ATTR_NONCONTIG BIT(9) + +/* + * Memory attributes for caching passed with temp memrefs. The actual value + * used is defined outside the message protocol with the exception of + * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already + * defined for the memory range should be used. If optee_smc.h is used as + * bearer of this protocol OPTEE_SMC_SHM_* is used for values. + */ +#define OPTEE_MSG_ATTR_CACHE_SHIFT 16 +#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0) +#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0 + +/* + * Same values as TEE_LOGIN_* from TEE Internal API + */ +#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000 +#define OPTEE_MSG_LOGIN_USER 0x00000001 +#define OPTEE_MSG_LOGIN_GROUP 0x00000002 +#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004 +#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005 +#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006 + +/* + * Page size used in non-contiguous buffer entries + */ +#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096 + +/** + * struct optee_msg_param_tmem - temporary memory reference parameter + * @buf_ptr: Address of the buffer + * @size: Size of the buffer + * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm + * + * Secure and normal world communicates pointers as physical address + * instead of the virtual address. This is because secure and normal world + * have completely independent memory mapping. 
Normal world can even have a + * hypervisor which need to translate the guest physical address (AKA IPA + * in ARM documentation) to a real physical address before passing the + * structure to secure world. + */ +struct optee_msg_param_tmem { + u64 buf_ptr; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_rmem - registered memory reference parameter + * @offs: Offset into shared memory reference + * @size: Size of the buffer + * @shm_ref: Shared memory reference, pointer to a struct tee_shm + */ +struct optee_msg_param_rmem { + u64 offs; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_value - opaque value parameter + * + * Value parameters are passed unchecked between normal and secure world. + */ +struct optee_msg_param_value { + u64 a; + u64 b; + u64 c; +}; + +/** + * struct optee_msg_param - parameter used together with struct optee_msg_arg + * @attr: attributes + * @tmem: parameter by temporary memory reference + * @rmem: parameter by registered memory reference + * @value: parameter by opaque value + * + * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in + * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value, + * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and + * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem, + * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. + */ +struct optee_msg_param { + u64 attr; + union { + struct optee_msg_param_tmem tmem; + struct optee_msg_param_rmem rmem; + struct optee_msg_param_value value; + } u; +}; + +/** + * struct optee_msg_arg - call argument + * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_* + * @func: Trusted Application function, specific to the Trusted Application, + * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND + * @session: In parameter for all OPTEE_MSG_CMD_* except + * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead + * @cancel_id: Cancellation id, a unique value to identify this request + * @ret: return value + * @ret_origin: origin of the return value + * @num_params: number of parameters supplied to the OS Command + * @params: the parameters supplied to the OS Command + * + * All normal calls to Trusted OS uses this struct. If cmd requires further + * information than what these field holds it can be passed as a parameter + * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding + * attrs field). All parameters tagged as meta has to come first. + * + * Temp memref parameters can be fragmented if supported by the Trusted OS + * (when optee_smc.h is bearer of this protocol this is indicated with + * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is + * fragmented then has all but the last fragment the + * OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented + * it will still be presented as a single logical memref to the Trusted + * Application. + */ +struct optee_msg_arg { + u32 cmd; + u32 func; + u32 session; + u32 cancel_id; + u32 pad; + u32 ret; + u32 ret_origin; + u32 num_params; + + /* num_params tells the actual number of element in params */ + struct optee_msg_param params[0]; +}; + +/** + * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg + * + * @num_params: Number of parameters embedded in the struct optee_msg_arg + * + * Returns the size of the struct optee_msg_arg together with the number + * of embedded parameters. 
+ */ +#define OPTEE_MSG_GET_ARG_SIZE(num_params) \ + (sizeof(struct optee_msg_arg) + \ + sizeof(struct optee_msg_param) * (num_params)) + +/***************************************************************************** + * Part 2 - requests from normal world + *****************************************************************************/ + +/* + * Return the following UID if using API specified in this file without + * further extensions: + * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b. + * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1, + * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3. + */ +#define OPTEE_MSG_UID_0 0x384fb3e0 +#define OPTEE_MSG_UID_1 0xe7f811e3 +#define OPTEE_MSG_UID_2 0xaf630002 +#define OPTEE_MSG_UID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01 + +/* + * Returns 2.0 if using API specified in this file without further + * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR + * and OPTEE_MSG_REVISION_MINOR + */ +#define OPTEE_MSG_REVISION_MAJOR 2 +#define OPTEE_MSG_REVISION_MINOR 0 +#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03 + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in 4 32-bit words in the same way as + * OPTEE_MSG_FUNCID_CALLS_UID described above. + */ +#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0 +#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3 +#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002 +#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000 + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in 2 32-bit words in the same way as + * OPTEE_MSG_CALLS_REVISION described above. + */ +#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001 + +/* + * Do a secure call with struct optee_msg_arg as argument + * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd + * + * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application. + * The first two parameters are tagged as meta, holding two value + * parameters to pass the following information: + * param[0].u.value.a-b uuid of Trusted Application + * param[1].u.value.a-b uuid of Client + * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_* + * + * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command a previously opened + * session to a Trusted Application. struct optee_msg_arg::func is Trusted + * Application function, specific to the Trusted Application. + * + * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to + * Trusted Application. + * + * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command. + * + * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The + * information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + * [| OPTEE_MSG_ATTR_FRAGMENT] + * [in] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [in] param[0].u.tmem.size size (of first fragment) + * [in] param[0].u.tmem.shm_ref holds shared memory reference + * ... + * The shared memory can optionally be fragmented, temp memrefs can follow + * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set. + * + * OPTEE_MSG_CMD_UNREGISTER_SHM unregisteres a previously registered shared + * memory reference. 
The information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + * [in] param[0].u.rmem.shm_ref holds shared memory reference + * [in] param[0].u.rmem.offs 0 + * [in] param[0].u.rmem.size 0 + */ +#define OPTEE_MSG_CMD_OPEN_SESSION 0 +#define OPTEE_MSG_CMD_INVOKE_COMMAND 1 +#define OPTEE_MSG_CMD_CLOSE_SESSION 2 +#define OPTEE_MSG_CMD_CANCEL 3 +#define OPTEE_MSG_CMD_REGISTER_SHM 4 +#define OPTEE_MSG_CMD_UNREGISTER_SHM 5 +#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 + +/***************************************************************************** + * Part 3 - Requests from secure world, RPC + *****************************************************************************/ + +/* + * All RPC is done with a struct optee_msg_arg as bearer of information, + * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below + * + * RPC communication with tee-supplicant is reversed compared to normal + * client communication desribed above. The supplicant receives requests + * and sends responses. + */ + +/* + * Load a TA into memory, defined in tee-supplicant + */ +#define OPTEE_MSG_RPC_CMD_LOAD_TA 0 + +/* + * Reserved + */ +#define OPTEE_MSG_RPC_CMD_RPMB 1 + +/* + * File system access, defined in tee-supplicant + */ +#define OPTEE_MSG_RPC_CMD_FS 2 + +/* + * Get time + * + * Returns number of seconds and nano seconds since the Epoch, + * 1970-01-01 00:00:00 +0000 (UTC). + * + * [out] param[0].u.value.a Number of seconds + * [out] param[0].u.value.b Number of nano seconds. + */ +#define OPTEE_MSG_RPC_CMD_GET_TIME 3 + +/* + * Wait queue primitive, helper for secure world to implement a wait queue. + * + * If secure world need to wait for a secure world mutex it issues a sleep + * request instead of spinning in secure world. Conversely is a wakeup + * request issued when a secure world mutex with a thread waiting thread is + * unlocked. + * + * Waiting on a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP + * [in] param[0].u.value.b wait key + * + * Waking up a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP + * [in] param[0].u.value.b wakeup key + */ +#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4 +#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0 +#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1 + +/* + * Suspend execution + * + * [in] param[0].value .a number of milliseconds to suspend + */ +#define OPTEE_MSG_RPC_CMD_SUSPEND 5 + +/* + * Allocate a piece of shared memory + * + * Shared memory can optionally be fragmented, to support that additional + * spare param entries are allocated to make room for eventual fragments. + * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when + * unused. All returned temp memrefs except the last should have the + * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field. + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* below + * [in] param[0].u.value.b requested size + * [in] param[0].u.value.c required alignment + * + * [out] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [out] param[0].u.tmem.size size (of first fragment) + * [out] param[0].u.tmem.shm_ref shared memory reference + * ... 
+ * [out] param[n].u.tmem.buf_ptr physical address + * [out] param[n].u.tmem.size size + * [out] param[n].u.tmem.shm_ref shared memory reference (same value + * as in param[n-1].u.tmem.shm_ref) + */ +#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6 +/* Memory that can be shared with a non-secure user space application */ +#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0 +/* Memory only shared with non-secure kernel */ +#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1 + +/* + * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* above + * [in] param[0].u.value.b value of shared memory reference + * returned in param[0].u.tmem.shm_ref + * above + */ +#define OPTEE_MSG_RPC_CMD_SHM_FREE 7 + +#endif /* _OPTEE_MSG_H */ diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h new file mode 100644 index 000000000..489039acb --- /dev/null +++ b/drivers/tee/optee/optee_private.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef OPTEE_PRIVATE_H +#define OPTEE_PRIVATE_H + +#include <linux/arm-smccc.h> +#include <linux/semaphore.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include "optee_msg.h" + +#define OPTEE_MAX_ARG_SIZE 1024 + +/* Some Global Platform error codes used in this driver */ +#define TEEC_SUCCESS 0x00000000 +#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define TEEC_ERROR_COMMUNICATION 0xFFFF000E +#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C + +#define TEEC_ORIGIN_COMMS 0x00000002 + +typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, + struct arm_smccc_res *); + +struct optee_call_queue { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head waiters; +}; + +struct optee_wait_queue { + /* Serializes access to this struct */ + struct mutex mu; + struct list_head db; +}; + +/** + * struct optee_supp - supplicant synchronization struct + * @ctx the context of current connected supplicant. + * if !NULL the supplicant device is available for use, + * else busy + * @mutex: held while accessing content of this struct + * @req_id: current request id if supplicant is doing synchronous + * communication, else -1 + * @reqs: queued request not yet retrieved by supplicant + * @idr: IDR holding all requests currently being processed + * by supplicant + * @reqs_c: completion used by supplicant when waiting for a + * request to be queued. 
+ */ +struct optee_supp { + /* Serializes access to this struct */ + struct mutex mutex; + struct tee_context *ctx; + + int req_id; + struct list_head reqs; + struct idr idr; + struct completion reqs_c; +}; + +/** + * struct optee - main service struct + * @supp_teedev: supplicant device + * @teedev: client device + * @invoke_fn: function to issue smc or hvc + * @call_queue: queue of threads waiting to call @invoke_fn + * @wait_queue: queue of threads from secure world waiting for a + * secure world sync object + * @supp: supplicant synchronization struct for RPC to supplicant + * @pool: shared memory pool + * @memremaped_shm virtual address of memory in shared memory pool + * @sec_caps: secure world capabilities defined by + * OPTEE_SMC_SEC_CAP_* in optee_smc.h + */ +struct optee { + struct tee_device *supp_teedev; + struct tee_device *teedev; + optee_invoke_fn *invoke_fn; + struct optee_call_queue call_queue; + struct optee_wait_queue wait_queue; + struct optee_supp supp; + struct tee_shm_pool *pool; + void *memremaped_shm; + u32 sec_caps; +}; + +struct optee_session { + struct list_head list_node; + u32 session_id; +}; + +struct optee_context_data { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head sess_list; +}; + +struct optee_rpc_param { + u32 a0; + u32 a1; + u32 a2; + u32 a3; + u32 a4; + u32 a5; + u32 a6; + u32 a7; +}; + +/* Holds context that is preserved during one STD call */ +struct optee_call_ctx { + /* information about pages list used in last allocation */ + void *pages_list; + size_t num_entries; +}; + +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, + struct optee_call_ctx *call_ctx); +void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx); + +void optee_wait_queue_init(struct optee_wait_queue *wq); +void optee_wait_queue_exit(struct optee_wait_queue *wq); + +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param); + +int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len); +int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len); +void optee_supp_init(struct optee_supp *supp); +void optee_supp_uninit(struct optee_supp *supp); +void optee_supp_release(struct optee_supp *supp); + +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param); +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); + +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg); +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); +int optee_close_session(struct tee_context *ctx, u32 session); +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); + +void optee_enable_shm_cache(struct optee *optee); +void optee_disable_shm_cache(struct optee *optee); +void optee_disable_unmapped_shm_cache(struct optee *optee); + +int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, + struct page **pages, size_t num_pages, + unsigned long start); +int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm); + +int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm, + struct page **pages, size_t num_pages, + unsigned long start); +int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm); + +int 
optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params); +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params); + +u64 *optee_allocate_pages_list(size_t num_entries); +void optee_free_pages_list(void *array, size_t num_entries); +void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, + size_t page_offset); + +/* + * Small helpers + */ + +static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1) +{ + return (void *)(unsigned long)(((u64)reg0 << 32) | reg1); +} + +static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val) +{ + *reg0 = val >> 32; + *reg1 = val; +} + +#endif /*OPTEE_PRIVATE_H*/ diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h new file mode 100644 index 000000000..bbf0cf028 --- /dev/null +++ b/drivers/tee/optee/optee_smc.h @@ -0,0 +1,465 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef OPTEE_SMC_H +#define OPTEE_SMC_H + +#include <linux/arm-smccc.h> +#include <linux/bitops.h> + +#define OPTEE_SMC_STD_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) +#define OPTEE_SMC_FAST_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) + +/* + * Function specified by SMC Calling convention. + */ +#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00 +#define OPTEE_SMC_CALLS_COUNT \ + ARM_SMCCC_CALL_VAL(OPTEE_SMC_FAST_CALL, SMCCC_SMC_32, \ + SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_COUNT) + +/* + * Normal cached memory (write-back), shareable for SMP systems and not + * shareable for UP systems. + */ +#define OPTEE_SMC_SHM_CACHED 1 + +/* + * a0..a7 is used as register names in the descriptions below, on arm32 + * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's + * 32-bit registers. 
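
Editorial sketch (not part of the patch): a fast call following this register convention can be issued with the kernel's arm_smccc_smc() helper, assuming SMC is the conduit on the running CPU. The example queries the API UID defined just below; the result comes back packed in a0..a3:

	static void sketch_query_api_uid(void)
	{
		struct arm_smccc_res res;

		/* Function ID goes in a0; the UID is returned in a0..a3 */
		arm_smccc_smc(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
		pr_info("TEE API UID: %08lx-%08lx-%08lx-%08lx\n",
			res.a0, res.a1, res.a2, res.a3);
	}
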
+ */ + +/* + * Function specified by SMC Calling convention + * + * Return one of the following UIDs if using API specified in this file + * without further extentions: + * 65cb6b93-af0c-4617-8ed6-644a8d1140f8 + * see also OPTEE_SMC_UID_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID +#define OPTEE_SMC_CALLS_UID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_UID) + +/* + * Function specified by SMC Calling convention + * + * Returns 2.0 if using API specified in this file without further extentions. + * see also OPTEE_MSG_REVISION_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION +#define OPTEE_SMC_CALLS_REVISION \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_REVISION) + +struct optee_smc_calls_revision_result { + unsigned long major; + unsigned long minor; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID + * described above. + */ +#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID +#define OPTEE_SMC_CALL_GET_OS_UUID \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID) + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION + * described above. May optionally return a 32-bit build identifier in a2, + * with zero meaning unspecified. + */ +#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION +#define OPTEE_SMC_CALL_GET_OS_REVISION \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION) + +struct optee_smc_call_get_os_revision_result { + unsigned long major; + unsigned long minor; + unsigned long build_id; + unsigned long reserved1; +}; + +/* + * Call with struct optee_msg_arg as argument + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG + * a1 Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg + * a2 Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg + * a3 Cache settings, not used if physical pointer is in a predefined shared + * memory area else per OPTEE_SMC_SHM_* + * a4-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 Return value, OPTEE_SMC_RETURN_* + * a1-3 Not used + * a4-7 Preserved + * + * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage: + * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT + * a1-3 Preserved + * a4-7 Preserved + * + * RPC return register usage: + * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val) + * a1-2 RPC parameters + * a3-7 Resume information, must be preserved + * + * Possible return values: + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Call completed, result updated in + * the previously supplied struct + * optee_msg_arg. + * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded, + * try again later. + * OPTEE_SMC_RETURN_EBADADDR Bad physcial pointer to struct + * optee_msg_arg. 
+ * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg + * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal + * world. + */ +#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG +#define OPTEE_SMC_CALL_WITH_ARG \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG) + +/* + * Get Shared Memory Config + * + * Returns the Secure/Non-secure shared memory config. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Have config return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Physical address of start of SHM + * a2 Size of of SHM + * a3 Cache settings of memory, as defined by the + * OPTEE_SMC_SHM_* values above + * a4-7 Preserved + * + * Not available register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-3 Not used + * a4-7 Preserved + */ +#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7 +#define OPTEE_SMC_GET_SHM_CONFIG \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG) + +struct optee_smc_get_shm_config_result { + unsigned long status; + unsigned long start; + unsigned long size; + unsigned long settings; +}; + +/* + * Exchanges capabilities between normal world and secure world + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES + * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_* + * a2-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* + * a2-7 Preserved + * + * Error return register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world + * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* + * a2-7 Preserved + */ +/* Normal world works as a uniprocessor system */ +#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0) +/* Secure world has reserved shared memory for normal world to use */ +#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0) +/* Secure world can communicate via previously unregistered shared memory */ +#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1) + +/* + * Secure world supports commands "register/unregister shared memory", + * secure world accepts command buffers located in any parts of non-secure RAM + */ +#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2) + +#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 +#define OPTEE_SMC_EXCHANGE_CAPABILITIES \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES) + +struct optee_smc_exchange_capabilities_result { + unsigned long status; + unsigned long capabilities; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Disable and empties cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns one shared memory reference to free. To disable the + * cache and free all cached objects this function has to be called until + * it returns OPTEE_SMC_RETURN_ENOTAVAIL. 
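
Editorial sketch (not part of the patch) of the drain loop this paragraph describes, assuming an invoke_fn with the optee_invoke_fn signature from optee_private.h; each successful call returns one cached cookie to free:

	static void sketch_drain_shm_cache(optee_invoke_fn *invoke_fn)
	{
		struct arm_smccc_res res;
		struct tee_shm *shm;

		do {
			invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				  0, 0, 0, 0, 0, 0, 0, &res);
			if (res.a0 != OPTEE_SMC_RETURN_OK)
				break;	/* ENOTAVAIL: cache empty (or EBUSY) */
			/* a1/a2 hold the cookie of one cached object to free */
			shm = reg_pair_to_ptr(res.a1, res.a2);
			tee_shm_free(shm);
		} while (true);
	}
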
+ * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Upper 32bit of a 64bit Shared memory cookie + * a2 Lower 32bit of a 64bit Shared memory cookie + * a3-7 Preserved + * + * Cache empty return register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10 +#define OPTEE_SMC_DISABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE) + +struct optee_smc_disable_shm_cache_result { + unsigned long status; + unsigned long shm_upper32; + unsigned long shm_lower32; + unsigned long reserved0; +}; + +/* + * Enable cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If + * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11 +#define OPTEE_SMC_ENABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE) + +/* + * Resume from RPC (for example after processing a foreign interrupt) + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC + * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned + * OPTEE_SMC_RETURN_RPC in a0 + * + * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above. + * + * Possible return values + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Original call completed, result + * updated in the previously supplied. + * struct optee_msg_arg + * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal + * world. + * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume + * information was corrupt. + */ +#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3 +#define OPTEE_SMC_CALL_RETURN_FROM_RPC \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC) + +#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF + +#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \ + ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK) + +#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX) + +/* + * Allocate memory for RPC parameter passing. The memory is used to hold a + * struct optee_msg_arg. + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC + * a1 Size in bytes of required argument memory + * a2 Not used + * a3 Resume information, must be preserved + * a4-5 Not used + * a6-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1 Upper 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated. 
+ * a2 Lower 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated + * a3 Preserved + * a4 Upper 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a5 Lower 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a6-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_ALLOC 0 +#define OPTEE_SMC_RETURN_RPC_ALLOC \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC) + +/* + * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_FREE + * a1 Upper 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a2 Lower 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_FREE 2 +#define OPTEE_SMC_RETURN_RPC_FREE \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE) + +/* + * Deliver foreign interrupt to normal world. + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR + * a1-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4 +#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR) + +/* + * Do an RPC request. The supplied struct optee_msg_arg tells which + * request to do and the parameters for the request. The following fields + * are used (the rest are unused): + * - cmd the Request ID + * - ret return value of the request, filled in by normal world + * - num_params number of parameters for the request + * - params the parameters + * - param_attrs attributes of the parameters + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_CMD + * a1 Upper 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a2 Lower 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. 
+ * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_CMD 5 +#define OPTEE_SMC_RETURN_RPC_CMD \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD) + +/* Returned in a0 */ +#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF + +/* Returned in a0 only from Trusted OS functions */ +#define OPTEE_SMC_RETURN_OK 0x0 +#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1 +#define OPTEE_SMC_RETURN_EBUSY 0x2 +#define OPTEE_SMC_RETURN_ERESUME 0x3 +#define OPTEE_SMC_RETURN_EBADADDR 0x4 +#define OPTEE_SMC_RETURN_EBADCMD 0x5 +#define OPTEE_SMC_RETURN_ENOMEM 0x6 +#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7 +#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret)) + +static inline bool __optee_smc_return_is_rpc(u32 ret) +{ + return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION && + (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == + OPTEE_SMC_RETURN_RPC_PREFIX; +} + +#endif /* OPTEE_SMC_H */ diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c new file mode 100644 index 000000000..b45c73dd3 --- /dev/null +++ b/drivers/tee/optee/rpc.c @@ -0,0 +1,452 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "optee_private.h" +#include "optee_smc.h" + +struct wq_entry { + struct list_head link; + struct completion c; + u32 key; +}; + +void optee_wait_queue_init(struct optee_wait_queue *priv) +{ + mutex_init(&priv->mu); + INIT_LIST_HEAD(&priv->db); +} + +void optee_wait_queue_exit(struct optee_wait_queue *priv) +{ + mutex_destroy(&priv->mu); +} + +static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg) +{ + struct timespec64 ts; + + if (arg->num_params != 1) + goto bad; + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT) + goto bad; + + ktime_get_real_ts64(&ts); + arg->params[0].u.value.a = ts.tv_sec; + arg->params[0].u.value.b = ts.tv_nsec; + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w; + + mutex_lock(&wq->mu); + + list_for_each_entry(w, &wq->db, link) + if (w->key == key) + goto out; + + w = kmalloc(sizeof(*w), GFP_KERNEL); + if (w) { + init_completion(&w->c); + w->key = key; + list_add_tail(&w->link, &wq->db); + } +out: + mutex_unlock(&wq->mu); + return w; +} + +static void wq_sleep(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) { + wait_for_completion(&w->c); + mutex_lock(&wq->mu); + list_del(&w->link); + mutex_unlock(&wq->mu); + kfree(w); + } +} + +static void wq_wakeup(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) + complete(&w->c); +} + +static void handle_rpc_func_cmd_wq(struct optee *optee, + struct optee_msg_arg *arg) +{ + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + switch 
(arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP: + wq_sleep(&optee->wait_queue, arg->params[0].u.value.b); + break; + case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP: + wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b); + break; + default: + goto bad; + } + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg) +{ + u32 msec_to_wait; + + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + msec_to_wait = arg->params[0].u.value.a; + + /* Go to interruptible sleep */ + msleep_interruptible(msec_to_wait); + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_supp_cmd(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_param *params; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + params = kmalloc_array(arg->num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (optee_from_msg_param(params, arg->num_params, arg->params)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto out; + } + + arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params); + + if (optee_to_msg_param(arg->params, arg->num_params, params)) + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +out: + kfree(params); +} + +static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) +{ + u32 ret; + struct tee_param param; + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct tee_shm *shm; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = sz; + param.u.value.c = 0; + + ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, ¶m); + if (ret) + return ERR_PTR(-ENOMEM); + + mutex_lock(&optee->supp.mutex); + /* Increases count as secure world doesn't have a reference */ + shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c); + mutex_unlock(&optee->supp.mutex); + return shm; +} + +static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee_msg_arg *arg, + struct optee_call_ctx *call_ctx) +{ + phys_addr_t pa; + struct tee_shm *shm; + size_t sz; + size_t n; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (!arg->num_params || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + for (n = 1; n < arg->num_params; n++) { + if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + } + + sz = arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + shm = cmd_alloc_suppl(ctx, sz); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + if (IS_ERR(shm)) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (tee_shm_get_pa(shm, 0, &pa)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto bad; + } + + sz = tee_shm_get_size(shm); + + if (tee_shm_is_registered(shm)) { + struct page **pages; + u64 *pages_list; + size_t page_num; + + pages = tee_shm_get_pages(shm, &page_num); + if (!pages || !page_num) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + goto bad; + } + + pages_list = optee_allocate_pages_list(page_num); + if (!pages_list) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + goto bad; + } + + 
call_ctx->pages_list = pages_list; + call_ctx->num_entries = page_num; + + arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | + OPTEE_MSG_ATTR_NONCONTIG; + /* + * In the least bits of u.tmem.buf_ptr we store buffer offset + * from 4k page, as described in OP-TEE ABI. + */ + arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) | + (tee_shm_get_page_offset(shm) & + (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); + arg->params[0].u.tmem.size = tee_shm_get_size(shm); + arg->params[0].u.tmem.shm_ref = (unsigned long)shm; + + optee_fill_pages_list(pages_list, pages, page_num, + tee_shm_get_page_offset(shm)); + } else { + arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; + arg->params[0].u.tmem.buf_ptr = pa; + arg->params[0].u.tmem.size = sz; + arg->params[0].u.tmem.shm_ref = (unsigned long)shm; + } + + arg->ret = TEEC_SUCCESS; + return; +bad: + tee_shm_free(shm); +} + +static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) +{ + struct tee_param param; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = tee_shm_get_id(shm); + param.u.value.c = 0; + + /* + * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure + * world has released its reference. + * + * It's better to do this before sending the request to supplicant + * as we'd like to let the process doing the initial allocation to + * do release the last reference too in order to avoid stacking + * many pending fput() on the client process. This could otherwise + * happen if secure world does many allocate and free in a single + * invoke. + */ + tee_shm_put(shm); + + optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, ¶m); +} + +static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_shm *shm; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (arg->num_params != 1 || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + cmd_free_suppl(ctx, shm); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + tee_shm_free(shm); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + } + arg->ret = TEEC_SUCCESS; +} + +static void free_pages_list(struct optee_call_ctx *call_ctx) +{ + if (call_ctx->pages_list) { + optee_free_pages_list(call_ctx->pages_list, + call_ctx->num_entries); + call_ctx->pages_list = NULL; + call_ctx->num_entries = 0; + } +} + +void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx) +{ + free_pages_list(call_ctx); +} + +static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, + struct tee_shm *shm, + struct optee_call_ctx *call_ctx) +{ + struct optee_msg_arg *arg; + + arg = tee_shm_get_va(shm, 0); + if (IS_ERR(arg)) { + pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm); + return; + } + + switch (arg->cmd) { + case OPTEE_MSG_RPC_CMD_GET_TIME: + handle_rpc_func_cmd_get_time(arg); + break; + case OPTEE_MSG_RPC_CMD_WAIT_QUEUE: + handle_rpc_func_cmd_wq(optee, arg); + break; + case OPTEE_MSG_RPC_CMD_SUSPEND: + handle_rpc_func_cmd_wait(arg); + break; + case OPTEE_MSG_RPC_CMD_SHM_ALLOC: + free_pages_list(call_ctx); + handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); + break; + case OPTEE_MSG_RPC_CMD_SHM_FREE: + handle_rpc_func_cmd_shm_free(ctx, arg); + break; + default: + handle_rpc_supp_cmd(ctx, arg); + } +} + +/** + * optee_handle_rpc() - handle 
RPC from secure world + * @ctx: context doing the RPC + * @param: value of registers for the RPC + * @call_ctx: call context. Preserved during one OP-TEE invocation + * + * Result of RPC is written back into @param. + */ +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, + struct optee_call_ctx *call_ctx) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + phys_addr_t pa; + + switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { + case OPTEE_SMC_RPC_FUNC_ALLOC: + shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED); + if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { + reg_pair_from_64(¶m->a1, ¶m->a2, pa); + reg_pair_from_64(¶m->a4, ¶m->a5, + (unsigned long)shm); + } else { + param->a1 = 0; + param->a2 = 0; + param->a4 = 0; + param->a5 = 0; + } + break; + case OPTEE_SMC_RPC_FUNC_FREE: + shm = reg_pair_to_ptr(param->a1, param->a2); + tee_shm_free(shm); + break; + case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR: + /* + * A foreign interrupt was raised while secure world was + * executing, since they are handled in Linux a dummy RPC is + * performed to let Linux take the interrupt through the normal + * vector. + */ + break; + case OPTEE_SMC_RPC_FUNC_CMD: + shm = reg_pair_to_ptr(param->a1, param->a2); + handle_rpc_func_cmd(ctx, optee, shm, call_ctx); + break; + default: + pr_warn("Unknown RPC func 0x%x\n", + (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)); + break; + } + + param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; +} diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c new file mode 100644 index 000000000..49397813f --- /dev/null +++ b/drivers/tee/optee/shm_pool.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2017, EPAM Systems + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/device.h> +#include <linux/dma-buf.h> +#include <linux/genalloc.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "optee_private.h" +#include "optee_smc.h" +#include "shm_pool.h" + +static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm, size_t size) +{ + unsigned int order = get_order(size); + struct page *page; + + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!page) + return -ENOMEM; + + shm->kaddr = page_address(page); + shm->paddr = page_to_phys(page); + shm->size = PAGE_SIZE << order; + + return 0; +} + +static void pool_op_free(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm) +{ + free_pages((unsigned long)shm->kaddr, get_order(shm->size)); + shm->kaddr = NULL; +} + +static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +{ + kfree(poolm); +} + +static const struct tee_shm_pool_mgr_ops pool_ops = { + .alloc = pool_op_alloc, + .free = pool_op_free, + .destroy_poolmgr = pool_op_destroy_poolmgr, +}; + +/** + * optee_shm_pool_alloc_pages() - create page-based allocator pool + * + * This pool is used when OP-TEE supports dymanic SHM. In this case + * command buffers and such are allocated from kernel's own memory. 
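
Editorial sketch (not part of the patch): the manager returned below is typically combined into a full shared-memory pool. A rough illustration, assuming the tee_shm_pool_alloc() helper from the TEE subsystem of this era and omitting error unwinding:

	static struct tee_shm_pool *sketch_dyn_shm_pool(void)
	{
		struct tee_shm_pool_mgr *priv_mgr = optee_shm_pool_alloc_pages();
		struct tee_shm_pool_mgr *dmabuf_mgr = optee_shm_pool_alloc_pages();

		if (IS_ERR(priv_mgr) || IS_ERR(dmabuf_mgr))
			return ERR_PTR(-ENOMEM);	/* unwind omitted for brevity */

		/* One manager backs driver-private buffers, the other dma-buf
		 * exported buffers; both allocate plain kernel pages. */
		return tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	}
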
+ */ +struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void) +{ + struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + + if (!mgr) + return ERR_PTR(-ENOMEM); + + mgr->ops = &pool_ops; + + return mgr; +} diff --git a/drivers/tee/optee/shm_pool.h b/drivers/tee/optee/shm_pool.h new file mode 100644 index 000000000..4e753c3bf --- /dev/null +++ b/drivers/tee/optee/shm_pool.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2016, EPAM Systems + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef SHM_POOL_H +#define SHM_POOL_H + +#include <linux/tee_drv.h> + +struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void); + +#endif diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c new file mode 100644 index 000000000..43626e157 --- /dev/null +++ b/drivers/tee/optee/supp.c @@ -0,0 +1,383 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include "optee_private.h" + +struct optee_supp_req { + struct list_head link; + + bool in_queue; + u32 func; + u32 ret; + size_t num_params; + struct tee_param *param; + + struct completion c; +}; + +void optee_supp_init(struct optee_supp *supp) +{ + memset(supp, 0, sizeof(*supp)); + mutex_init(&supp->mutex); + init_completion(&supp->reqs_c); + idr_init(&supp->idr); + INIT_LIST_HEAD(&supp->reqs); + supp->req_id = -1; +} + +void optee_supp_uninit(struct optee_supp *supp) +{ + mutex_destroy(&supp->mutex); + idr_destroy(&supp->idr); +} + +void optee_supp_release(struct optee_supp *supp) +{ + int id; + struct optee_supp_req *req; + struct optee_supp_req *req_tmp; + + mutex_lock(&supp->mutex); + + /* Abort all request retrieved by supplicant */ + idr_for_each_entry(&supp->idr, req, id) { + idr_remove(&supp->idr, id); + req->ret = TEEC_ERROR_COMMUNICATION; + complete(&req->c); + } + + /* Abort all queued requests */ + list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) { + list_del(&req->link); + req->in_queue = false; + req->ret = TEEC_ERROR_COMMUNICATION; + complete(&req->c); + } + + supp->ctx = NULL; + supp->req_id = -1; + + mutex_unlock(&supp->mutex); +} + +/** + * optee_supp_thrd_req() - request service from supplicant + * @ctx: context doing the request + * @func: function requested + * @num_params: number of elements in @param array + * @param: parameters for function + * + * Returns result of operation to be passed to secure world + */ +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param) + +{ + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_supp *supp = &optee->supp; + struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL); + bool interruptable; + u32 ret; + + if (!req) + return TEEC_ERROR_OUT_OF_MEMORY; + + init_completion(&req->c); + req->func = func; + req->num_params = num_params; + req->param = param; + + /* Insert the request in the request list */ + mutex_lock(&supp->mutex); + list_add_tail(&req->link, &supp->reqs); + req->in_queue = true; + mutex_unlock(&supp->mutex); + + /* Tell an eventual waiter there's a new request */ + complete(&supp->reqs_c); + + /* + * Wait for supplicant to process and return result, once we've + * returned from wait_for_completion(&req->c) successfully we have + * exclusive access again. + */ + while (wait_for_completion_interruptible(&req->c)) { + mutex_lock(&supp->mutex); + interruptable = !supp->ctx; + if (interruptable) { + /* + * There's no supplicant available and since the + * supp->mutex currently is held none can + * become available until the mutex released + * again. + * + * Interrupting an RPC to supplicant is only + * allowed as a way of slightly improving the user + * experience in case the supplicant hasn't been + * started yet. During normal operation the supplicant + * will serve all requests in a timely manner and + * interrupting then wouldn't make sense. + */ + if (req->in_queue) { + list_del(&req->link); + req->in_queue = false; + } + } + mutex_unlock(&supp->mutex); + + if (interruptable) { + req->ret = TEEC_ERROR_COMMUNICATION; + break; + } + } + + ret = req->ret; + kfree(req); + + return ret; +} + +static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp, + int num_params, int *id) +{ + struct optee_supp_req *req; + + if (supp->req_id != -1) { + /* + * Supplicant should not mix synchronous and asnynchronous + * requests. 
+ */ + return ERR_PTR(-EINVAL); + } + + if (list_empty(&supp->reqs)) + return NULL; + + req = list_first_entry(&supp->reqs, struct optee_supp_req, link); + + if (num_params < req->num_params) { + /* Not enough room for parameters */ + return ERR_PTR(-EINVAL); + } + + *id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL); + if (*id < 0) + return ERR_PTR(-ENOMEM); + + list_del(&req->link); + req->in_queue = false; + + return req; +} + +static int supp_check_recv_params(size_t num_params, struct tee_param *params, + size_t *num_meta) +{ + size_t n; + + if (!num_params) + return -EINVAL; + + /* + * If there's memrefs we need to decrease those as they where + * increased earlier and we'll even refuse to accept any below. + */ + for (n = 0; n < num_params; n++) + if (tee_param_is_memref(params + n) && params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + + /* + * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with + * or without the TEE_IOCTL_PARAM_ATTR_META bit set. + */ + for (n = 0; n < num_params; n++) + if (params[n].attr && + params[n].attr != TEE_IOCTL_PARAM_ATTR_META) + return -EINVAL; + + /* At most we'll need one meta parameter so no need to check for more */ + if (params->attr == TEE_IOCTL_PARAM_ATTR_META) + *num_meta = 1; + else + *num_meta = 0; + + return 0; +} + +/** + * optee_supp_recv() - receive request for supplicant + * @ctx: context receiving the request + * @func: requested function in supplicant + * @num_params: number of elements allocated in @param, updated with number + * used elements + * @param: space for parameters for @func + * + * Returns 0 on success or <0 on failure + */ +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + struct optee_supp_req *req = NULL; + int id; + size_t num_meta; + int rc; + + rc = supp_check_recv_params(*num_params, param, &num_meta); + if (rc) + return rc; + + while (true) { + mutex_lock(&supp->mutex); + req = supp_pop_entry(supp, *num_params - num_meta, &id); + mutex_unlock(&supp->mutex); + + if (req) { + if (IS_ERR(req)) + return PTR_ERR(req); + break; + } + + /* + * If we didn't get a request we'll block in + * wait_for_completion() to avoid needless spinning. + * + * This is where supplicant will be hanging most of + * the time, let's make this interruptable so we + * can easily restart supplicant if needed. + */ + if (wait_for_completion_interruptible(&supp->reqs_c)) + return -ERESTARTSYS; + } + + if (num_meta) { + /* + * tee-supplicant support meta parameters -> requsts can be + * processed asynchronously. 
+ */ + param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT | + TEE_IOCTL_PARAM_ATTR_META; + param->u.value.a = id; + param->u.value.b = 0; + param->u.value.c = 0; + } else { + mutex_lock(&supp->mutex); + supp->req_id = id; + mutex_unlock(&supp->mutex); + } + + *func = req->func; + *num_params = req->num_params + num_meta; + memcpy(param + num_meta, req->param, + sizeof(struct tee_param) * req->num_params); + + return 0; +} + +static struct optee_supp_req *supp_pop_req(struct optee_supp *supp, + size_t num_params, + struct tee_param *param, + size_t *num_meta) +{ + struct optee_supp_req *req; + int id; + size_t nm; + const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT | + TEE_IOCTL_PARAM_ATTR_META; + + if (!num_params) + return ERR_PTR(-EINVAL); + + if (supp->req_id == -1) { + if (param->attr != attr) + return ERR_PTR(-EINVAL); + id = param->u.value.a; + nm = 1; + } else { + id = supp->req_id; + nm = 0; + } + + req = idr_find(&supp->idr, id); + if (!req) + return ERR_PTR(-ENOENT); + + if ((num_params - nm) != req->num_params) + return ERR_PTR(-EINVAL); + + idr_remove(&supp->idr, id); + supp->req_id = -1; + *num_meta = nm; + + return req; +} + +/** + * optee_supp_send() - send result of request from supplicant + * @ctx: context sending result + * @ret: return value of request + * @num_params: number of parameters returned + * @param: returned parameters + * + * Returns 0 on success or <0 on failure. + */ +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + struct optee_supp_req *req; + size_t n; + size_t num_meta; + + mutex_lock(&supp->mutex); + req = supp_pop_req(supp, num_params, param, &num_meta); + mutex_unlock(&supp->mutex); + + if (IS_ERR(req)) { + /* Something is wrong, let supplicant restart. */ + return PTR_ERR(req); + } + + /* Update out and in/out parameters */ + for (n = 0; n < req->num_params; n++) { + struct tee_param *p = req->param + n; + + switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + p->u.value.a = param[n + num_meta].u.value.a; + p->u.value.b = param[n + num_meta].u.value.b; + p->u.value.c = param[n + num_meta].u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + p->u.memref.size = param[n + num_meta].u.memref.size; + break; + default: + break; + } + } + req->ret = ret; + + /* Let the requesting thread continue */ + complete(&req->c); + + return 0; +} |
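
Editorial sketch (not part of the patch): the meta parameter filled in by optee_supp_recv() is what lets the supplicant answer requests out of order; it must be passed back unchanged as the first parameter to optee_supp_send(). Assuming a supplicant tee_context 'ctx' and a hypothetical MAX_PARAMS sized buffer with one spare slot for the meta entry:

	u32 func;
	u32 num_params = MAX_PARAMS;
	struct tee_param params[MAX_PARAMS];

	if (!optee_supp_recv(ctx, &func, &num_params, params)) {
		/* ... hand 'func' and the payload to user space ... */

		/* Return the parameters, including the untouched meta entry
		 * in params[0], together with the result code. */
		optee_supp_send(ctx, TEEC_SUCCESS, num_params, params);
	}
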