author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
commit	ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree	b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/tee/optee
parent	Initial commit. (diff)
download	linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
	linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip

Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/tee/optee')
-rw-r--r--  drivers/tee/optee/Kconfig           |   26
-rw-r--r--  drivers/tee/optee/Makefile          |   13
-rw-r--r--  drivers/tee/optee/call.c            |  526
-rw-r--r--  drivers/tee/optee/core.c            |  226
-rw-r--r--  drivers/tee/optee/device.c          |  190
-rw-r--r--  drivers/tee/optee/ffa_abi.c         |  922
-rw-r--r--  drivers/tee/optee/notif.c           |  125
-rw-r--r--  drivers/tee/optee/optee_ffa.h       |  163
-rw-r--r--  drivers/tee/optee/optee_msg.h       |  351
-rw-r--r--  drivers/tee/optee/optee_private.h   |  347
-rw-r--r--  drivers/tee/optee/optee_rpc_cmd.h   |  106
-rw-r--r--  drivers/tee/optee/optee_smc.h       |  603
-rw-r--r--  drivers/tee/optee/optee_trace.h     |   67
-rw-r--r--  drivers/tee/optee/rpc.c             |  279
-rw-r--r--  drivers/tee/optee/smc_abi.c         | 1841
-rw-r--r--  drivers/tee/optee/supp.c            |  382
16 files changed, 6167 insertions, 0 deletions
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
new file mode 100644
index 0000000000..70898bbd58
--- /dev/null
+++ b/drivers/tee/optee/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# OP-TEE Trusted Execution Environment Configuration
+config OPTEE
+ tristate "OP-TEE"
+ depends on HAVE_ARM_SMCCC
+ depends on MMU
+ help
+ This implements the OP-TEE Trusted Execution Environment (TEE)
+ driver.
+
+config OPTEE_INSECURE_LOAD_IMAGE
+ bool "Load OP-TEE image as firmware"
+ default n
+ depends on OPTEE && ARM64
+ help
+ This loads the BL32 image for OP-TEE as firmware when the driver is
+ probed. The driver returns -EPROBE_DEFER until the firmware can be
+ loaded from the filesystem, which is determined by checking
+ system_state until it reaches SYSTEM_RUNNING. This also requires
+ enabling the corresponding option in Trusted Firmware for Arm. The
+ documentation there explains the security threat associated with
+ enabling this, as well as mitigations at the firmware and platform
+ level.
+ https://trustedfirmware-a.readthedocs.io/en/latest/threat_model/threat_model.html
+
+ Additional documentation on kernel security risks is at
+ Documentation/staging/tee.rst.
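For reference, a hedged .config fragment matching the options above: the driver built as a module, with the insecure image loading left disabled (its default):

	CONFIG_OPTEE=m
	# CONFIG_OPTEE_INSECURE_LOAD_IMAGE is not set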
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
new file mode 100644
index 0000000000..a6eff388d3
--- /dev/null
+++ b/drivers/tee/optee/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_OPTEE) += optee.o
+optee-objs += core.o
+optee-objs += call.o
+optee-objs += notif.o
+optee-objs += rpc.o
+optee-objs += supp.o
+optee-objs += device.o
+optee-objs += smc_abi.o
+optee-objs += ffa_abi.o
+
+# for tracing framework to find optee_trace.h
+CFLAGS_smc_abi.o := -I$(src)
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
new file mode 100644
index 0000000000..df5fb5410b
--- /dev/null
+++ b/drivers/tee/optee/call.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_private.h"
+
+#define MAX_ARG_PARAM_COUNT 6
+
+/*
+ * How much memory we allocate for each entry. This doesn't have to be a
+ * single page, but it makes sense to keep it as a multiple of
+ * the page size.
+ */
+#define SHM_ENTRY_SIZE PAGE_SIZE
+
+/*
+ * We need to have a compile time constant to be able to determine the
+ * maximum needed size of the bit field.
+ */
+#define MIN_ARG_SIZE OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT)
+#define MAX_ARG_COUNT_PER_ENTRY (SHM_ENTRY_SIZE / MIN_ARG_SIZE)
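To make these constants concrete, here is a worked example; it assumes 4 KiB pages and the struct layouts in optee_msg.h, where OPTEE_MSG_GET_ARG_SIZE(n) expands to sizeof(struct optee_msg_arg) + n * sizeof(struct optee_msg_param) and both structs are 32 bytes:

	/*
	 * Illustrative arithmetic only (PAGE_SIZE == 4096 assumed):
	 *
	 *   MIN_ARG_SIZE            = 32 + 6 * 32 = 224 bytes
	 *   MAX_ARG_COUNT_PER_ENTRY = 4096 / 224  = 18 argument structs
	 *
	 * so the bitmap in struct optee_shm_arg_entry below spans 18 bits.
	 */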
+
+/*
+ * Shared memory for argument structs is cached here. The number of
+ * argument structs that can fit is determined at runtime depending on the
+ * needed RPC parameter count reported by secure world
+ * (optee->rpc_param_count).
+ */
+struct optee_shm_arg_entry {
+ struct list_head list_node;
+ struct tee_shm *shm;
+ DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
+};
+
+void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ /*
+ * We're preparing to make a call to secure world. In case we can't
+ * allocate a thread in secure world we'll end up waiting in
+ * optee_cq_wait_for_completion().
+ *
+ * Normally if there's no contention in secure world the call will
+ * complete and we can cleanup directly with optee_cq_wait_final().
+ */
+ mutex_lock(&cq->mutex);
+
+ /*
+ * We add ourselves to the queue, but we don't wait. This
+ * guarantees that we don't lose a completion if secure world
+ * returns busy and another thread that just exited tries to
+ * complete someone.
+ */
+ init_completion(&w->c);
+ list_add_tail(&w->list_node, &cq->waiters);
+
+ mutex_unlock(&cq->mutex);
+}
+
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ wait_for_completion(&w->c);
+
+ mutex_lock(&cq->mutex);
+
+ /* Move to end of list to get out of the way for other waiters */
+ list_del(&w->list_node);
+ reinit_completion(&w->c);
+ list_add_tail(&w->list_node, &cq->waiters);
+
+ mutex_unlock(&cq->mutex);
+}
+
+static void optee_cq_complete_one(struct optee_call_queue *cq)
+{
+ struct optee_call_waiter *w;
+
+ list_for_each_entry(w, &cq->waiters, list_node) {
+ if (!completion_done(&w->c)) {
+ complete(&w->c);
+ break;
+ }
+ }
+}
+
+void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ /*
+ * We're done with the call to secure world. The thread in secure
+ * world that was used for this call is now available for some
+ * other task to use.
+ */
+ mutex_lock(&cq->mutex);
+
+ /* Get out of the list */
+ list_del(&w->list_node);
+
+ /* Wake up one waiting task, if any */
+ optee_cq_complete_one(cq);
+
+ /*
+ * If our completion is done we've got a completion from another
+ * task that was just done with its call to secure world. Since yet
+ * another thread now is available in secure world, wake up another
+ * waiting task, if any.
+ */
+ if (completion_done(&w->c))
+ optee_cq_complete_one(cq);
+
+ mutex_unlock(&cq->mutex);
+}
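Taken together, the three helpers above implement the busy-retry protocol that each ABI backend follows when secure world runs out of threads. A minimal sketch of the expected pairing, where invoke_secure_world() is a hypothetical stand-in for the ABI-specific call (the real FF-A loop is optee_ffa_yielding_call() in ffa_abi.c below):

	/* Hedged sketch, not driver code; invoke_secure_world() is made up. */
	static int example_call(struct optee *optee)
	{
		struct optee_call_waiter w;
		int rc;

		optee_cq_wait_init(&optee->call_queue, &w);
		while ((rc = invoke_secure_world()) == -EBUSY) {
			/* All secure threads busy: sleep until one frees up */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
		/* Leave the queue and pass our freed thread on to a waiter */
		optee_cq_wait_final(&optee->call_queue, &w);
		return rc;
	}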
+
+/* Requires the filpstate mutex to be held */
+static struct optee_session *find_session(struct optee_context_data *ctxdata,
+ u32 session_id)
+{
+ struct optee_session *sess;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->session_id == session_id)
+ return sess;
+
+ return NULL;
+}
+
+void optee_shm_arg_cache_init(struct optee *optee, u32 flags)
+{
+ INIT_LIST_HEAD(&optee->shm_arg_cache.shm_args);
+ mutex_init(&optee->shm_arg_cache.mutex);
+ optee->shm_arg_cache.flags = flags;
+}
+
+void optee_shm_arg_cache_uninit(struct optee *optee)
+{
+ struct list_head *head = &optee->shm_arg_cache.shm_args;
+ struct optee_shm_arg_entry *entry;
+
+ mutex_destroy(&optee->shm_arg_cache.mutex);
+ while (!list_empty(head)) {
+ entry = list_first_entry(head, struct optee_shm_arg_entry,
+ list_node);
+ list_del(&entry->list_node);
+ if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) !=
+ MAX_ARG_COUNT_PER_ENTRY) {
+ pr_err("Freeing non-free entry\n");
+ }
+ tee_shm_free(entry->shm);
+ kfree(entry);
+ }
+}
+
+size_t optee_msg_arg_size(size_t rpc_param_count)
+{
+ size_t sz = OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT);
+
+ if (rpc_param_count)
+ sz += OPTEE_MSG_GET_ARG_SIZE(rpc_param_count);
+
+ return sz;
+}
+
+/**
+ * optee_get_msg_arg() - Provide shared memory for argument struct
+ * @ctx: Caller TEE context
+ * @num_params: Number of parameters to store
+ * @entry_ret: Entry pointer, needed when freeing the buffer
+ * @shm_ret: Shared memory buffer
+ * @offs_ret: Offset of argument struct in shared memory buffer
+ *
+ * @returns a pointer to the argument struct in memory, else an ERR_PTR
+ */
+struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
+ size_t num_params,
+ struct optee_shm_arg_entry **entry_ret,
+ struct tee_shm **shm_ret,
+ u_int *offs_ret)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ size_t sz = optee_msg_arg_size(optee->rpc_param_count);
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *ma;
+ size_t args_per_entry;
+ u_long bit;
+ u_int offs;
+ void *res;
+
+ if (num_params > MAX_ARG_PARAM_COUNT)
+ return ERR_PTR(-EINVAL);
+
+ if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_SHARED)
+ args_per_entry = SHM_ENTRY_SIZE / sz;
+ else
+ args_per_entry = 1;
+
+ mutex_lock(&optee->shm_arg_cache.mutex);
+ list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) {
+ bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
+ if (bit < args_per_entry)
+ goto have_entry;
+ }
+
+ /*
+ * No entry was found, let's allocate a new one.
+ */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ res = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV)
+ res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE);
+ else
+ res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE);
+
+ if (IS_ERR(res)) {
+ kfree(entry);
+ goto out;
+ }
+ entry->shm = res;
+ list_add(&entry->list_node, &optee->shm_arg_cache.shm_args);
+ bit = 0;
+
+have_entry:
+ offs = bit * sz;
+ res = tee_shm_get_va(entry->shm, offs);
+ if (IS_ERR(res))
+ goto out;
+ ma = res;
+ set_bit(bit, entry->map);
+ memset(ma, 0, sz);
+ ma->num_params = num_params;
+ *entry_ret = entry;
+ *shm_ret = entry->shm;
+ *offs_ret = offs;
+out:
+ mutex_unlock(&optee->shm_arg_cache.mutex);
+ return res;
+}
+
+/**
+ * optee_free_msg_arg() - Free previously obtained shared memory
+ * @ctx: Caller TEE context
+ * @entry: Pointer returned when the shared memory was obtained
+ * @offs: Offset of shared memory buffer to free
+ *
+ * This function frees the shared memory obtained with optee_get_msg_arg().
+ */
+void optee_free_msg_arg(struct tee_context *ctx,
+ struct optee_shm_arg_entry *entry, u_int offs)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ size_t sz = optee_msg_arg_size(optee->rpc_param_count);
+ u_long bit;
+
+ if (offs > SHM_ENTRY_SIZE || offs % sz) {
+ pr_err("Invalid offs %u\n", offs);
+ return;
+ }
+ bit = offs / sz;
+
+ mutex_lock(&optee->shm_arg_cache.mutex);
+
+ if (!test_bit(bit, entry->map))
+ pr_err("Bit pos %lu is already free\n", bit);
+ clear_bit(bit, entry->map);
+
+ mutex_unlock(&optee->shm_arg_cache.mutex);
+}
+
+int optee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_shm_arg_entry *entry;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ struct optee_session *sess = NULL;
+ uuid_t client_uuid;
+ u_int offs;
+ int rc;
+
+ /* +2 for the meta parameters added below */
+ msg_arg = optee_get_msg_arg(ctx, arg->num_params + 2,
+ &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
+ msg_arg->cancel_id = arg->cancel_id;
+
+ /*
+ * Initialize and add the meta parameters needed when opening a
+ * session.
+ */
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ OPTEE_MSG_ATTR_META;
+ msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ OPTEE_MSG_ATTR_META;
+ memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
+ msg_arg->params[1].u.value.c = arg->clnt_login;
+
+ rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
+ arg->clnt_uuid);
+ if (rc)
+ goto out;
+ export_uuid(msg_arg->params[1].u.octets, &client_uuid);
+
+ rc = optee->ops->to_msg_param(optee, msg_arg->params + 2,
+ arg->num_params, param);
+ if (rc)
+ goto out;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ if (msg_arg->ret == TEEC_SUCCESS) {
+ /* A new session has been created, add it to the list. */
+ sess->session_id = msg_arg->session;
+ mutex_lock(&ctxdata->mutex);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ mutex_unlock(&ctxdata->mutex);
+ } else {
+ kfree(sess);
+ }
+
+ if (optee->ops->from_msg_param(optee, param, arg->num_params,
+ msg_arg->params + 2)) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ /* Close session again to avoid leakage */
+ optee_close_session(ctx, msg_arg->session);
+ } else {
+ arg->session = msg_arg->session;
+ arg->ret = msg_arg->ret;
+ arg->ret_origin = msg_arg->ret_origin;
+ }
+out:
+ optee_free_msg_arg(ctx, entry, offs);
+
+ return rc;
+}
+
+int optee_close_session_helper(struct tee_context *ctx, u32 session)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+ msg_arg->session = session;
+ optee->ops->do_call_with_arg(ctx, shm, offs);
+
+ optee_free_msg_arg(ctx, entry, offs);
+
+ return 0;
+}
+
+int optee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_session *sess;
+
+ /* Check that the session is valid and remove it from the list */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, session);
+ if (sess)
+ list_del(&sess->list_node);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+ kfree(sess);
+
+ return optee_close_session_helper(ctx, session);
+}
+
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct optee_session *sess;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ /* Check that the session is valid */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, arg->session);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+
+ msg_arg = optee_get_msg_arg(ctx, arg->num_params,
+ &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+ msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
+ msg_arg->func = arg->func;
+ msg_arg->session = arg->session;
+ msg_arg->cancel_id = arg->cancel_id;
+
+ rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params,
+ param);
+ if (rc)
+ goto out;
+
+ if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ if (optee->ops->from_msg_param(optee, param, arg->num_params,
+ msg_arg->params)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ arg->ret = msg_arg->ret;
+ arg->ret_origin = msg_arg->ret_origin;
+out:
+ optee_free_msg_arg(ctx, entry, offs);
+ return rc;
+}
+
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct optee_session *sess;
+ struct tee_shm *shm;
+ u_int offs;
+
+ /* Check that the session is valid */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, session);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
+ msg_arg->session = session;
+ msg_arg->cancel_id = cancel_id;
+ optee->ops->do_call_with_arg(ctx, shm, offs);
+
+ optee_free_msg_arg(ctx, entry, offs);
+ return 0;
+}
+
+static bool is_normal_memory(pgprot_t p)
+{
+#if defined(CONFIG_ARM)
+ return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
+ ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
+#elif defined(CONFIG_ARM64)
+ return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
+#else
+#error "Unsupported architecture"
+#endif
+}
+
+static int __check_mem_type(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, start);
+
+ for_each_vma_range(vmi, vma, end) {
+ if (!is_normal_memory(vma->vm_page_prot))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int optee_check_mem_type(unsigned long start, size_t num_pages)
+{
+ struct mm_struct *mm = current->mm;
+ int rc;
+
+ /*
+ * Allow kernel address to register with OP-TEE as kernel
+ * pages are configured as normal memory only.
+ */
+ if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
+ return 0;
+
+ mmap_read_lock(mm);
+ rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE);
+ mmap_read_unlock(mm);
+
+ return rc;
+}
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
new file mode 100644
index 0000000000..2a258bd3b6
--- /dev/null
+++ b/drivers/tee/optee/core.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crash_dump.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "optee_private.h"
+
+int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start))
+{
+ unsigned int order = get_order(size);
+ struct page *page;
+ int rc = 0;
+
+ /*
+ * Ignore alignment since this is already going to be page aligned
+ * and there's no need for any larger alignment.
+ */
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return -ENOMEM;
+
+ shm->kaddr = page_address(page);
+ shm->paddr = page_to_phys(page);
+ shm->size = PAGE_SIZE << order;
+
+ if (shm_register) {
+ unsigned int nr_pages = 1 << order, i;
+ struct page **pages;
+
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
+
+ rc = shm_register(shm->ctx, shm, pages, nr_pages,
+ (unsigned long)shm->kaddr);
+ kfree(pages);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ free_pages((unsigned long)shm->kaddr, order);
+ return rc;
+}
+
+void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm))
+{
+ if (shm_unregister)
+ shm_unregister(shm->ctx, shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void optee_bus_scan(struct work_struct *work)
+{
+ WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
+}
+
+int optee_open(struct tee_context *ctx, bool cap_memref_null)
+{
+ struct optee_context_data *ctxdata;
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ if (teedev == optee->supp_teedev) {
+ bool busy = true;
+
+ mutex_lock(&optee->supp.mutex);
+ if (!optee->supp.ctx) {
+ busy = false;
+ optee->supp.ctx = ctx;
+ }
+ mutex_unlock(&optee->supp.mutex);
+ if (busy) {
+ kfree(ctxdata);
+ return -EBUSY;
+ }
+
+ if (!optee->scan_bus_done) {
+ INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
+ optee->scan_bus_wq = create_workqueue("optee_bus_scan");
+ if (!optee->scan_bus_wq) {
+ kfree(ctxdata);
+ return -ECHILD;
+ }
+ queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
+ optee->scan_bus_done = true;
+ }
+ }
+ mutex_init(&ctxdata->mutex);
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+
+ ctx->cap_memref_null = cap_memref_null;
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void optee_release_helper(struct tee_context *ctx,
+ int (*close_session)(struct tee_context *ctx,
+ u32 session))
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_session *sess;
+ struct optee_session *sess_tmp;
+
+ if (!ctxdata)
+ return;
+
+ list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
+ list_node) {
+ list_del(&sess->list_node);
+ close_session(ctx, sess->session_id);
+ kfree(sess);
+ }
+ kfree(ctxdata);
+ ctx->data = NULL;
+}
+
+void optee_release(struct tee_context *ctx)
+{
+ optee_release_helper(ctx, optee_close_session_helper);
+}
+
+void optee_release_supp(struct tee_context *ctx)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+
+ optee_release_helper(ctx, optee_close_session_helper);
+ if (optee->scan_bus_wq) {
+ destroy_workqueue(optee->scan_bus_wq);
+ optee->scan_bus_wq = NULL;
+ }
+ optee_supp_release(&optee->supp);
+}
+
+void optee_remove_common(struct optee *optee)
+{
+ /* Unregister OP-TEE specific client devices on TEE bus */
+ optee_unregister_devices();
+
+ optee_notif_uninit(optee);
+ optee_shm_arg_cache_uninit(optee);
+ teedev_close_context(optee->ctx);
+ /*
+ * The two devices have to be unregistered before we can free the
+ * other resources.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+
+ tee_shm_pool_free(optee->pool);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+}
+
+static int smc_abi_rc;
+static int ffa_abi_rc;
+
+static int __init optee_core_init(void)
+{
+ /*
+ * The kernel may have crashed at the same time that all available
+ * secure world threads were suspended and we cannot reschedule the
+ * suspended threads without access to the crashed kernel's wait_queue.
+ * Therefore, we cannot reliably initialize the OP-TEE driver in the
+ * kdump kernel.
+ */
+ if (is_kdump_kernel())
+ return -ENODEV;
+
+ smc_abi_rc = optee_smc_abi_register();
+ ffa_abi_rc = optee_ffa_abi_register();
+
+ /* If both failed there's no point with this module */
+ if (smc_abi_rc && ffa_abi_rc)
+ return smc_abi_rc;
+ return 0;
+}
+module_init(optee_core_init);
+
+static void __exit optee_core_exit(void)
+{
+ if (!smc_abi_rc)
+ optee_smc_abi_unregister();
+ if (!ffa_abi_rc)
+ optee_ffa_abi_unregister();
+}
+module_exit(optee_core_exit);
+
+MODULE_AUTHOR("Linaro");
+MODULE_DESCRIPTION("OP-TEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:optee");
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
new file mode 100644
index 0000000000..4b10921276
--- /dev/null
+++ b/drivers/tee/optee/device.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include "optee_private.h"
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int get_devices(struct tee_context *ctx, u32 session,
+ struct tee_shm *device_shm, u32 *shm_size,
+ u32 func)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ inv_arg.func = func;
+ inv_arg.session = session;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = device_shm;
+ param[0].u.memref.size = *shm_size;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(ctx, &inv_arg, param);
+ if ((ret < 0) || ((inv_arg.ret != TEEC_SUCCESS) &&
+ (inv_arg.ret != TEEC_ERROR_SHORT_BUFFER))) {
+ pr_err("PTA_CMD_GET_DEVICES invoke function err: %x\n",
+ inv_arg.ret);
+ return -EINVAL;
+ }
+
+ *shm_size = param[0].u.memref.size;
+
+ return 0;
+}
+
+static void optee_release_device(struct device *dev)
+{
+ struct tee_client_device *optee_device = to_tee_client_device(dev);
+
+ kfree(optee_device);
+}
+
+static ssize_t need_supplicant_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static DEVICE_ATTR_RO(need_supplicant);
+
+static int optee_register_device(const uuid_t *device_uuid, u32 func)
+{
+ struct tee_client_device *optee_device = NULL;
+ int rc;
+
+ optee_device = kzalloc(sizeof(*optee_device), GFP_KERNEL);
+ if (!optee_device)
+ return -ENOMEM;
+
+ optee_device->dev.bus = &tee_bus_type;
+ optee_device->dev.release = optee_release_device;
+ if (dev_set_name(&optee_device->dev, "optee-ta-%pUb", device_uuid)) {
+ kfree(optee_device);
+ return -ENOMEM;
+ }
+ uuid_copy(&optee_device->id.uuid, device_uuid);
+
+ rc = device_register(&optee_device->dev);
+ if (rc) {
+ pr_err("device registration failed, err: %d\n", rc);
+ put_device(&optee_device->dev);
+ return rc;
+ }
+
+ if (func == PTA_CMD_GET_DEVICES_SUPP)
+ device_create_file(&optee_device->dev,
+ &dev_attr_need_supplicant);
+
+ return rc;
+}
+
+static int __optee_enumerate_devices(u32 func)
+{
+ const uuid_t pta_uuid =
+ UUID_INIT(0x7011a688, 0xddde, 0x4053,
+ 0xa5, 0xa9, 0x7b, 0x3c, 0x4d, 0xdf, 0x13, 0xb8);
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *device_shm = NULL;
+ const uuid_t *device_uuid = NULL;
+ struct tee_context *ctx = NULL;
+ u32 shm_size = 0, idx, num_devices = 0;
+ int rc;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with OP-TEE driver */
+ ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+ if (IS_ERR(ctx))
+ return -ENODEV;
+
+ /* Open session with device enumeration pseudo TA */
+ export_uuid(sess_arg.uuid, &pta_uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != TEEC_SUCCESS)) {
+ /* Device enumeration pseudo TA not found */
+ rc = 0;
+ goto out_ctx;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, NULL, &shm_size, func);
+ if (rc < 0 || !shm_size)
+ goto out_sess;
+
+ device_shm = tee_shm_alloc_kernel_buf(ctx, shm_size);
+ if (IS_ERR(device_shm)) {
+ pr_err("tee_shm_alloc_kernel_buf failed\n");
+ rc = PTR_ERR(device_shm);
+ goto out_sess;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, device_shm, &shm_size, func);
+ if (rc < 0)
+ goto out_shm;
+
+ device_uuid = tee_shm_get_va(device_shm, 0);
+ if (IS_ERR(device_uuid)) {
+ pr_err("tee_shm_get_va failed\n");
+ rc = PTR_ERR(device_uuid);
+ goto out_shm;
+ }
+
+ num_devices = shm_size / sizeof(uuid_t);
+
+ for (idx = 0; idx < num_devices; idx++) {
+ rc = optee_register_device(&device_uuid[idx], func);
+ if (rc)
+ goto out_shm;
+ }
+
+out_shm:
+ tee_shm_free(device_shm);
+out_sess:
+ tee_client_close_session(ctx, sess_arg.session);
+out_ctx:
+ tee_client_close_context(ctx);
+
+ return rc;
+}
+
+int optee_enumerate_devices(u32 func)
+{
+ return __optee_enumerate_devices(func);
+}
+
+static int __optee_unregister_device(struct device *dev, void *data)
+{
+ if (!strncmp(dev_name(dev), "optee-ta", strlen("optee-ta")))
+ device_unregister(dev);
+
+ return 0;
+}
+
+void optee_unregister_devices(void)
+{
+ bus_for_each_dev(&tee_bus_type, NULL, NULL,
+ __optee_unregister_device);
+}
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
new file mode 100644
index 0000000000..0828240f27
--- /dev/null
+++ b/drivers/tee/optee/ffa_abi.c
@@ -0,0 +1,922 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/errno.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_private.h"
+#include "optee_ffa.h"
+#include "optee_rpc_cmd.h"
+
+/*
+ * This file implements the FF-A ABI used when communicating with secure world
+ * OP-TEE OS via FF-A.
+ * This file is divided into the following sections:
+ * 1. Maintain a hash table for lookup of a global FF-A memory handle
+ * 2. Convert between struct tee_param and struct optee_msg_param
+ * 3. Low level support functions to register shared memory in secure world
+ * 4. Dynamic shared memory pool based on alloc_pages()
+ * 5. Do a normal scheduled call into secure world
+ * 6. Driver initialization.
+ */
+
+/*
+ * 1. Maintain a hash table for lookup of a global FF-A memory handle
+ *
+ * FF-A assigns a global memory handle for each piece of shared memory.
+ * This handle is then used when communicating with secure world.
+ *
+ * Main functions are optee_shm_add_ffa_handle() and optee_shm_rem_ffa_handle()
+ */
+struct shm_rhash {
+ struct tee_shm *shm;
+ u64 global_id;
+ struct rhash_head linkage;
+};
+
+static void rh_free_fn(void *ptr, void *arg)
+{
+ kfree(ptr);
+}
+
+static const struct rhashtable_params shm_rhash_params = {
+ .head_offset = offsetof(struct shm_rhash, linkage),
+ .key_len = sizeof(u64),
+ .key_offset = offsetof(struct shm_rhash, global_id),
+ .automatic_shrinking = true,
+};
+
+static struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee,
+ u64 global_id)
+{
+ struct tee_shm *shm = NULL;
+ struct shm_rhash *r;
+
+ mutex_lock(&optee->ffa.mutex);
+ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
+ shm_rhash_params);
+ if (r)
+ shm = r->shm;
+ mutex_unlock(&optee->ffa.mutex);
+
+ return shm;
+}
+
+static int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm,
+ u64 global_id)
+{
+ struct shm_rhash *r;
+ int rc;
+
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+ r->shm = shm;
+ r->global_id = global_id;
+
+ mutex_lock(&optee->ffa.mutex);
+ rc = rhashtable_lookup_insert_fast(&optee->ffa.global_ids, &r->linkage,
+ shm_rhash_params);
+ mutex_unlock(&optee->ffa.mutex);
+
+ if (rc)
+ kfree(r);
+
+ return rc;
+}
+
+static int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id)
+{
+ struct shm_rhash *r;
+ int rc = -ENOENT;
+
+ mutex_lock(&optee->ffa.mutex);
+ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
+ shm_rhash_params);
+ if (r)
+ rc = rhashtable_remove_fast(&optee->ffa.global_ids,
+ &r->linkage, shm_rhash_params);
+ mutex_unlock(&optee->ffa.mutex);
+
+ if (!rc)
+ kfree(r);
+
+ return rc;
+}
+
+/*
+ * 2. Convert between struct tee_param and struct optee_msg_param
+ *
+ * optee_ffa_from_msg_param() and optee_ffa_to_msg_param() are the main
+ * functions.
+ */
+
+static void from_msg_param_ffa_mem(struct optee *optee, struct tee_param *p,
+ u32 attr, const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm = NULL;
+ u64 offs_high = 0;
+ u64 offs_low = 0;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
+ p->u.memref.size = mp->u.fmem.size;
+
+ if (mp->u.fmem.global_id != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
+ shm = optee_shm_from_ffa_handle(optee, mp->u.fmem.global_id);
+ p->u.memref.shm = shm;
+
+ if (shm) {
+ offs_low = mp->u.fmem.offs_low;
+ offs_high = mp->u.fmem.offs_high;
+ }
+ p->u.memref.shm_offs = offs_low | offs_high << 32;
+}
+
+/**
+ * optee_ffa_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @optee: main service struct
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ *
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_ffa_from_msg_param(struct optee *optee,
+ struct tee_param *params, size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ optee_from_msg_param_value(p, attr, mp);
+ break;
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT:
+ from_msg_param_ffa_mem(optee, p, attr, mp);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int to_msg_param_ffa_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ struct tee_shm *shm = p->u.memref.shm;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_FMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ if (shm) {
+ u64 shm_offs = p->u.memref.shm_offs;
+
+ mp->u.fmem.internal_offs = shm->offset;
+
+ mp->u.fmem.offs_low = shm_offs;
+ mp->u.fmem.offs_high = shm_offs >> 32;
+ /* Check that the entire offset could be stored. */
+ if (mp->u.fmem.offs_high != shm_offs >> 32)
+ return -EINVAL;
+
+ mp->u.fmem.global_id = shm->sec_world_id;
+ } else {
+ memset(&mp->u, 0, sizeof(mp->u));
+ mp->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
+ }
+ mp->u.fmem.size = p->u.memref.size;
+
+ return 0;
+}
+
+/**
+ * optee_ffa_to_msg_param() - convert from struct tee_params to OPTEE_MSG
+ * parameters
+ * @optee: main service struct
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params: subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_ffa_to_msg_param(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params,
+ const struct tee_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ optee_to_msg_param_value(mp, p);
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (to_msg_param_ffa_mem(mp, p))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * 3. Low level support functions to register shared memory in secure world
+ *
+ * Functions to register and unregister shared memory both for normal
+ * clients and for tee-supplicant.
+ */
+
+static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ struct ffa_mem_region_attributes mem_attr = {
+ .receiver = ffa_dev->vm_id,
+ .attrs = FFA_MEM_RW,
+ };
+ struct ffa_mem_ops_args args = {
+ .use_txbuf = true,
+ .attrs = &mem_attr,
+ .nattrs = 1,
+ };
+ struct sg_table sgt;
+ int rc;
+
+ rc = optee_check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
+ num_pages * PAGE_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+ args.sg = sgt.sgl;
+ rc = mem_ops->memory_share(&args);
+ sg_free_table(&sgt);
+ if (rc)
+ return rc;
+
+ rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle);
+ if (rc) {
+ mem_ops->memory_reclaim(args.g_handle, 0);
+ return rc;
+ }
+
+ shm->sec_world_id = args.g_handle;
+
+ return 0;
+}
+
+static int optee_ffa_shm_unregister(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ u64 global_handle = shm->sec_world_id;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_UNREGISTER_SHM,
+ .data1 = (u32)global_handle,
+ .data2 = (u32)(global_handle >> 32)
+ };
+ int rc;
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ shm->sec_world_id = 0;
+
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc);
+
+ rc = mem_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+ pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+ return rc;
+}
+
+static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_mem_ops *mem_ops;
+ u64 global_handle = shm->sec_world_id;
+ int rc;
+
+ /*
+ * We're skipping the OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM call
+ * since this is OP-TEE freeing via RPC so it has already retired
+ * this ID.
+ */
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ mem_ops = optee->ffa.ffa_dev->ops->mem_ops;
+ rc = mem_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+ pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+ shm->sec_world_id = 0;
+
+ return rc;
+}
+
+/*
+ * 4. Dynamic shared memory pool based on alloc_pages()
+ *
+ * Implements an OP-TEE specific shared memory pool.
+ * The main function is optee_ffa_shm_pool_alloc_pages().
+ */
+
+static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
+ struct tee_shm *shm, size_t size, size_t align)
+{
+ return optee_pool_op_alloc_helper(pool, shm, size, align,
+ optee_ffa_shm_register);
+}
+
+static void pool_ffa_op_free(struct tee_shm_pool *pool,
+ struct tee_shm *shm)
+{
+ optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister);
+}
+
+static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
+{
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ffa_ops = {
+ .alloc = pool_ffa_op_alloc,
+ .free = pool_ffa_op_free,
+ .destroy_pool = pool_ffa_op_destroy_pool,
+};
+
+/**
+ * optee_ffa_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used with OP-TEE over FF-A. In this case command buffers
+ * and such are allocated from kernel's own memory.
+ */
+static struct tee_shm_pool *optee_ffa_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->ops = &pool_ffa_ops;
+
+ return pool;
+}
+
+/*
+ * 5. Do a normal scheduled call into secure world
+ *
+ * The function optee_ffa_do_call_with_arg() performs a normal scheduled
+ * call into secure world. During this call secure world may request help
+ * from normal world using RPCs, Remote Procedure Calls. This includes
+ * delivery of non-secure interrupts to, for instance, allow rescheduling of
+ * the current task.
+ */
+
+static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc_priv_buf(optee->ctx,
+ arg->params[0].u.value.b);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ arg->params[0] = (struct optee_msg_param){
+ .attr = OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT,
+ .u.fmem.size = tee_shm_get_size(shm),
+ .u.fmem.global_id = shm->sec_world_id,
+ .u.fmem.internal_offs = shm->offset,
+ };
+
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_ffa_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto err_bad_param;
+
+ shm = optee_shm_from_ffa_handle(optee, arg->params[0].u.value.b);
+ if (!shm)
+ goto err_bad_param;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ optee_rpc_cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+ default:
+ goto err_bad_param;
+ }
+ arg->ret = TEEC_SUCCESS;
+ return;
+
+err_bad_param:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_SHM_ALLOC:
+ handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
+ break;
+ case OPTEE_RPC_CMD_SHM_FREE:
+ handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
+ break;
+ default:
+ optee_rpc_cmd(ctx, optee, arg);
+ }
+}
+
+static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
+ u32 cmd, struct optee_msg_arg *arg)
+{
+ switch (cmd) {
+ case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
+ handle_ffa_rpc_func_cmd(ctx, optee, arg);
+ break;
+ case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
+ /* Interrupt delivered by now */
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n", cmd);
+ break;
+ }
+}
+
+static int optee_ffa_yielding_call(struct tee_context *ctx,
+ struct ffa_send_direct_data *data,
+ struct optee_msg_arg *rpc_arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ struct optee_call_waiter w;
+ u32 cmd = data->data0;
+ u32 w4 = data->data1;
+ u32 w5 = data->data2;
+ u32 w6 = data->data3;
+ int rc;
+
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ rc = msg_ops->sync_send_receive(ffa_dev, data);
+ if (rc)
+ goto done;
+
+ switch ((int)data->data0) {
+ case TEEC_SUCCESS:
+ break;
+ case TEEC_ERROR_BUSY:
+ if (cmd == OPTEE_FFA_YIELDING_CALL_RESUME) {
+ rc = -EIO;
+ goto done;
+ }
+
+ /*
+ * Out of threads in secure world, wait for a thread
+ * to become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ data->data0 = cmd;
+ data->data1 = w4;
+ data->data2 = w5;
+ data->data3 = w6;
+ continue;
+ default:
+ rc = -EIO;
+ goto done;
+ }
+
+ if (data->data1 == OPTEE_FFA_YIELDING_CALL_RETURN_DONE)
+ goto done;
+
+ /*
+ * OP-TEE has returned with a RPC request.
+ *
+ * Note that data->data4 (passed in register w7) is already
+ * filled in by msg_ops->sync_send_receive() returning
+ * above.
+ */
+ cond_resched();
+ optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
+ cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
+ data->data0 = cmd;
+ data->data1 = 0;
+ data->data2 = 0;
+ data->data3 = 0;
+ }
+done:
+ /*
+ * We're done with our thread in secure world, if there are any
+ * thread waiters wake up one.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return rc;
+}
+
+/**
+ * optee_ffa_do_call_with_arg() - Do a FF-A call to enter OP-TEE in secure world
+ * @ctx: calling context
+ * @shm: shared memory holding the message to pass to secure world
+ * @offs: offset of the message in @shm
+ *
+ * Does a FF-A call to OP-TEE in secure world and handles eventual resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns return code from FF-A, 0 is OK
+ */
+
+static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
+ struct tee_shm *shm, u_int offs)
+{
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
+ .data1 = (u32)shm->sec_world_id,
+ .data2 = (u32)(shm->sec_world_id >> 32),
+ .data3 = offs,
+ };
+ struct optee_msg_arg *arg;
+ unsigned int rpc_arg_offs;
+ struct optee_msg_arg *rpc_arg;
+
+ /*
+ * The shared memory object has to start on a page when passed as
+ * an argument struct. This is also what the shm pool allocator
+ * returns, but check this before calling secure world to catch
+ * eventual errors early in case something changes.
+ */
+ if (shm->offset)
+ return -EINVAL;
+
+ arg = tee_shm_get_va(shm, offs);
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+
+ rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
+
+ return optee_ffa_yielding_call(ctx, &data, rpc_arg);
+}
+
+/*
+ * 6. Driver initialization
+ *
+ * During driver initialization the OP-TEE Secure Partition is probed
+ * to find out which features it supports so the driver can be initialized
+ * with a matching configuration.
+ */
+
+static bool optee_ffa_api_is_compatible(struct ffa_device *ffa_dev,
+ const struct ffa_ops *ops)
+{
+ const struct ffa_msg_ops *msg_ops = ops->msg_ops;
+ struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
+ int rc;
+
+ msg_ops->mode_32bit_set(ffa_dev);
+
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d\n", rc);
+ return false;
+ }
+ if (data.data0 != OPTEE_FFA_VERSION_MAJOR ||
+ data.data1 < OPTEE_FFA_VERSION_MINOR) {
+ pr_err("Incompatible OP-TEE API version %lu.%lu",
+ data.data0, data.data1);
+ return false;
+ }
+
+ data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d\n", rc);
+ return false;
+ }
+ if (data.data2)
+ pr_info("revision %lu.%lu (%08lx)",
+ data.data0, data.data1, data.data2);
+ else
+ pr_info("revision %lu.%lu", data.data0, data.data1);
+
+ return true;
+}
+
+static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
+ const struct ffa_ops *ops,
+ u32 *sec_caps,
+ unsigned int *rpc_param_count)
+{
+ struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
+ int rc;
+
+ rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d", rc);
+ return false;
+ }
+ if (data.data0) {
+ pr_err("Unexpected exchange error %lu", data.data0);
+ return false;
+ }
+
+ *rpc_param_count = (u8)data.data1;
+ *sec_caps = data.data2;
+
+ return true;
+}
+
+static void optee_ffa_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM |
+ TEE_GEN_CAP_MEMREF_NULL,
+ };
+
+ *vers = v;
+}
+
+static int optee_ffa_open(struct tee_context *ctx)
+{
+ return optee_open(ctx, true);
+}
+
+static const struct tee_driver_ops optee_ffa_clnt_ops = {
+ .get_version = optee_ffa_get_version,
+ .open = optee_ffa_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+ .shm_register = optee_ffa_shm_register,
+ .shm_unregister = optee_ffa_shm_unregister,
+};
+
+static const struct tee_desc optee_ffa_clnt_desc = {
+ .name = DRIVER_NAME "-ffa-clnt",
+ .ops = &optee_ffa_clnt_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_ffa_supp_ops = {
+ .get_version = optee_ffa_get_version,
+ .open = optee_ffa_open,
+ .release = optee_release_supp,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+ .shm_register = optee_ffa_shm_register, /* same as for clnt ops */
+ .shm_unregister = optee_ffa_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_ffa_supp_desc = {
+ .name = DRIVER_NAME "-ffa-supp",
+ .ops = &optee_ffa_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static const struct optee_ops optee_ffa_ops = {
+ .do_call_with_arg = optee_ffa_do_call_with_arg,
+ .to_msg_param = optee_ffa_to_msg_param,
+ .from_msg_param = optee_ffa_from_msg_param,
+};
+
+static void optee_ffa_remove(struct ffa_device *ffa_dev)
+{
+ struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
+
+ optee_remove_common(optee);
+
+ mutex_destroy(&optee->ffa.mutex);
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+
+ kfree(optee);
+}
+
+static int optee_ffa_probe(struct ffa_device *ffa_dev)
+{
+ const struct ffa_ops *ffa_ops;
+ unsigned int rpc_param_count;
+ struct tee_shm_pool *pool;
+ struct tee_device *teedev;
+ struct tee_context *ctx;
+ u32 arg_cache_flags = 0;
+ struct optee *optee;
+ u32 sec_caps;
+ int rc;
+
+ ffa_ops = ffa_dev->ops;
+
+ if (!optee_ffa_api_is_compatible(ffa_dev, ffa_ops))
+ return -EINVAL;
+
+ if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
+ &rpc_param_count))
+ return -EINVAL;
+ if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
+ arg_cache_flags |= OPTEE_SHM_ARG_SHARED;
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee)
+ return -ENOMEM;
+
+ pool = optee_ffa_shm_pool_alloc_pages();
+ if (IS_ERR(pool)) {
+ rc = PTR_ERR(pool);
+ goto err_free_optee;
+ }
+ optee->pool = pool;
+
+ optee->ops = &optee_ffa_ops;
+ optee->ffa.ffa_dev = ffa_dev;
+ optee->rpc_param_count = rpc_param_count;
+
+ teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
+ optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_pool;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_ffa_supp_desc, NULL, optee->pool,
+ optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_unreg_teedev;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err_unreg_supp_teedev;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err_unreg_supp_teedev;
+
+ rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
+ if (rc)
+ goto err_unreg_supp_teedev;
+ mutex_init(&optee->ffa.mutex);
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_supp_init(&optee->supp);
+ optee_shm_arg_cache_init(optee, arg_cache_flags);
+ ffa_dev_set_drvdata(ffa_dev, optee);
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ goto err_rhashtable_free;
+ }
+ optee->ctx = ctx;
+ rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
+ if (rc)
+ goto err_close_ctx;
+
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
+ if (rc)
+ goto err_unregister_devices;
+
+ pr_info("initialized driver\n");
+ return 0;
+
+err_unregister_devices:
+ optee_unregister_devices();
+ optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
+err_rhashtable_free:
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+ mutex_destroy(&optee->ffa.mutex);
+err_unreg_supp_teedev:
+ tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
+ tee_device_unregister(optee->teedev);
+err_free_pool:
+ tee_shm_pool_free(pool);
+err_free_optee:
+ kfree(optee);
+ return rc;
+}
+
+static const struct ffa_device_id optee_ffa_device_id[] = {
+ /* 486178e0-e7f8-11e3-bc5e-0002a5d5c51b */
+ { UUID_INIT(0x486178e0, 0xe7f8, 0x11e3,
+ 0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b) },
+ {}
+};
+
+static struct ffa_driver optee_ffa_driver = {
+ .name = "optee",
+ .probe = optee_ffa_probe,
+ .remove = optee_ffa_remove,
+ .id_table = optee_ffa_device_id,
+};
+
+int optee_ffa_abi_register(void)
+{
+ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
+ return ffa_register(&optee_ffa_driver);
+ else
+ return -EOPNOTSUPP;
+}
+
+void optee_ffa_abi_unregister(void)
+{
+ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
+ ffa_unregister(&optee_ffa_driver);
+}
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
new file mode 100644
index 0000000000..05212842b0
--- /dev/null
+++ b/drivers/tee/optee/notif.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+
+struct notif_entry {
+ struct list_head link;
+ struct completion c;
+ u_int key;
+};
+
+static bool have_key(struct optee *optee, u_int key)
+{
+ struct notif_entry *entry;
+
+ list_for_each_entry(entry, &optee->notif.db, link)
+ if (entry->key == key)
+ return true;
+
+ return false;
+}
+
+int optee_notif_wait(struct optee *optee, u_int key)
+{
+ unsigned long flags;
+ struct notif_entry *entry;
+ int rc = 0;
+
+ if (key > optee->notif.max_key)
+ return -EINVAL;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ init_completion(&entry->c);
+ entry->key = key;
+
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ /*
+ * If the bit is already set it means that the key has already
+ * been posted and we must not wait.
+ */
+ if (test_bit(key, optee->notif.bitmap)) {
+ clear_bit(key, optee->notif.bitmap);
+ goto out;
+ }
+
+ /*
+ * Check if someone is already waiting for this key. If there is,
+ * it's a programming error.
+ */
+ if (have_key(optee, key)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ list_add_tail(&entry->link, &optee->notif.db);
+
+ /*
+ * Unlock temporarily and wait for completion.
+ */
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+ wait_for_completion(&entry->c);
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ list_del(&entry->link);
+out:
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+
+ kfree(entry);
+
+ return rc;
+}
+
+int optee_notif_send(struct optee *optee, u_int key)
+{
+ unsigned long flags;
+ struct notif_entry *entry;
+
+ if (key > optee->notif.max_key)
+ return -EINVAL;
+
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ list_for_each_entry(entry, &optee->notif.db, link)
+ if (entry->key == key) {
+ complete(&entry->c);
+ goto out;
+ }
+
+ /* Only set the bit in case there was nobody waiting */
+ set_bit(key, optee->notif.bitmap);
+out:
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+
+ return 0;
+}
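The wait/send pair above lets a thread park on a key until secure world (or an interrupt handler acting on its behalf) posts that key; if the send happens first, the bitmap latches the notification so the wait returns immediately. A hedged usage sketch, with the key value purely illustrative:

	/* Thread A, e.g. serving a notification-wait RPC from secure world: */
	rc = optee_notif_wait(optee, 42);	/* sleeps until key 42 is posted */

	/* Thread B, e.g. triggered by an asynchronous notification: */
	optee_notif_send(optee, 42);		/* wakes A, or latches the bit */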
+
+int optee_notif_init(struct optee *optee, u_int max_key)
+{
+ spin_lock_init(&optee->notif.lock);
+ INIT_LIST_HEAD(&optee->notif.db);
+ optee->notif.bitmap = bitmap_zalloc(max_key, GFP_KERNEL);
+ if (!optee->notif.bitmap)
+ return -ENOMEM;
+
+ optee->notif.max_key = max_key;
+
+ return 0;
+}
+
+void optee_notif_uninit(struct optee *optee)
+{
+ bitmap_free(optee->notif.bitmap);
+}
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
new file mode 100644
index 0000000000..97266243de
--- /dev/null
+++ b/drivers/tee/optee/optee_ffa.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2019-2021, Linaro Limited
+ */
+
+/*
+ * This file is exported by OP-TEE and is kept in sync between secure world
+ * and normal world drivers. We're using the ARM FF-A 1.0 specification.
+ */
+
+#ifndef __OPTEE_FFA_H
+#define __OPTEE_FFA_H
+
+#include <linux/arm_ffa.h>
+
+/*
+ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and
+ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal
+ * messages.
+ *
+ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
+ * are using the AArch32 SMC calling convention with register usage as
+ * defined in FF-A specification:
+ * w0: Function ID (0x8400006F or 0x84000070)
+ * w1: Source/Destination IDs
+ * w2: Reserved (MBZ)
+ * w3-w7: Implementation defined, free to be used below
+ */
+
+#define OPTEE_FFA_VERSION_MAJOR 1
+#define OPTEE_FFA_VERSION_MINOR 0
+
+#define OPTEE_FFA_BLOCKING_CALL(id) (id)
+#define OPTEE_FFA_YIELDING_CALL_BIT 31
+#define OPTEE_FFA_YIELDING_CALL(id) ((id) | BIT(OPTEE_FFA_YIELDING_CALL_BIT))
+
+/*
+ * Returns the API version implemented, currently follows the FF-A version.
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_GET_API_VERSION
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: OPTEE_FFA_VERSION_MAJOR
+ * w4: OPTEE_FFA_VERSION_MINOR
+ * w5-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_GET_API_VERSION OPTEE_FFA_BLOCKING_CALL(0)
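In driver terms, the w3-w7 convention above is carried in struct ffa_send_direct_data, whose data0..data4 fields correspond to w3..w7 (w0-w2 are filled in by the FF-A transport). A minimal sketch of issuing the blocking call just defined, mirroring what optee_ffa_api_is_compatible() in ffa_abi.c does:

	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_GET_API_VERSION,	/* w3: service ID */
	};
	int rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);

	/* On success, data.data0 (w3) holds the major version and
	 * data.data1 (w4) the minor version. */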
+
+/*
+ * Returns the revision of OP-TEE.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_GET_OS_VERSION
+ * w4-w7: Unused (MBZ)
+ *
+ * Return register usage:
+ * w3: CFG_OPTEE_REVISION_MAJOR
+ * w4: CFG_OPTEE_REVISION_MINOR
+ * w5: TEE_IMPL_GIT_SHA1 (or zero if not supported)
+ */
+#define OPTEE_FFA_GET_OS_VERSION OPTEE_FFA_BLOCKING_CALL(1)
+
+/*
+ * Exchange capabilities between normal world and secure world.
+ *
+ * The currently defined capabilities are the OPTEE_FFA_SEC_CAP_* bits
+ * below. When features are added new capabilities may be added.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_EXCHANGE_CAPABILITIES
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4: Bit[7:0]: Number of parameters needed for RPC to be supplied
+ * as the second MSG arg struct for
+ * OPTEE_FFA_YIELDING_CALL_WITH_ARG.
+ * Bit[31:8]: Reserved (MBZ)
+ * w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
+ * unused bits MBZ.
+ * w6-w7: Not used (MBZ)
+ */
+/*
+ * Secure world supports giving an offset into the argument shared memory
+ * object, see also OPTEE_FFA_YIELDING_CALL_WITH_ARG
+ */
+#define OPTEE_FFA_SEC_CAP_ARG_OFFSET BIT(0)
+
+#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
+
+/*
+ * Unregister shared memory
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_UNREGISTER_SHM
+ * w4: Shared memory handle, lower bits
+ * w5: Shared memory handle, higher bits
+ * w6-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3)
+
+/*
+ * Call with struct optee_msg_arg as argument in the supplied shared memory
+ * with a zero internal offset and normal cached memory attributes.
+ * Register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_WITH_ARG
+ * w4: Lower 32 bits of a 64-bit Shared memory handle
+ * w5: Upper 32 bits of a 64-bit Shared memory handle
+ * w6: Offset into shared memory pointing to a struct optee_msg_arg.
+ *     Right after the parameters of this struct (at offset
+ *     OPTEE_MSG_GET_ARG_SIZE(num_params)) follows a second struct
+ *     optee_msg_arg for RPC; that struct has reserved space for the
+ *     number of RPC parameters as returned by
+ *     OPTEE_FFA_EXCHANGE_CAPABILITIES.
+ *     MBZ unless the bit OPTEE_FFA_SEC_CAP_ARG_OFFSET is received with
+ *     OPTEE_FFA_EXCHANGE_CAPABILITIES.
+ * w7: Not used (MBZ)
+ * Resume from RPC. Register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_RESUME
+ * w4-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Normal return (yielding call is completed). Register usage:
+ * w3: Error code, 0 on success
+ * w4: OPTEE_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w7: Not used (MBZ)
+ *
+ * RPC interrupt return (RPC from secure world). Register usage:
+ * w3: Error code == 0
+ * w4: Any defined RPC code but OPTEE_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Possible error codes in register w3:
+ * 0: Success
+ * FFA_DENIED: w4 isn't one of OPTEE_FFA_YIELDING_CALL_WITH_ARG or
+ *             OPTEE_FFA_YIELDING_CALL_RESUME
+ *
+ * Possible error codes for OPTEE_FFA_YIELDING_CALL_WITH_ARG:
+ * FFA_BUSY: Number of OP-TEE OS threads exceeded,
+ * try again later
+ * FFA_DENIED: RPC shared memory object not found
+ * FFA_INVALID_PARAMETER: Bad shared memory handle or offset into the memory
+ *
+ * Possible error codes for OPTEE_FFA_YIELDING_CALL_RESUME
+ * FFA_INVALID_PARAMETER: Bad resume info
+ */
+#define OPTEE_FFA_YIELDING_CALL_WITH_ARG OPTEE_FFA_YIELDING_CALL(0)
+#define OPTEE_FFA_YIELDING_CALL_RESUME OPTEE_FFA_YIELDING_CALL(1)
+
+#define OPTEE_FFA_YIELDING_CALL_RETURN_DONE 0
+#define OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD 1
+#define OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT 2
+
+#endif /*__OPTEE_FFA_H*/
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
new file mode 100644
index 0000000000..e8840a82b9
--- /dev/null
+++ b/drivers/tee/optee/optee_msg.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+#ifndef _OPTEE_MSG_H
+#define _OPTEE_MSG_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/*
+ * This file defines the OP-TEE message protocol (ABI) used to communicate
+ * with an instance of OP-TEE running in secure world.
+ *
+ * This file is divided into two sections.
+ * 1. Formatting of messages.
+ * 2. Requests from normal world
+ */
+
+/*****************************************************************************
+ * Part 1 - formatting of messages
+ *****************************************************************************/
+
+#define OPTEE_MSG_ATTR_TYPE_NONE 0x0
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1
+#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5
+#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7
+#define OPTEE_MSG_ATTR_TYPE_FMEM_INPUT OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+#define OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT
+#define OPTEE_MSG_ATTR_TYPE_FMEM_INOUT OPTEE_MSG_ATTR_TYPE_RMEM_INOUT
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9
+#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb
+
+#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0)
+
+/*
+ * Meta parameter to be absorbed by the Secure OS and not passed
+ * to the Trusted Application.
+ *
+ * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION.
+ */
+#define OPTEE_MSG_ATTR_META BIT(8)
+
+/*
+ * Pointer to a list of pages used to register user-defined SHM buffer.
+ * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
+ * buf_ptr should point to the beginning of the buffer. The buffer
+ * contains a list of page addresses, from which the OP-TEE core can
+ * reconstruct the contiguous buffer. Page addresses are stored as 64 bit
+ * values. The last entry on a page should point to the next page of the
+ * buffer. Every entry in the buffer should point to the beginning of a
+ * 4k page (the 12 least significant bits must be equal to zero).
+ *
+ * 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should hold
+ * page offset of user buffer.
+ *
+ * So, entries should be placed like members of this structure:
+ *
+ * struct page_data {
+ * uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
+ * uint64_t next_page_data;
+ * };
+ *
+ * Structure is designed to exactly fit into the page size
+ * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page.
+ *
+ * The size of 4KB is chosen because this is the smallest page size for ARM
+ * architectures. If REE uses larger pages, it should divide them into 4KB ones.
+ */
+#define OPTEE_MSG_ATTR_NONCONTIG BIT(9)
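+
+/*
+ * For example, with OPTEE_MSG_NONCONTIG_PAGE_SIZE of 4096 and 64-bit
+ * entries, each list page holds 4096 / 8 - 1 = 511 buffer page addresses
+ * plus one link entry, so a 2 MiB buffer (512 pages) needs two list pages.
+ */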
+
+/*
+ * Memory attributes for caching passed with temp memrefs. The actual value
+ * used is defined outside the message protocol with the exception of
+ * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already
+ * defined for the memory range should be used. If optee_smc.h is used as
+ * bearer of this protocol OPTEE_SMC_SHM_* is used for values.
+ */
+#define OPTEE_MSG_ATTR_CACHE_SHIFT 16
+#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0)
+#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0
+
+/*
+ * Same values as TEE_LOGIN_* from TEE Internal API
+ */
+#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000
+#define OPTEE_MSG_LOGIN_USER 0x00000001
+#define OPTEE_MSG_LOGIN_GROUP 0x00000002
+#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004
+#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005
+#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006
+
+/*
+ * Page size used in non-contiguous buffer entries
+ */
+#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096
+
+#define OPTEE_MSG_FMEM_INVALID_GLOBAL_ID 0xffffffffffffffff
+
+/**
+ * struct optee_msg_param_tmem - temporary memory reference parameter
+ * @buf_ptr: Address of the buffer
+ * @size: Size of the buffer
+ * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm
+ *
+ * Secure and normal world communicate pointers as physical addresses
+ * instead of virtual addresses. This is because secure and normal world
+ * have completely independent memory mappings. Normal world can even have
+ * a hypervisor which needs to translate the guest physical address (AKA
+ * IPA in ARM documentation) to a real physical address before passing the
+ * structure to secure world.
+ */
+struct optee_msg_param_tmem {
+ u64 buf_ptr;
+ u64 size;
+ u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_rmem - registered memory reference parameter
+ * @offs: Offset into shared memory reference
+ * @size: Size of the buffer
+ * @shm_ref: Shared memory reference, pointer to a struct tee_shm
+ */
+struct optee_msg_param_rmem {
+ u64 offs;
+ u64 size;
+ u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_fmem - ffa memory reference parameter
+ * @offs_low: Lower bits of offset into shared memory reference
+ * @offs_high: Upper bits of offset into shared memory reference
+ * @internal_offs: Internal offset into the first page of shared memory
+ * reference
+ * @size: Size of the buffer
+ * @global_id: Global identifier of Shared memory
+ */
+struct optee_msg_param_fmem {
+ u32 offs_low;
+ u16 offs_high;
+ u16 internal_offs;
+ u64 size;
+ u64 global_id;
+};
+
+/**
+ * struct optee_msg_param_value - opaque value parameter
+ *
+ * Value parameters are passed unchecked between normal and secure world.
+ */
+struct optee_msg_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
+};
+
+/**
+ * struct optee_msg_param - parameter used together with struct optee_msg_arg
+ * @attr: attributes
+ * @tmem: parameter by temporary memory reference
+ * @rmem: parameter by registered memory reference
+ * @fmem: parameter by ffa registered memory reference
+ * @value: parameter by opaque value
+ * @octets: parameter by octet string
+ *
+ * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates @value or @octets,
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* or the alias OPTEE_MSG_ATTR_TYPE_FMEM_* indicates
+ * @rmem or @fmem depending on the conduit.
+ * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
+ */
+struct optee_msg_param {
+ u64 attr;
+ union {
+ struct optee_msg_param_tmem tmem;
+ struct optee_msg_param_rmem rmem;
+ struct optee_msg_param_fmem fmem;
+ struct optee_msg_param_value value;
+ u8 octets[24];
+ } u;
+};
+
+/**
+ * struct optee_msg_arg - call argument
+ * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_*
+ * @func: Trusted Application function, specific to the Trusted Application,
+ * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND
+ * @session: In parameter for all OPTEE_MSG_CMD_* except
+ * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead
+ * @cancel_id: Cancellation id, a unique value to identify this request
+ * @ret: return value
+ * @ret_origin: origin of the return value
+ * @num_params: number of parameters supplied to the OS Command
+ * @params: the parameters supplied to the OS Command
+ *
+ * All normal calls to the Trusted OS use this struct. If cmd requires more
+ * information than these fields hold, it can be passed as a parameter
+ * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in the corresponding
+ * attr field). All parameters tagged as meta have to come first.
+ */
+struct optee_msg_arg {
+ u32 cmd;
+ u32 func;
+ u32 session;
+ u32 cancel_id;
+ u32 pad;
+ u32 ret;
+ u32 ret_origin;
+ u32 num_params;
+
+	/* num_params tells the actual number of elements in params */
+ struct optee_msg_param params[];
+};
+
+/**
+ * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg
+ *
+ * @num_params: Number of parameters embedded in the struct optee_msg_arg
+ *
+ * Returns the size of the struct optee_msg_arg together with the number
+ * of embedded parameters.
+ */
+#define OPTEE_MSG_GET_ARG_SIZE(num_params) \
+ (sizeof(struct optee_msg_arg) + \
+ sizeof(struct optee_msg_param) * (num_params))
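+
+/*
+ * For example, with the layouts above (a 32-byte struct optee_msg_arg
+ * header and 32-byte parameters), OPTEE_MSG_GET_ARG_SIZE(4) is
+ * 32 + 4 * 32 = 160 bytes.
+ */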
+
+/*****************************************************************************
+ * Part 2 - requests from normal world
+ *****************************************************************************/
+
+/*
+ * Return the following UID if using API specified in this file without
+ * further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
+ * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
+ *
+ * In the case where the OP-TEE image is loaded by the kernel, this will
+ * initially return an alternate UID to reflect that we are communicating with
+ * the TF-A image loading service at that time instead of OP-TEE. That UID is:
+ * a3fbeab1-1246-315d-c7c4-06b9c03cbea4.
+ * Represented in 4 32-bit words in OPTEE_MSG_IMAGE_LOAD_UID_0,
+ * OPTEE_MSG_IMAGE_LOAD_UID_1, OPTEE_MSG_IMAGE_LOAD_UID_2,
+ * OPTEE_MSG_IMAGE_LOAD_UID_3.
+ */
+#define OPTEE_MSG_UID_0 0x384fb3e0
+#define OPTEE_MSG_UID_1 0xe7f811e3
+#define OPTEE_MSG_UID_2 0xaf630002
+#define OPTEE_MSG_UID_3 0xa5d5c51b
+#define OPTEE_MSG_IMAGE_LOAD_UID_0 0xa3fbeab1
+#define OPTEE_MSG_IMAGE_LOAD_UID_1 0x1246315d
+#define OPTEE_MSG_IMAGE_LOAD_UID_2 0xc7c406b9
+#define OPTEE_MSG_IMAGE_LOAD_UID_3 0xc03cbea4
+#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01
+
+/*
+ * Returns 2.0 if using API specified in this file without further
+ * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR
+ * and OPTEE_MSG_REVISION_MINOR
+ */
+#define OPTEE_MSG_REVISION_MAJOR 2
+#define OPTEE_MSG_REVISION_MINOR 0
+#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in 4 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_UID described above.
+ */
+#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0
+#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3
+#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002
+#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b
+#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in 2 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_REVISION described above.
+ */
+#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
+
+/*
+ * Do a secure call with struct optee_msg_arg as argument
+ * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
+ *
+ * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application.
+ * The first two parameters are tagged as meta, holding two value
+ * parameters to pass the following information:
+ * param[0].u.value.a-b uuid of Trusted Application
+ * param[1].u.value.a-b uuid of Client
+ * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_*
+ *
+ * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command in a previously opened
+ * session to a Trusted Application. struct optee_msg_arg::func is the
+ * Trusted Application function, specific to the Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to
+ * Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command.
+ *
+ * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
+ * information is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ * [| OPTEE_MSG_ATTR_NONCONTIG]
+ * [in] param[0].u.tmem.buf_ptr physical address (of first fragment)
+ * [in] param[0].u.tmem.size size (of first fragment)
+ * [in] param[0].u.tmem.shm_ref holds shared memory reference
+ *
+ * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
+ * memory reference. The information is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+ * [in] param[0].u.rmem.shm_ref holds shared memory reference
+ * [in] param[0].u.rmem.offs 0
+ * [in] param[0].u.rmem.size 0
+ *
+ * OPTEE_MSG_CMD_DO_BOTTOM_HALF does the scheduled bottom half processing
+ * of a driver.
+ *
+ * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now on
+ * normal world is unable to process asynchronous notifications. Typically
+ * used when the driver is shut down.
+ */
+#define OPTEE_MSG_CMD_OPEN_SESSION 0
+#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
+#define OPTEE_MSG_CMD_CLOSE_SESSION 2
+#define OPTEE_MSG_CMD_CANCEL 3
+#define OPTEE_MSG_CMD_REGISTER_SHM 4
+#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
+#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
+#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
+
+#endif /* _OPTEE_MSG_H */
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
new file mode 100644
index 0000000000..6bb5cae096
--- /dev/null
+++ b/drivers/tee/optee/optee_private.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+
+#ifndef OPTEE_PRIVATE_H
+#define OPTEE_PRIVATE_H
+
+#include <linux/arm-smccc.h>
+#include <linux/rhashtable.h>
+#include <linux/semaphore.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_msg.h"
+
+#define DRIVER_NAME "optee"
+
+#define OPTEE_MAX_ARG_SIZE 1024
+
+/* Some Global Platform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define TEEC_ERROR_BUSY 0xFFFF000D
+#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+/*
+ * This value should be larger than the number of threads in secure world
+ * to meet the need from secure world. The number of threads in secure
+ * world is usually not even close to 255, so we should be safe for now.
+ */
+#define OPTEE_DEFAULT_MAX_NOTIF_VALUE 255
+
+typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ struct arm_smccc_res *);
+
+struct optee_call_waiter {
+ struct list_head list_node;
+ struct completion c;
+};
+
+struct optee_call_queue {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head waiters;
+};
+
+struct optee_notif {
+ u_int max_key;
+ /* Serializes access to the elements below in this struct */
+ spinlock_t lock;
+ struct list_head db;
+ u_long *bitmap;
+};
+
+#define OPTEE_SHM_ARG_ALLOC_PRIV BIT(0)
+#define OPTEE_SHM_ARG_SHARED BIT(1)
+struct optee_shm_arg_entry;
+struct optee_shm_arg_cache {
+ u32 flags;
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head shm_args;
+};
+
+/**
+ * struct optee_supp - supplicant synchronization struct
+ * @ctx: the context of the currently connected supplicant;
+ * if !NULL the supplicant device is available for use,
+ * else busy
+ * @mutex: held while accessing content of this struct
+ * @req_id: current request id if supplicant is doing synchronous
+ * communication, else -1
+ * @reqs: queued requests not yet retrieved by supplicant
+ * @idr: IDR holding all requests currently being processed
+ * by supplicant
+ * @reqs_c: completion used by supplicant when waiting for a
+ * request to be queued.
+ */
+struct optee_supp {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct tee_context *ctx;
+
+ int req_id;
+ struct list_head reqs;
+ struct idr idr;
+ struct completion reqs_c;
+};
+
+/*
+ * struct optee_pcpu - per cpu notif private struct passed to work functions
+ * @optee: optee device reference
+ */
+struct optee_pcpu {
+ struct optee *optee;
+};
+
+/*
+ * struct optee_smc - optee smc communication struct
+ * @invoke_fn: handler function to invoke secure monitor
+ * @memremaped_shm: virtual address of memory in shared memory pool
+ * @sec_caps: secure world capabilities defined by
+ * OPTEE_SMC_SEC_CAP_* in optee_smc.h
+ * @notif_irq: interrupt used as async notification by OP-TEE or 0
+ * @optee_pcpu: per_cpu optee instance for per cpu work or NULL
+ * @notif_pcpu_wq: workqueue for per cpu asynchronous notification or NULL
+ * @notif_pcpu_work: work for per cpu asynchronous notification
+ * @notif_cpuhp_state: CPU hotplug state assigned for pcpu interrupt management
+ */
+struct optee_smc {
+ optee_invoke_fn *invoke_fn;
+ void *memremaped_shm;
+ u32 sec_caps;
+ unsigned int notif_irq;
+ struct optee_pcpu __percpu *optee_pcpu;
+ struct workqueue_struct *notif_pcpu_wq;
+ struct work_struct notif_pcpu_work;
+ unsigned int notif_cpuhp_state;
+};
+
+/**
+ * struct optee_ffa - FF-A communication struct
+ * @ffa_dev: FFA device, contains the destination id, the id of
+ * OP-TEE in secure world
+ * @mutex: Serializes access to @global_ids
+ * @global_ids: FF-A shared memory global handle translation
+ */
+struct optee_ffa {
+ struct ffa_device *ffa_dev;
+ /* Serializes access to @global_ids */
+ struct mutex mutex;
+ struct rhashtable global_ids;
+};
+
+struct optee;
+
+/**
+ * struct optee_ops - OP-TEE driver internal operations
+ * @do_call_with_arg: enters OP-TEE in secure world
+ * @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters
+ * @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param
+ *
+ * These OPs are only supposed to be used internally in the OP-TEE driver
+ * as a way of abstracting the different methods of entering OP-TEE in
+ * secure world.
+ */
+struct optee_ops {
+ int (*do_call_with_arg)(struct tee_context *ctx,
+ struct tee_shm *shm_arg, u_int offs);
+ int (*to_msg_param)(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params, const struct tee_param *params);
+ int (*from_msg_param)(struct optee *optee, struct tee_param *params,
+ size_t num_params,
+ const struct optee_msg_param *msg_params);
+};
+
+/**
+ * struct optee - main service struct
+ * @supp_teedev: supplicant device
+ * @teedev: client device
+ * @ops: internal callbacks for different ways to reach secure
+ * world
+ * @ctx: driver internal TEE context
+ * @smc: specific to SMC ABI
+ * @ffa: specific to FF-A ABI
+ * @call_queue: queue of threads waiting to call @invoke_fn
+ * @notif: notification synchronization struct
+ * @supp: supplicant synchronization struct for RPC to supplicant
+ * @pool: shared memory pool
+ * @rpc_param_count: If > 0 number of RPC parameters to make room for
+ * @scan_bus_done: flag if device registration was already done
+ * @scan_bus_wq: workqueue to scan optee bus and register optee drivers
+ * @scan_bus_work: work to scan optee bus and register optee drivers
+ */
+struct optee {
+ struct tee_device *supp_teedev;
+ struct tee_device *teedev;
+ const struct optee_ops *ops;
+ struct tee_context *ctx;
+ union {
+ struct optee_smc smc;
+ struct optee_ffa ffa;
+ };
+ struct optee_shm_arg_cache shm_arg_cache;
+ struct optee_call_queue call_queue;
+ struct optee_notif notif;
+ struct optee_supp supp;
+ struct tee_shm_pool *pool;
+ unsigned int rpc_param_count;
+ bool scan_bus_done;
+ struct workqueue_struct *scan_bus_wq;
+ struct work_struct scan_bus_work;
+};
+
+struct optee_session {
+ struct list_head list_node;
+ u32 session_id;
+};
+
+struct optee_context_data {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head sess_list;
+};
+
+struct optee_rpc_param {
+ u32 a0;
+ u32 a1;
+ u32 a2;
+ u32 a3;
+ u32 a4;
+ u32 a5;
+ u32 a6;
+ u32 a7;
+};
+
+/* Holds context that is preserved during one STD call */
+struct optee_call_ctx {
+ /* information about pages list used in last allocation */
+ void *pages_list;
+ size_t num_entries;
+};
+
+int optee_notif_init(struct optee *optee, u_int max_key);
+void optee_notif_uninit(struct optee *optee);
+int optee_notif_wait(struct optee *optee, u_int key);
+int optee_notif_send(struct optee *optee, u_int key);
+
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ struct tee_param *param);
+
+void optee_supp_init(struct optee_supp *supp);
+void optee_supp_uninit(struct optee_supp *supp);
+void optee_supp_release(struct optee_supp *supp);
+
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+
+int optee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+int optee_close_session_helper(struct tee_context *ctx, u32 session);
+int optee_close_session(struct tee_context *ctx, u32 session);
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+#define PTA_CMD_GET_DEVICES 0x0
+#define PTA_CMD_GET_DEVICES_SUPP 0x1
+int optee_enumerate_devices(u32 func);
+void optee_unregister_devices(void);
+
+int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start));
+void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm));
+
+void optee_remove_common(struct optee *optee);
+int optee_open(struct tee_context *ctx, bool cap_memref_null);
+void optee_release(struct tee_context *ctx);
+void optee_release_supp(struct tee_context *ctx);
+
+static inline void optee_from_msg_param_value(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ p->u.value.a = mp->u.value.a;
+ p->u.value.b = mp->u.value.b;
+ p->u.value.c = mp->u.value.c;
+}
+
+static inline void optee_to_msg_param_value(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ mp->u.value.a = p->u.value.a;
+ mp->u.value.b = p->u.value.b;
+ mp->u.value.c = p->u.value.c;
+}
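+
+/*
+ * The two helpers above rely on OPTEE_MSG_ATTR_TYPE_VALUE_* and
+ * TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_* enumerating INPUT, OUTPUT and INOUT
+ * consecutively in the same order, so a plain offset converts between
+ * them. For example, an OPTEE_MSG_ATTR_TYPE_VALUE_INOUT attribute maps to
+ * TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT and back.
+ */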
+
+void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
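+
+/*
+ * Expected calling pattern for the three helpers above, as a sketch; the
+ * loop shape is an assumption based on how OPTEE_SMC_RETURN_ETHREAD_LIMIT
+ * is documented in optee_smc.h and do_the_secure_call() is hypothetical:
+ *
+ *	optee_cq_wait_init(cq, &w);
+ *	while ((rc = do_the_secure_call()) == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
+ *		// All secure world threads busy, sleep until one is freed.
+ *		optee_cq_wait_for_completion(cq, &w);
+ *	}
+ *	optee_cq_wait_final(cq, &w);
+ */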
+int optee_check_mem_type(unsigned long start, size_t num_pages);
+
+void optee_shm_arg_cache_init(struct optee *optee, u32 flags);
+void optee_shm_arg_cache_uninit(struct optee *optee);
+struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
+ size_t num_params,
+ struct optee_shm_arg_entry **entry,
+ struct tee_shm **shm_ret,
+ u_int *offs);
+void optee_free_msg_arg(struct tee_context *ctx,
+ struct optee_shm_arg_entry *entry, u_int offs);
+size_t optee_msg_arg_size(size_t rpc_param_count);
+
+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz);
+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg);
+
+/*
+ * Small helpers
+ */
+
+static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1)
+{
+ return (void *)(unsigned long)(((u64)reg0 << 32) | reg1);
+}
+
+static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
+{
+ *reg0 = val >> 32;
+ *reg1 = val;
+}
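+
+/*
+ * For example, reg_pair_from_64(&r0, &r1, 0x1122334455667788) yields
+ * r0 == 0x11223344 and r1 == 0x55667788, and
+ * reg_pair_to_ptr(0x11223344, 0x55667788) recombines them into a pointer.
+ */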
+
+/* Registration of the ABIs */
+int optee_smc_abi_register(void);
+void optee_smc_abi_unregister(void);
+int optee_ffa_abi_register(void);
+void optee_ffa_abi_unregister(void);
+
+#endif /*OPTEE_PRIVATE_H*/
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
new file mode 100644
index 0000000000..f3f06e0994
--- /dev/null
+++ b/drivers/tee/optee/optee_rpc_cmd.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2016-2021, Linaro Limited
+ */
+
+#ifndef __OPTEE_RPC_CMD_H
+#define __OPTEE_RPC_CMD_H
+
+/*
+ * All RPC is done with a struct optee_msg_arg as bearer of information,
+ * struct optee_msg_arg::cmd holds values defined by OPTEE_RPC_CMD_* below.
+ * Only the commands handled by the kernel driver are defined here.
+ *
+ * RPC communication with tee-supplicant is reversed compared to normal
+ * client communication. The supplicant receives requests and sends
+ * responses.
+ */
+
+/*
+ * Get time
+ *
+ * Returns number of seconds and nanoseconds since the Epoch,
+ * 1970-01-01 00:00:00 +0000 (UTC).
+ *
+ * [out] value[0].a Number of seconds
+ * [out] value[0].b Number of nanoseconds.
+ */
+#define OPTEE_RPC_CMD_GET_TIME 3
+
+/*
+ * Notification from/to secure world.
+ *
+ * If secure world needs to wait for something, for instance a mutex, it
+ * does a notification wait request instead of spinning in secure world.
+ * Conversely, a synchronous notification can be sent when a secure
+ * world mutex with a waiting thread is unlocked.
+ *
+ * This interface can also be used to wait for an asynchronous
+ * notification, which instead is sent via a non-secure interrupt.
+ *
+ * Waiting on notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT
+ * [in] value[0].b notification value
+ *
+ * Sending a synchronous notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND
+ * [in] value[0].b notification value
+ */
+#define OPTEE_RPC_CMD_NOTIFICATION 4
+#define OPTEE_RPC_NOTIFICATION_WAIT 0
+#define OPTEE_RPC_NOTIFICATION_SEND 1
+
+/*
+ * Suspend execution
+ *
+ * [in] value[0].a Number of milliseconds to suspend
+ */
+#define OPTEE_RPC_CMD_SUSPEND 5
+
+/*
+ * Allocate a piece of shared memory
+ *
+ * [in] value[0].a Type of memory one of
+ * OPTEE_RPC_SHM_TYPE_* below
+ * [in] value[0].b Requested size
+ * [in] value[0].c Required alignment
+ * [out] memref[0] Buffer
+ */
+#define OPTEE_RPC_CMD_SHM_ALLOC 6
+/* Memory that can be shared with a non-secure user space application */
+#define OPTEE_RPC_SHM_TYPE_APPL 0
+/* Memory only shared with non-secure kernel */
+#define OPTEE_RPC_SHM_TYPE_KERNEL 1
+
+/*
+ * Free shared memory previously allocated with OPTEE_RPC_CMD_SHM_ALLOC
+ *
+ * [in] value[0].a Type of memory one of
+ * OPTEE_RPC_SHM_TYPE_* above
+ * [in] value[0].b Value of shared memory reference or cookie
+ */
+#define OPTEE_RPC_CMD_SHM_FREE 7
+
+/*
+ * Issue master requests (read and write operations) to an I2C chip.
+ *
+ * [in] value[0].a Transfer mode (OPTEE_RPC_I2C_TRANSFER_*)
+ * [in] value[0].b The I2C bus (a.k.a. adapter).
+ * 16 bit field.
+ * [in] value[0].c The I2C chip (a.k.a. address).
+ * 16 bit field (either 7 or 10 bit effective).
+ * [in] value[1].a The I2C master control flags (i.e., 10 bit address).
+ * 16 bit field.
+ * [in/out] memref[2] Buffer used for data transfers.
+ * [out] value[3].a Number of bytes transferred by the REE.
+ */
+#define OPTEE_RPC_CMD_I2C_TRANSFER 21
+
+/* I2C master transfer modes */
+#define OPTEE_RPC_I2C_TRANSFER_RD 0
+#define OPTEE_RPC_I2C_TRANSFER_WR 1
+
+/* I2C master control flags */
+#define OPTEE_RPC_I2C_FLAGS_TEN_BIT BIT(0)
+
+#endif /*__OPTEE_RPC_CMD_H*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
new file mode 100644
index 0000000000..7d9fa42650
--- /dev/null
+++ b/drivers/tee/optee/optee_smc.h
@@ -0,0 +1,603 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+#ifndef OPTEE_SMC_H
+#define OPTEE_SMC_H
+
+#include <linux/arm-smccc.h>
+#include <linux/bitops.h>
+
+#define OPTEE_SMC_STD_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
+#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
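+
+/*
+ * Assuming the SMCCC encoding in linux/arm-smccc.h (fast call sets bit 31,
+ * the Trusted OS owner value is 50), OPTEE_SMC_FAST_CALL_VAL(1) evaluates
+ * to 0xb2000001 and OPTEE_SMC_STD_CALL_VAL(4) to 0x32000004.
+ */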
+
+/*
+ * Function specified by SMC Calling convention.
+ */
+#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00
+#define OPTEE_SMC_CALLS_COUNT \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_COUNT)
+
+/*
+ * Normal cached memory (write-back), shareable for SMP systems and not
+ * shareable for UP systems.
+ */
+#define OPTEE_SMC_SHM_CACHED 1
+
+/*
+ * a0..a7 are used as register names in the descriptions below; on arm32
+ * they translate to r0..r7 and on arm64 to w0..w7. In both cases they are
+ * 32-bit registers.
+ */
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Return the following UID if using API specified in this file
+ * without further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * see also OPTEE_MSG_UID_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
+#define OPTEE_SMC_CALLS_UID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_UID)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns 2.0 if using API specified in this file without further extensions.
+ * see also OPTEE_MSG_REVISION_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
+#define OPTEE_SMC_CALLS_REVISION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_REVISION)
+
+struct optee_smc_calls_revision_result {
+ unsigned long major;
+ unsigned long minor;
+ unsigned long reserved0;
+ unsigned long reserved1;
+};
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in a0-3 in the same way as OPTEE_SMC_CALLS_UID
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
+#define OPTEE_SMC_CALL_GET_OS_UUID \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
+ * described above. May optionally return a 32-bit build identifier in a2,
+ * with zero meaning unspecified.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
+#define OPTEE_SMC_CALL_GET_OS_REVISION \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
+
+struct optee_smc_call_get_os_revision_result {
+ unsigned long major;
+ unsigned long minor;
+ unsigned long build_id;
+ unsigned long reserved1;
+};
+
+/*
+ * Load Trusted OS from optee/tee.bin in the Linux firmware.
+ *
+ * WARNING: Use this cautiously as it could lead to insecure loading of the
+ * Trusted OS.
+ * This SMC instructs EL3 to load a binary and execute it as the Trusted OS.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_LOAD_IMAGE
+ * a1 Upper 32bit of a 64bit size for the payload
+ * a2 Lower 32bit of a 64bit size for the payload
+ * a3 Upper 32bit of the physical address for the payload
+ * a4 Lower 32bit of the physical address for the payload
+ *
+ * The payload is in the OP-TEE image format.
+ *
+ * Returns result in a0, 0 on success and an error code otherwise.
+ */
+#define OPTEE_SMC_FUNCID_LOAD_IMAGE 2
+#define OPTEE_SMC_CALL_LOAD_IMAGE \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_LOAD_IMAGE)
+
+/*
+ * Call with struct optee_msg_arg as argument
+ *
+ * When called with OPTEE_SMC_CALL_WITH_RPC_ARG or
+ * OPTEE_SMC_CALL_WITH_REGD_ARG in a0 there is one RPC struct optee_msg_arg
+ * following after the first struct optee_msg_arg. The RPC struct
+ * optee_msg_arg has reserved space for the number of RPC parameters as
+ * returned by OPTEE_SMC_EXCHANGE_CAPABILITIES.
+ *
+ * When calling these functions, normal world has a few responsibilities:
+ * 1. It must be able to handle any RPCs that may occur
+ * 2. Non-secure interrupts should not be masked
+ * 3. If asynchronous notifications have been negotiated successfully, then
+ * the interrupt for asynchronous notifications should be unmasked
+ * during this call.
+ *
+ * Call register usage, OPTEE_SMC_CALL_WITH_ARG and
+ * OPTEE_SMC_CALL_WITH_RPC_ARG:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_WITH_ARG or OPTEE_SMC_CALL_WITH_RPC_ARG
+ * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
+ * a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
+ * a3 Cache settings, not used if physical pointer is in a predefined shared
+ * memory area else per OPTEE_SMC_SHM_*
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Call register usage, OPTEE_SMC_CALL_WITH_REGD_ARG:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_WITH_REGD_ARG
+ * a1 Upper 32 bits of a 64-bit shared memory cookie
+ * a2 Lower 32 bits of a 64-bit shared memory cookie
+ * a3 Offset of the struct optee_msg_arg in the shared memory with the
+ * supplied cookie
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_*
+ * a1-3 Not used
+ * a4-7 Preserved
+ *
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
+ * a1-3 Preserved
+ * a4-7 Preserved
+ *
+ * RPC return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val)
+ * a1-2 RPC parameters
+ * a3-7 Resume information, must be preserved
+ *
+ * Possible return values:
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Call completed, result updated in
+ * the previously supplied struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded,
+ * try again later.
+ * OPTEE_SMC_RETURN_EBADADDR Bad physical pointer to struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg
+ * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal
+ * world.
+ */
+#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
+#define OPTEE_SMC_CALL_WITH_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
+#define OPTEE_SMC_CALL_WITH_RPC_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG)
+#define OPTEE_SMC_CALL_WITH_REGD_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG)
+
+/*
+ * Get Shared Memory Config
+ *
+ * Returns the Secure/Non-secure shared memory config.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Physical address of start of SHM
+ * a2 Size of SHM
+ * a3 Cache settings of memory, as defined by the
+ * OPTEE_SMC_SHM_* values above
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
+#define OPTEE_SMC_GET_SHM_CONFIG \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
+
+struct optee_smc_get_shm_config_result {
+ unsigned long status;
+ unsigned long start;
+ unsigned long size;
+ unsigned long settings;
+};
+
+/*
+ * Exchanges capabilities between normal world and secure world
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
+ * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
+ * a2-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2 The maximum secure world notification number
+ * a3 Bit[7:0]: Number of parameters needed for RPC to be supplied
+ * as the second MSG arg struct for
+ * OPTEE_SMC_CALL_WITH_ARG
+ * Bit[31:8]: Reserved (MBZ)
+ * a4-7 Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ */
+/* Normal world works as a uniprocessor system */
+#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0)
+/* Secure world has reserved shared memory for normal world to use */
+#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
+/* Secure world can communicate via previously unregistered shared memory */
+#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
+
+/*
+ * Secure world supports commands "register/unregister shared memory",
+ * secure world accepts command buffers located in any parts of non-secure RAM
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2)
+/* Secure world is built with virtualization support */
+#define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3)
+/* Secure world supports Shared Memory with a NULL reference */
+#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
+/* Secure world supports asynchronous notification of normal world */
+#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
+/* Secure world supports pre-allocating RPC arg struct */
+#define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6)
+
+#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
+#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
+
+struct optee_smc_exchange_capabilities_result {
+ unsigned long status;
+ unsigned long capabilities;
+ unsigned long max_notif_value;
+ unsigned long data;
+};
+
+/*
+ * Disable and empty the cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns one shared memory reference to free. To disable the
+ * cache and free all cached objects this function has to be called until
+ * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie
+ * a3-7 Preserved
+ *
+ * Cache empty return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10
+#define OPTEE_SMC_DISABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
+
+struct optee_smc_disable_shm_cache_result {
+ unsigned long status;
+ unsigned long shm_upper32;
+ unsigned long shm_lower32;
+ unsigned long reserved0;
+};
+
+/*
+ * Enable cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
+ * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11
+#define OPTEE_SMC_ENABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
+
+/*
+ * Query OP-TEE about number of supported threads
+ *
+ * Normal World OS or Hypervisor issues this call to find out how many
+ * threads OP-TEE supports. That is how many standard calls can be issued
+ * in parallel before OP-TEE will return OPTEE_SMC_RETURN_ETHREAD_LIMIT.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_THREAD_COUNT
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Number of threads
+ * a2-7 Preserved
+ *
+ * Error return:
+ * a0 OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Requested call is not implemented
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_THREAD_COUNT 15
+#define OPTEE_SMC_GET_THREAD_COUNT \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
+
+/*
+ * Inform OP-TEE that normal world is able to receive asynchronous
+ * notifications.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_ASYNC_NOTIF
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not supported return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF 16
+#define OPTEE_SMC_ENABLE_ASYNC_NOTIF \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF)
+
+/*
+ * Retrieve a value of notifications pending since the last call of this
+ * function.
+ *
+ * OP-TEE keeps a record of all posted values. When an interrupt is
+ * received which indicates that there are posted values, this function
+ * should be called until all pending values have been retrieved. When a
+ * value is retrieved, it's cleared from the record in secure world.
+ *
+ * It is expected that this function is called from an interrupt handler
+ * in normal world.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 value
+ * a2 Bit[0]: OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID if the value in a1 is
+ * valid, else 0 if no values were pending
+ * a2 Bit[1]: OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING if another value is
+ * pending, else 0.
+ * Bit[31:2]: MBZ
+ * a3-7 Preserved
+ *
+ * Not supported return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID BIT(0)
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING BIT(1)
+
+/*
+ * Notification that OP-TEE expects a yielding call to do some bottom half
+ * work in a driver.
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF 0
+
+#define OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE 17
+#define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
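+
+/*
+ * A sketch of the expected drain loop in the interrupt handler; the
+ * get_async_notif_value() helper is illustrative, not defined here:
+ *
+ *	do {
+ *		value = get_async_notif_value(&value_valid, &value_pending);
+ *		if (value_valid)
+ *			optee_notif_send(optee, value);
+ *	} while (value_pending);
+ *
+ * i.e. OPTEE_SMC_GET_ASYNC_NOTIF_VALUE is called until
+ * OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING is no longer set in a2.
+ */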
+
+/* See OPTEE_SMC_CALL_WITH_RPC_ARG above */
+#define OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG 18
+
+/* See OPTEE_SMC_CALL_WITH_REGD_ARG above */
+#define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG 19
+
+/*
+ * Resume from RPC (for example after processing a foreign interrupt)
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
+ * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
+ * OPTEE_SMC_RETURN_RPC in a0
+ *
+ * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
+ *
+ * Possible return values
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Original call completed, result
+ * updated in the previously supplied
+ * struct optee_msg_arg.
+ * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal
+ * world.
+ * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume
+ * information was corrupt.
+ */
+#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3
+#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
+
+#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
+
+#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
+ ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
+
+#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
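+
+/*
+ * For example, OPTEE_SMC_RPC_VAL(0) is 0xffff0000 and OPTEE_SMC_RPC_VAL(5)
+ * is 0xffff0005; OPTEE_SMC_RETURN_GET_RPC_FUNC() recovers the function
+ * number from such a return value.
+ */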
+
+/*
+ * Allocate memory for RPC parameter passing. The memory is used to hold a
+ * struct optee_msg_arg.
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC
+ * a1 Size in bytes of required argument memory
+ * a2 Not used
+ * a3 Resume information, must be preserved
+ * a4-5 Not used
+ * a6-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1 Upper 32 bits of 64-bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated.
+ * a2 Lower 32 bits of 64-bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated
+ * a3 Preserved
+ * a4 Upper 32 bits of 64-bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a5 Lower 32 bits of 64-bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a6-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_ALLOC 0
+#define OPTEE_SMC_RETURN_RPC_ALLOC \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
+
+/*
+ * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_FREE
+ * a1 Upper 32 bits of 64-bit shared memory cookie belonging to this
+ * argument memory
+ * a2 Lower 32 bits of 64-bit shared memory cookie belonging to this
+ * argument memory
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FREE 2
+#define OPTEE_SMC_RETURN_RPC_FREE \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
+
+/*
+ * Deliver a foreign interrupt in normal world.
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
+ * a1-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4
+#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR)
+
+/*
+ * Do an RPC request. The supplied struct optee_msg_arg tells which
+ * request to do and the parameters for the request. The following fields
+ * are used (the rest are unused):
+ * - cmd the Request ID
+ * - ret return value of the request, filled in by normal world
+ * - num_params number of parameters for the request
+ * - params the parameters
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_CMD
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_CMD 5
+#define OPTEE_SMC_RETURN_RPC_CMD \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
+
+/* Returned in a0 */
+#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+
+/* Returned in a0 only from Trusted OS functions */
+#define OPTEE_SMC_RETURN_OK 0x0
+#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1
+#define OPTEE_SMC_RETURN_EBUSY 0x2
+#define OPTEE_SMC_RETURN_ERESUME 0x3
+#define OPTEE_SMC_RETURN_EBADADDR 0x4
+#define OPTEE_SMC_RETURN_EBADCMD 0x5
+#define OPTEE_SMC_RETURN_ENOMEM 0x6
+#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7
+#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret))
+
+static inline bool __optee_smc_return_is_rpc(u32 ret)
+{
+ return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION &&
+ (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) ==
+ OPTEE_SMC_RETURN_RPC_PREFIX;
+}
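+
+/*
+ * A sketch of how a caller might drive the RPC loop with the helpers
+ * above; invoke() and handle_rpc() are illustrative placeholders:
+ *
+ *	ret = invoke(OPTEE_SMC_CALL_WITH_ARG, ...);
+ *	while (OPTEE_SMC_RETURN_IS_RPC(ret)) {
+ *		handle_rpc(OPTEE_SMC_RETURN_GET_RPC_FUNC(ret), ...);
+ *		// a1-a3 must carry the values returned with the RPC request
+ *		ret = invoke(OPTEE_SMC_CALL_RETURN_FROM_RPC, a1, a2, a3);
+ *	}
+ */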
+
+#endif /* OPTEE_SMC_H */
diff --git a/drivers/tee/optee/optee_trace.h b/drivers/tee/optee/optee_trace.h
new file mode 100644
index 0000000000..7c954eefa4
--- /dev/null
+++ b/drivers/tee/optee/optee_trace.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * optee trace points
+ *
+ * Copyright (C) 2021 Synaptics Incorporated
+ * Author: Jisheng Zhang <jszhang@kernel.org>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM optee
+
+#if !defined(_TRACE_OPTEE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_OPTEE_H
+
+#include <linux/arm-smccc.h>
+#include <linux/tracepoint.h>
+#include "optee_private.h"
+
+TRACE_EVENT(optee_invoke_fn_begin,
+ TP_PROTO(struct optee_rpc_param *param),
+ TP_ARGS(param),
+
+ TP_STRUCT__entry(
+ __field(void *, param)
+ __array(u32, args, 8)
+ ),
+
+ TP_fast_assign(
+ __entry->param = param;
+ BUILD_BUG_ON(sizeof(*param) < sizeof(__entry->args));
+ memcpy(__entry->args, param, sizeof(__entry->args));
+ ),
+
+ TP_printk("param=%p (%x, %x, %x, %x, %x, %x, %x, %x)", __entry->param,
+ __entry->args[0], __entry->args[1], __entry->args[2],
+ __entry->args[3], __entry->args[4], __entry->args[5],
+ __entry->args[6], __entry->args[7])
+);
+
+TRACE_EVENT(optee_invoke_fn_end,
+ TP_PROTO(struct optee_rpc_param *param, struct arm_smccc_res *res),
+ TP_ARGS(param, res),
+
+ TP_STRUCT__entry(
+ __field(void *, param)
+ __array(unsigned long, rets, 4)
+ ),
+
+ TP_fast_assign(
+ __entry->param = param;
+ BUILD_BUG_ON(sizeof(*res) < sizeof(__entry->rets));
+ memcpy(__entry->rets, res, sizeof(__entry->rets));
+ ),
+
+ TP_printk("param=%p ret (%lx, %lx, %lx, %lx)", __entry->param,
+ __entry->rets[0], __entry->rets[1], __entry->rets[2],
+ __entry->rets[3])
+);
+#endif /* _TRACE_OPTEE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE optee_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
new file mode 100644
index 0000000000..e69bc63806
--- /dev/null
+++ b/drivers/tee/optee/rpc.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_rpc_cmd.h"
+
+static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
+{
+ struct timespec64 ts;
+
+ if (arg->num_params != 1)
+ goto bad;
+ if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+ OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
+ goto bad;
+
+ ktime_get_real_ts64(&ts);
+ arg->params[0].u.value.a = ts.tv_sec;
+ arg->params[0].u.value.b = ts.tv_nsec;
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+#if IS_REACHABLE(CONFIG_I2C)
+static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct tee_param *params;
+ struct i2c_adapter *adapter;
+ struct i2c_msg msg = { };
+ size_t i;
+ int ret = -EOPNOTSUPP;
+ u8 attr[] = {
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT,
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT,
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT,
+ };
+
+ if (arg->num_params != ARRAY_SIZE(attr)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params))
+ goto bad;
+
+ for (i = 0; i < arg->num_params; i++) {
+ if (params[i].attr != attr[i])
+ goto bad;
+ }
+
+ adapter = i2c_get_adapter(params[0].u.value.b);
+ if (!adapter)
+ goto bad;
+
+ if (params[1].u.value.a & OPTEE_RPC_I2C_FLAGS_TEN_BIT) {
+ if (!i2c_check_functionality(adapter,
+ I2C_FUNC_10BIT_ADDR)) {
+ i2c_put_adapter(adapter);
+ goto bad;
+ }
+
+ msg.flags = I2C_M_TEN;
+ }
+
+ msg.addr = params[0].u.value.c;
+ msg.buf = params[2].u.memref.shm->kaddr;
+ msg.len = params[2].u.memref.size;
+
+ switch (params[0].u.value.a) {
+ case OPTEE_RPC_I2C_TRANSFER_RD:
+ msg.flags |= I2C_M_RD;
+ break;
+ case OPTEE_RPC_I2C_TRANSFER_WR:
+ break;
+ default:
+ i2c_put_adapter(adapter);
+ goto bad;
+ }
+
+ ret = i2c_transfer(adapter, &msg, 1);
+
+ if (ret < 0) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ params[3].u.value.a = msg.len;
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params))
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ else
+ arg->ret = TEEC_SUCCESS;
+ }
+
+ i2c_put_adapter(adapter);
+ kfree(params);
+ return;
+bad:
+ kfree(params);
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+#else
+static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ arg->ret = TEEC_ERROR_NOT_SUPPORTED;
+}
+#endif
+
+static void handle_rpc_func_cmd_wq(struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ if (arg->num_params != 1)
+ goto bad;
+
+ if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto bad;
+
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_NOTIFICATION_WAIT:
+ if (optee_notif_wait(optee, arg->params[0].u.value.b))
+ goto bad;
+ break;
+ case OPTEE_RPC_NOTIFICATION_SEND:
+ if (optee_notif_send(optee, arg->params[0].u.value.b))
+ goto bad;
+ break;
+ default:
+ goto bad;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
+{
+ u32 msec_to_wait;
+
+ if (arg->num_params != 1)
+ goto bad;
+
+ if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto bad;
+
+ msec_to_wait = arg->params[0].u.value.a;
+
+ /* Go to interruptible sleep */
+ msleep_interruptible(msec_to_wait);
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_rpc_supp_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_param *params;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+
+ arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);
+
+ if (optee->ops->to_msg_param(optee, arg->params, arg->num_params,
+ params))
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+out:
+ kfree(params);
+}
+
+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
+{
+ u32 ret;
+ struct tee_param param;
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct tee_shm *shm;
+
+ param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ param.u.value.a = OPTEE_RPC_SHM_TYPE_APPL;
+ param.u.value.b = sz;
+ param.u.value.c = 0;
+
+ ret = optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_ALLOC, 1, &param);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&optee->supp.mutex);
+ /* Increases count as secure world doesn't have a reference */
+ shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
+ mutex_unlock(&optee->supp.mutex);
+ return shm;
+}
+
+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct tee_param param;
+
+ param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ param.u.value.a = OPTEE_RPC_SHM_TYPE_APPL;
+ param.u.value.b = tee_shm_get_id(shm);
+ param.u.value.c = 0;
+
+ /*
+ * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
+ * world has released its reference.
+ *
+	 * It's better to do this before sending the request to the
+	 * supplicant as we'd like the process that did the initial
+	 * allocation to also release the last reference, in order to
+	 * avoid stacking up many pending fput() calls on the client
+	 * process. This could otherwise happen if secure world does
+	 * many allocations and frees in a single invocation.
+ */
+ tee_shm_put(shm);
+
+ optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, &param);
+}
+
+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_GET_TIME:
+ handle_rpc_func_cmd_get_time(arg);
+ break;
+ case OPTEE_RPC_CMD_NOTIFICATION:
+ handle_rpc_func_cmd_wq(optee, arg);
+ break;
+ case OPTEE_RPC_CMD_SUSPEND:
+ handle_rpc_func_cmd_wait(arg);
+ break;
+ case OPTEE_RPC_CMD_I2C_TRANSFER:
+ handle_rpc_func_cmd_i2c_transfer(ctx, arg);
+ break;
+ default:
+ handle_rpc_supp_cmd(ctx, optee, arg);
+ }
+}
+
+
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
new file mode 100644
index 0000000000..d5b28fd35d
--- /dev/null
+++ b/drivers/tee/optee/smc_abi.c
@@ -0,0 +1,1841 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "optee_rpc_cmd.h"
+#include <linux/kmemleak.h>
+#define CREATE_TRACE_POINTS
+#include "optee_trace.h"
+
+/*
+ * This file implements the SMC ABI used when communicating with secure world
+ * OP-TEE OS via raw SMCs.
+ * This file is divided into the following sections:
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ * 2. Low level support functions to register shared memory in secure world
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ * 4. Do a normal scheduled call into secure world
+ * 5. Asynchronous notification
+ * 6. Driver initialization.
+ */
+
+/*
+ * A typical OP-TEE private shm allocation is 224 bytes (argument struct
+ * with 6 parameters, needed for open session). So with an alignment of 512
+ * we'll waste a bit more than 50%. However, it's only expected that we'll
+ * have a handful of these structs allocated at a time. Most memory will
+ * be allocated aligned to the page size, so all in all this should scale
+ * up and down quite well.
+ */
+#define OPTEE_MIN_STATIC_POOL_ALIGN 9 /* 512 bytes aligned */
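+
+/*
+ * Editor's worked example (not part of the original source): the define
+ * above gives the pool a 2^9 = 512 byte minimum allocation size, so a
+ * 224 byte argument struct occupies a whole 512 byte slot and
+ * 512 - 224 = 288 bytes (~56%) of that slot are wasted.
+ */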
+
+/* SMC ABI considers at most a single TEE firmware */
+static unsigned int pcpu_irq_num;
+
+static int optee_cpuhp_enable_pcpu_irq(unsigned int cpu)
+{
+ enable_percpu_irq(pcpu_irq_num, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int optee_cpuhp_disable_pcpu_irq(unsigned int cpu)
+{
+ disable_percpu_irq(pcpu_irq_num);
+
+ return 0;
+}
+
+/*
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ *
+ * optee_from_msg_param() and optee_to_msg_param() are the main
+ * functions.
+ */
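+
+/*
+ * Editor's sketch of the conversion trick used below: the
+ * INPUT/OUTPUT/INOUT variants are consecutive in both attribute
+ * enumerations, so one representation maps to the other by offsetting
+ * from the base of its group, for example:
+ *
+ *   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ *     OPTEE_MSG_ATTR_TYPE_TMEM_INOUT - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ *   == TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT
+ */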
+
+static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+ phys_addr_t pa;
+ int rc;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ p->u.memref.size = mp->u.tmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
+ if (!shm) {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(shm, 0, &pa);
+ if (rc)
+ return rc;
+
+ p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+ p->u.memref.shm = shm;
+
+ return 0;
+}
+
+static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ p->u.memref.size = mp->u.rmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
+
+ if (shm) {
+ p->u.memref.shm_offs = mp->u.rmem.offs;
+ p->u.memref.shm = shm;
+ } else {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ }
+}
+
+/**
+ * optee_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @optee: main service struct
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
+ size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ optee_from_msg_param_value(p, attr, mp);
+ break;
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ rc = from_msg_param_tmp_mem(p, attr, mp);
+ if (rc)
+ return rc;
+ break;
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+ from_msg_param_reg_mem(p, attr, mp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ int rc;
+ phys_addr_t pa;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.tmem.size = p->u.memref.size;
+
+ if (!p->u.memref.shm) {
+ mp->u.tmem.buf_ptr = 0;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+ if (rc)
+ return rc;
+
+ mp->u.tmem.buf_ptr = pa;
+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+ OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+ return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.rmem.size = p->u.memref.size;
+ mp->u.rmem.offs = p->u.memref.shm_offs;
+ return 0;
+}
+
+/**
+ * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
+ * @optee: main service struct
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params:	subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_to_msg_param(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params, const struct tee_param *params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ optee_to_msg_param_value(mp, p);
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (tee_shm_is_dynamic(p->u.memref.shm))
+ rc = to_msg_param_reg_mem(mp, p);
+ else
+ rc = to_msg_param_tmp_mem(mp, p);
+ if (rc)
+ return rc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
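+
+/*
+ * Editor's note on the memref case above: the message representation is
+ * chosen from how the buffer was allocated, roughly:
+ *
+ *   if (tee_shm_is_dynamic(shm))
+ *       pass OPTEE_MSG_ATTR_TYPE_RMEM_* with shm_ref and offset;
+ *   else
+ *       pass OPTEE_MSG_ATTR_TYPE_TMEM_* with the physical address;
+ *
+ * since registered (dynamic) buffers are already known to secure world
+ * while static pool buffers are identified by their location.
+ */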
+
+/*
+ * 2. Low level support functions to register shared memory in secure world
+ *
+ * Functions to enable/disable shared memory caching in secure world, that
+ * is, lazy freeing of previously allocated shared memory. Freeing is
+ * performed when a request has been completed.
+ *
+ * Functions to register and unregister shared memory both for normal
+ * clients and for tee-supplicant.
+ */
+
+/**
+ * optee_enable_shm_cache() - Enables caching of some shared memory allocation
+ * in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_enable_shm_cache(struct optee *optee)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == OPTEE_SMC_RETURN_OK)
+ break;
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * __optee_disable_shm_cache() - Disables caching of some shared memory
+ * allocation in OP-TEE
+ * @optee: main service struct
+ * @is_mapped: true if the cached shared memory addresses were mapped by this
+ * kernel, are safe to dereference, and should be freed
+ */
+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_disable_shm_cache_result result;
+ } res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
+ break; /* All shm's freed */
+ if (res.result.status == OPTEE_SMC_RETURN_OK) {
+ struct tee_shm *shm;
+
+ /*
+ * Shared memory references that were not mapped by
+ * this kernel must be ignored to prevent a crash.
+ */
+ if (!is_mapped)
+ continue;
+
+ shm = reg_pair_to_ptr(res.result.shm_upper32,
+ res.result.shm_lower32);
+ tee_shm_free(shm);
+ } else {
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
+ * allocations in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_disable_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, true);
+}
+
+/**
+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
+ * allocations in OP-TEE which are not
+ * currently mapped
+ * @optee: main service struct
+ */
+static void optee_disable_unmapped_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, false);
+}
+
+#define PAGELIST_ENTRIES_PER_PAGE \
+ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+ int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+ return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
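+
+/*
+ * Editor's worked example, assuming the 4k OPTEE_MSG_NONCONTIG_PAGE_SIZE:
+ * PAGELIST_ENTRIES_PER_PAGE is 4096 / sizeof(u64) - 1 = 511, so a
+ * 1000 page buffer needs DIV_ROUND_UP(1000, 511) = 2 pagelist pages,
+ * that is, get_pages_list_size(1000) == 8192.
+ */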
+
+static u64 *optee_allocate_pages_list(size_t num_entries)
+{
+ return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+static void optee_free_pages_list(void *list, size_t num_entries)
+{
+ free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold the list of user page addresses and
+ * links to the next pages of the buffer
+ */
+static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+ size_t page_offset)
+{
+ int n = 0;
+ phys_addr_t optee_page;
+ /*
+ * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+ * for details.
+ */
+ struct {
+ u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+ u64 next_page_data;
+ } *pages_data;
+
+ /*
+	 * Currently OP-TEE uses a 4k page size and it does not look
+	 * like this will change in the future. On the other hand, there
+	 * are no known Arm architectures with a page size smaller than
+	 * 4k, so the build assert below looks redundant. But the
+	 * following code relies heavily on this assumption, so it is
+	 * better to be safe than sorry.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ pages_data = (void *)dst;
+ /*
+	 * If the Linux page size is bigger than 4k and the user buffer
+	 * offset is larger than 4k/8k/12k/etc, this skips the first 4k
+	 * chunks because they carry no data of value for OP-TEE.
+ */
+ optee_page = page_to_phys(*pages) +
+ round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ while (true) {
+ pages_data->pages_list[n++] = optee_page;
+
+ if (n == PAGELIST_ENTRIES_PER_PAGE) {
+ pages_data->next_page_data =
+ virt_to_phys(pages_data + 1);
+ pages_data++;
+ n = 0;
+ }
+
+ optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+ if (!(optee_page & ~PAGE_MASK)) {
+ if (!--num_pages)
+ break;
+ pages++;
+ optee_page = page_to_phys(*pages);
+ }
+ }
+}
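+
+/*
+ * Editor's sketch of the layout produced above, for a buffer covering
+ * 4k chunks C0, C1, ...:
+ *
+ *   pagelist page 0: pa(C0), pa(C1), ..., pa(C510), pa(pagelist page 1)
+ *   pagelist page 1: pa(C511), pa(C512), ...
+ *
+ * Each entry holds the physical address of one 4k chunk and the last
+ * entry of every pagelist page links to the next pagelist page.
+ */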
+
+static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ u64 *pages_list;
+ size_t sz;
+ int rc;
+
+ if (!num_pages)
+ return -EINVAL;
+
+ rc = optee_check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ pages_list = optee_allocate_pages_list(num_pages);
+ if (!pages_list)
+ return -ENOMEM;
+
+ /*
+	 * We're about to register shared memory, so we can't use
+	 * registered shared memory for this request itself or we'd
+	 * end up in a catch-22.
+	 *
+	 * So in this case we have to fall back to a good old temporary
+	 * private allocation instead of using optee_get_msg_arg().
+ */
+ sz = optee_msg_arg_size(optee->rpc_param_count);
+ shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
+ if (IS_ERR(shm_arg)) {
+ rc = PTR_ERR(shm_arg);
+ goto out;
+ }
+ msg_arg = tee_shm_get_va(shm_arg, 0);
+ if (IS_ERR(msg_arg)) {
+ rc = PTR_ERR(msg_arg);
+ goto out;
+ }
+
+ optee_fill_pages_list(pages_list, pages, num_pages,
+ tee_shm_get_page_offset(shm));
+
+ memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+ msg_arg->num_params = 1;
+ msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+ msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+ msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+ /*
+	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
+	 * we store the buffer offset within the 4k page, as described in
+	 * the OP-TEE ABI.
+ */
+ msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+ tee_shm_free(shm_arg);
+out:
+ optee_free_pages_list(pages_list, num_pages);
+ return rc;
+}
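+
+/*
+ * Editor's encoding example for the buf_ptr value built above: with a
+ * page-aligned pages list at physical address 0x80000000 and a buffer
+ * starting 0x234 bytes into its first 4k page,
+ *
+ *   buf_ptr = 0x80000000 | (0x234 & (4096 - 1)) = 0x80000234
+ *
+ * letting secure world recover both the list address and the in-page
+ * offset from a single value.
+ */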
+
+static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ int rc = 0;
+ size_t sz;
+
+ /*
+	 * We're about to unregister shared memory and we may not be able to
+ * register shared memory for this request in case we're called
+ * from optee_shm_arg_cache_uninit().
+ *
+	 * So in order to keep things simple in this function, just as in
+	 * optee_shm_register(), we'll use a temporary private allocation
+ * instead of using optee_get_msg_arg().
+ */
+ sz = optee_msg_arg_size(optee->rpc_param_count);
+ shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
+ if (IS_ERR(shm_arg))
+ return PTR_ERR(shm_arg);
+ msg_arg = tee_shm_get_va(shm_arg, 0);
+ if (IS_ERR(msg_arg)) {
+ rc = PTR_ERR(msg_arg);
+ goto out;
+ }
+
+ memset(msg_arg, 0, sz);
+ msg_arg->num_params = 1;
+ msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+out:
+ tee_shm_free(shm_arg);
+ return rc;
+}
+
+static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ /*
+ * We don't want to register supplicant memory in OP-TEE.
+ * Instead information about it will be passed in RPC code.
+ */
+ return optee_check_mem_type(start, num_pages);
+}
+
+static int optee_shm_unregister_supp(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ return 0;
+}
+
+/*
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ *
+ * Implements an OP-TEE specific shared memory pool which is used
+ * when dynamic shared memory is supported by secure world.
+ *
+ * The main function is optee_shm_pool_alloc_pages().
+ */
+
+static int pool_op_alloc(struct tee_shm_pool *pool,
+ struct tee_shm *shm, size_t size, size_t align)
+{
+ /*
+ * Shared memory private to the OP-TEE driver doesn't need
+ * to be registered with OP-TEE.
+ */
+ if (shm->flags & TEE_SHM_PRIV)
+ return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);
+
+ return optee_pool_op_alloc_helper(pool, shm, size, align,
+ optee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool *pool,
+ struct tee_shm *shm)
+{
+ if (!(shm->flags & TEE_SHM_PRIV))
+ optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
+ else
+ optee_pool_op_free_helper(pool, shm, NULL);
+}
+
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
+{
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_pool = pool_op_destroy_pool,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from the kernel's own memory.
+ */
+static struct tee_shm_pool *optee_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->ops = &pool_ops;
+
+ return pool;
+}
+
+/*
+ * 4. Do a normal scheduled call into secure world
+ *
+ * The function optee_smc_do_call_with_arg() performs a normal scheduled
+ * call into secure world. During this call secure world may request help
+ * from normal world using RPCs, Remote Procedure Calls. This includes
+ * delivery of non-secure interrupts to, for instance, allow rescheduling of
+ * the current task.
+ */
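+
+/*
+ * Editor's sketch of the loop implemented by optee_smc_do_call_with_arg():
+ *
+ *   issue the SMC
+ *   while the return value is an RPC request:
+ *       service the RPC (alloc/free shm, deliver interrupt, run command)
+ *       resume secure world with OPTEE_SMC_CALL_RETURN_FROM_RPC
+ *   return the final return code to the caller
+ *
+ * with OPTEE_SMC_RETURN_ETHREAD_LIMIT additionally parking the caller in
+ * the call queue until a secure world thread becomes available.
+ */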
+
+static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ optee_rpc_cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+	default:
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+	arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
+{
+ phys_addr_t pa;
+ struct tee_shm *shm;
+ size_t sz;
+ size_t n;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (!arg->num_params ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ for (n = 1; n < arg->num_params; n++) {
+ if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ }
+
+ sz = arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (tee_shm_get_pa(shm, 0, &pa)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto bad;
+ }
+
+ sz = tee_shm_get_size(shm);
+
+ if (tee_shm_is_dynamic(shm)) {
+ struct page **pages;
+ u64 *pages_list;
+ size_t page_num;
+
+ pages = tee_shm_get_pages(shm, &page_num);
+ if (!pages || !page_num) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ pages_list = optee_allocate_pages_list(page_num);
+ if (!pages_list) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ call_ctx->pages_list = pages_list;
+ call_ctx->num_entries = page_num;
+
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ /*
+		 * In the least significant bits of u.tmem.buf_ptr we store
+		 * the buffer offset within the 4k page, as described in the
+		 * OP-TEE ABI.
+ */
+ arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) &
+ (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+ optee_fill_pages_list(pages_list, pages, page_num,
+ tee_shm_get_page_offset(shm));
+ } else {
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ arg->params[0].u.tmem.buf_ptr = pa;
+ arg->params[0].u.tmem.size = sz;
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ tee_shm_free(shm);
+}
+
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+ if (call_ctx->pages_list) {
+ optee_free_pages_list(call_ctx->pages_list,
+ call_ctx->num_entries);
+ call_ctx->pages_list = NULL;
+ call_ctx->num_entries = 0;
+ }
+}
+
+static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+ free_pages_list(call_ctx);
+}
+
+static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
+{
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_SHM_ALLOC:
+ free_pages_list(call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
+ break;
+ case OPTEE_RPC_CMD_SHM_FREE:
+ handle_rpc_func_cmd_shm_free(ctx, arg);
+ break;
+ default:
+ optee_rpc_cmd(ctx, optee, arg);
+ }
+}
+
+/**
+ * optee_handle_rpc() - handle RPC from secure world
+ * @ctx:	context doing the RPC
+ * @rpc_arg:	pre-shared RPC argument struct, or NULL if the argument
+ *		struct is fetched through @param instead
+ * @param:	value of registers for the RPC
+ * @call_ctx: call context. Preserved during one OP-TEE invocation
+ *
+ * Result of RPC is written back into @param.
+ */
+static void optee_handle_rpc(struct tee_context *ctx,
+ struct optee_msg_arg *rpc_arg,
+ struct optee_rpc_param *param,
+ struct optee_call_ctx *call_ctx)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct optee_msg_arg *arg;
+ struct tee_shm *shm;
+ phys_addr_t pa;
+
+ switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+ case OPTEE_SMC_RPC_FUNC_ALLOC:
+ shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
+ if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+ reg_pair_from_64(&param->a1, &param->a2, pa);
+ reg_pair_from_64(&param->a4, &param->a5,
+ (unsigned long)shm);
+ } else {
+ param->a1 = 0;
+ param->a2 = 0;
+ param->a4 = 0;
+ param->a5 = 0;
+ }
+ kmemleak_not_leak(shm);
+ break;
+ case OPTEE_SMC_RPC_FUNC_FREE:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ tee_shm_free(shm);
+ break;
+ case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
+ /*
+ * A foreign interrupt was raised while secure world was
+		 * executing. Since such interrupts are handled in Linux,
+		 * a dummy RPC is performed to let Linux take the interrupt
+		 * through the normal vector.
+ */
+ break;
+ case OPTEE_SMC_RPC_FUNC_CMD:
+ if (rpc_arg) {
+ arg = rpc_arg;
+ } else {
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg)) {
+ pr_err("%s: tee_shm_get_va %p failed\n",
+ __func__, shm);
+ break;
+ }
+ }
+
+ handle_rpc_func_cmd(ctx, optee, arg, call_ctx);
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n",
+ (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
+ break;
+ }
+
+ param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
+}
+
+/**
+ * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
+ * @ctx: calling context
+ * @shm: shared memory holding the message to pass to secure world
+ * @offs: offset of the message in @shm
+ *
+ * Does an SMC to OP-TEE in secure world and handles eventual resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns return code from secure world, 0 is OK
+ */
+static int optee_smc_do_call_with_arg(struct tee_context *ctx,
+ struct tee_shm *shm, u_int offs)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_call_waiter w;
+ struct optee_rpc_param param = { };
+ struct optee_call_ctx call_ctx = { };
+ struct optee_msg_arg *rpc_arg = NULL;
+ int rc;
+
+ if (optee->rpc_param_count) {
+ struct optee_msg_arg *arg;
+ unsigned int rpc_arg_offs;
+
+ arg = tee_shm_get_va(shm, offs);
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+
+ rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
+ }
+
+ if (rpc_arg && tee_shm_is_dynamic(shm)) {
+ param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
+ param.a3 = offs;
+ } else {
+ phys_addr_t parg;
+
+ rc = tee_shm_get_pa(shm, offs, &parg);
+ if (rc)
+ return rc;
+
+ if (rpc_arg)
+ param.a0 = OPTEE_SMC_CALL_WITH_RPC_ARG;
+ else
+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, parg);
+ }
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ trace_optee_invoke_fn_begin(&param);
+ optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
+ param.a4, param.a5, param.a6, param.a7,
+ &res);
+ trace_optee_invoke_fn_end(&param, &res);
+
+ if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
+ /*
+ * Out of threads in secure world, wait for a thread
+			 * to become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
+ cond_resched();
+ param.a0 = res.a0;
+ param.a1 = res.a1;
+ param.a2 = res.a2;
+ param.a3 = res.a3;
+ optee_handle_rpc(ctx, rpc_arg, &param, &call_ctx);
+ } else {
+ rc = res.a0;
+ break;
+ }
+ }
+
+ optee_rpc_finalize_call(&call_ctx);
+ /*
+	 * We're done with our thread in secure world; if there are any
+	 * thread waiters, wake up one.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return rc;
+}
+
+static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
+{
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = cmd;
+ optee_smc_do_call_with_arg(ctx, shm, offs);
+
+ optee_free_msg_arg(ctx, entry, offs);
+ return 0;
+}
+
+static int optee_smc_do_bottom_half(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
+}
+
+static int optee_smc_stop_async_notif(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
+}
+
+/*
+ * 5. Asynchronous notification
+ */
+
+static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
+ bool *value_pending)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0) {
+ *value_valid = false;
+ return 0;
+ }
+ *value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
+ *value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
+ return res.a1;
+}
+
+static irqreturn_t irq_handler(struct optee *optee)
+{
+ bool do_bottom_half = false;
+ bool value_valid;
+ bool value_pending;
+ u32 value;
+
+ do {
+ value = get_async_notif_value(optee->smc.invoke_fn,
+ &value_valid, &value_pending);
+ if (!value_valid)
+ break;
+
+ if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
+ do_bottom_half = true;
+ else
+ optee_notif_send(optee, value);
+ } while (value_pending);
+
+ if (do_bottom_half)
+ return IRQ_WAKE_THREAD;
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t notif_irq_handler(int irq, void *dev_id)
+{
+ struct optee *optee = dev_id;
+
+ return irq_handler(optee);
+}
+
+static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
+{
+ struct optee *optee = dev_id;
+
+ optee_smc_do_bottom_half(optee->ctx);
+
+ return IRQ_HANDLED;
+}
+
+static int init_irq(struct optee *optee, u_int irq)
+{
+ int rc;
+
+ rc = request_threaded_irq(irq, notif_irq_handler,
+ notif_irq_thread_fn,
+ 0, "optee_notification", optee);
+ if (rc)
+ return rc;
+
+ optee->smc.notif_irq = irq;
+
+ return 0;
+}
+
+static irqreturn_t notif_pcpu_irq_handler(int irq, void *dev_id)
+{
+ struct optee_pcpu *pcpu = dev_id;
+ struct optee *optee = pcpu->optee;
+
+ if (irq_handler(optee) == IRQ_WAKE_THREAD)
+ queue_work(optee->smc.notif_pcpu_wq,
+ &optee->smc.notif_pcpu_work);
+
+ return IRQ_HANDLED;
+}
+
+static void notif_pcpu_irq_work_fn(struct work_struct *work)
+{
+ struct optee_smc *optee_smc = container_of(work, struct optee_smc,
+ notif_pcpu_work);
+ struct optee *optee = container_of(optee_smc, struct optee, smc);
+
+ optee_smc_do_bottom_half(optee->ctx);
+}
+
+static int init_pcpu_irq(struct optee *optee, u_int irq)
+{
+ struct optee_pcpu __percpu *optee_pcpu;
+ int cpu, rc;
+
+ optee_pcpu = alloc_percpu(struct optee_pcpu);
+ if (!optee_pcpu)
+ return -ENOMEM;
+
+ for_each_present_cpu(cpu)
+ per_cpu_ptr(optee_pcpu, cpu)->optee = optee;
+
+ rc = request_percpu_irq(irq, notif_pcpu_irq_handler,
+ "optee_pcpu_notification", optee_pcpu);
+ if (rc)
+ goto err_free_pcpu;
+
+ INIT_WORK(&optee->smc.notif_pcpu_work, notif_pcpu_irq_work_fn);
+ optee->smc.notif_pcpu_wq = create_workqueue("optee_pcpu_notification");
+ if (!optee->smc.notif_pcpu_wq) {
+ rc = -EINVAL;
+ goto err_free_pcpu_irq;
+ }
+
+ optee->smc.optee_pcpu = optee_pcpu;
+ optee->smc.notif_irq = irq;
+
+ pcpu_irq_num = irq;
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "optee/pcpu-notif:starting",
+ optee_cpuhp_enable_pcpu_irq,
+ optee_cpuhp_disable_pcpu_irq);
+ if (!rc)
+ rc = -EINVAL;
+ if (rc < 0)
+ goto err_free_pcpu_irq;
+
+ optee->smc.notif_cpuhp_state = rc;
+
+ return 0;
+
+err_free_pcpu_irq:
+ free_percpu_irq(irq, optee_pcpu);
+err_free_pcpu:
+ free_percpu(optee_pcpu);
+
+ return rc;
+}
+
+static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
+{
+ if (irq_is_percpu_devid(irq))
+ return init_pcpu_irq(optee, irq);
+ else
+ return init_irq(optee, irq);
+}
+
+static void uninit_pcpu_irq(struct optee *optee)
+{
+ cpuhp_remove_state(optee->smc.notif_cpuhp_state);
+
+ destroy_workqueue(optee->smc.notif_pcpu_wq);
+
+ free_percpu_irq(optee->smc.notif_irq, optee->smc.optee_pcpu);
+ free_percpu(optee->smc.optee_pcpu);
+}
+
+static void optee_smc_notif_uninit_irq(struct optee *optee)
+{
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ optee_smc_stop_async_notif(optee->ctx);
+ if (optee->smc.notif_irq) {
+ if (irq_is_percpu_devid(optee->smc.notif_irq))
+ uninit_pcpu_irq(optee);
+ else
+ free_irq(optee->smc.notif_irq, optee);
+
+ irq_dispose_mapping(optee->smc.notif_irq);
+ }
+ }
+}
+
+/*
+ * 6. Driver initialization
+ *
+ * During driver initialization secure world is probed to find out which
+ * features it supports so the driver can be initialized with a matching
+ * configuration. This involves, for instance, support for dynamic shared
+ * memory instead of a static memory carveout.
+ */
+
+static void optee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ v.gen_caps |= TEE_GEN_CAP_REG_MEM;
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
+ v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
+ *vers = v;
+}
+
+static int optee_smc_open(struct tee_context *ctx)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ u32 sec_caps = optee->smc.sec_caps;
+
+ return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
+}
+
+static const struct tee_driver_ops optee_clnt_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+ .shm_register = optee_shm_register,
+ .shm_unregister = optee_shm_unregister,
+};
+
+static const struct tee_desc optee_clnt_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &optee_clnt_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_supp_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release_supp,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+ .shm_register = optee_shm_register_supp,
+ .shm_unregister = optee_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_supp_desc = {
+ .name = DRIVER_NAME "-supp",
+ .ops = &optee_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static const struct optee_ops optee_ops = {
+ .do_call_with_arg = optee_smc_do_call_with_arg,
+ .to_msg_param = optee_to_msg_param,
+ .from_msg_param = optee_from_msg_param,
+};
+
+static int enable_async_notif(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0)
+ return -EINVAL;
+ return 0;
+}
+
+static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
+ res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
+ return true;
+ return false;
+}
+
+#ifdef CONFIG_OPTEE_INSECURE_LOAD_IMAGE
+static bool optee_msg_api_uid_is_optee_image_load(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == OPTEE_MSG_IMAGE_LOAD_UID_0 &&
+ res.a1 == OPTEE_MSG_IMAGE_LOAD_UID_1 &&
+ res.a2 == OPTEE_MSG_IMAGE_LOAD_UID_2 &&
+ res.a3 == OPTEE_MSG_IMAGE_LOAD_UID_3)
+ return true;
+ return false;
+}
+#endif
+
+static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_call_get_os_revision_result result;
+ } res = {
+ .result = {
+ .build_id = 0
+ }
+ };
+
+ invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.build_id)
+ pr_info("revision %lu.%lu (%08lx)", res.result.major,
+ res.result.minor, res.result.build_id);
+ else
+ pr_info("revision %lu.%lu", res.result.major, res.result.minor);
+}
+
+static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_calls_revision_result result;
+ } res;
+
+ invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+
+ if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
+ (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
+ return true;
+ return false;
+}
+
+static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
+ u32 *sec_caps, u32 *max_notif_value,
+ unsigned int *rpc_param_count)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_exchange_capabilities_result result;
+ } res;
+ u32 a1 = 0;
+
+ /*
+	 * TODO This isn't enough to tell if it's a UP system (from the
+	 * kernel's point of view) or not; is_smp() returns the
+	 * information needed, but can't be called directly from here.
+ */
+ if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
+ a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
+
+ invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return false;
+
+ *sec_caps = res.result.capabilities;
+ if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
+ *max_notif_value = res.result.max_notif_value;
+ else
+ *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
+ if (*sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
+ *rpc_param_count = (u8)res.result.data;
+ else
+ *rpc_param_count = 0;
+
+ return true;
+}
+
+static struct tee_shm_pool *
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_shm_config_result result;
+ } res;
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t begin;
+ phys_addr_t end;
+ void *va;
+ void *rc;
+
+ invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK) {
+ pr_err("static shm service not available\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
+ pr_err("only normal cached shared memory supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ begin = roundup(res.result.start, PAGE_SIZE);
+ end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
+ paddr = begin;
+ size = end - begin;
+
+ va = memremap(paddr, size, MEMREMAP_WB);
+ if (!va) {
+		pr_err("shared memory memremap failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vaddr = (unsigned long)va;
+
+ rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
+ OPTEE_MIN_STATIC_POOL_ALIGN);
+ if (IS_ERR(rc))
+ memunmap(va);
+ else
+ *memremaped_shm = va;
+
+ return rc;
+}
+
+/* Simple wrapper functions to be able to use a function pointer */
+static void optee_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static optee_invoke_fn *get_invoke_func(struct device *dev)
+{
+ const char *method;
+
+ pr_info("probing for conduit method.\n");
+
+ if (device_property_read_string(dev, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ if (!strcmp("hvc", method))
+ return optee_smccc_hvc;
+ else if (!strcmp("smc", method))
+ return optee_smccc_smc;
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return ERR_PTR(-EINVAL);
+}
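+
+/*
+ * Editor's note: an example device tree node selecting the conduit; the
+ * exact node is platform specific:
+ *
+ *   firmware {
+ *       optee {
+ *           compatible = "linaro,optee-tz";
+ *           method = "smc";
+ *       };
+ *   };
+ */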
+
+/* optee_smc_remove - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * optee_smc_remove is called by the platform subsystem to alert the driver
+ * that it should release the device
+ */
+static int optee_smc_remove(struct platform_device *pdev)
+{
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+ * into the old shared memory range.
+ */
+ if (!optee->rpc_param_count)
+ optee_disable_shm_cache(optee);
+
+ optee_smc_notif_uninit_irq(optee);
+
+ optee_remove_common(optee);
+
+ if (optee->smc.memremaped_shm)
+ memunmap(optee->smc.memremaped_shm);
+
+ kfree(optee);
+
+ return 0;
+}
+
+/* optee_shutdown - Device Shutdown Routine
+ * @pdev: platform device information struct
+ *
+ * optee_shutdown is called by the platform subsystem to alert
+ * the driver that a shutdown, reboot, or kexec is happening and
+ * the device must be disabled.
+ */
+static void optee_shutdown(struct platform_device *pdev)
+{
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ if (!optee->rpc_param_count)
+ optee_disable_shm_cache(optee);
+}
+
+#ifdef CONFIG_OPTEE_INSECURE_LOAD_IMAGE
+
+#define OPTEE_FW_IMAGE "optee/tee.bin"
+
+static optee_invoke_fn *cpuhp_invoke_fn;
+
+static int optee_cpuhp_probe(unsigned int cpu)
+{
+ /*
+ * Invoking a call on a CPU will cause OP-TEE to perform the required
+ * setup for that CPU. Just invoke the call to get the UID since that
+ * has no side effects.
+ */
+ if (optee_msg_api_uid_is_optee_api(cpuhp_invoke_fn))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int optee_load_fw(struct platform_device *pdev,
+ optee_invoke_fn *invoke_fn)
+{
+ const struct firmware *fw = NULL;
+ struct arm_smccc_res res;
+ phys_addr_t data_pa;
+ u8 *data_buf = NULL;
+ u64 data_size;
+ u32 data_pa_high, data_pa_low;
+ u32 data_size_high, data_size_low;
+ int rc;
+ int hp_state;
+
+ if (!optee_msg_api_uid_is_optee_image_load(invoke_fn))
+ return 0;
+
+ rc = request_firmware(&fw, OPTEE_FW_IMAGE, &pdev->dev);
+ if (rc) {
+ /*
+ * The firmware in the rootfs will not be accessible until we
+ * are in the SYSTEM_RUNNING state, so return EPROBE_DEFER until
+ * that point.
+ */
+ if (system_state < SYSTEM_RUNNING)
+ return -EPROBE_DEFER;
+ goto fw_err;
+ }
+
+ data_size = fw->size;
+ /*
+	 * This uses the GFP_DMA flag to ensure the allocated memory lies in
+	 * the 32-bit address space, since TF-A cannot map memory beyond the
+	 * 32-bit boundary.
+ */
+ data_buf = kmemdup(fw->data, fw->size, GFP_KERNEL | GFP_DMA);
+ if (!data_buf) {
+ rc = -ENOMEM;
+ goto fw_err;
+ }
+ data_pa = virt_to_phys(data_buf);
+ reg_pair_from_64(&data_pa_high, &data_pa_low, data_pa);
+ reg_pair_from_64(&data_size_high, &data_size_low, data_size);
+ goto fw_load;
+
+fw_err:
+ pr_warn("image loading failed\n");
+ data_pa_high = 0;
+ data_pa_low = 0;
+ data_size_high = 0;
+ data_size_low = 0;
+
+fw_load:
+ /*
+ * Always invoke the SMC, even if loading the image fails, to indicate
+ * to EL3 that we have passed the point where it should allow invoking
+ * this SMC.
+ */
+ pr_warn("OP-TEE image loaded from kernel, this can be insecure");
+ invoke_fn(OPTEE_SMC_CALL_LOAD_IMAGE, data_size_high, data_size_low,
+ data_pa_high, data_pa_low, 0, 0, 0, &res);
+ if (!rc)
+ rc = res.a0;
+ if (fw)
+ release_firmware(fw);
+ kfree(data_buf);
+
+ if (!rc) {
+ /*
+ * We need to initialize OP-TEE on all other running cores as
+ * well. Any cores that aren't running yet will get initialized
+ * when they are brought up by the power management functions in
+ * TF-A which are registered by the OP-TEE SPD. Due to that we
+ * can un-register the callback right after registering it.
+ */
+ cpuhp_invoke_fn = invoke_fn;
+ hp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "optee:probe",
+ optee_cpuhp_probe, NULL);
+ if (hp_state < 0) {
+ pr_warn("Failed with CPU hotplug setup for OP-TEE");
+ return -EINVAL;
+ }
+ cpuhp_remove_state(hp_state);
+ cpuhp_invoke_fn = NULL;
+ }
+
+ return rc;
+}
+#else
+static inline int optee_load_fw(struct platform_device *pdev,
+ optee_invoke_fn *invoke_fn)
+{
+ return 0;
+}
+#endif
+
+static int optee_probe(struct platform_device *pdev)
+{
+ optee_invoke_fn *invoke_fn;
+ struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
+ struct optee *optee = NULL;
+ void *memremaped_shm = NULL;
+ unsigned int rpc_param_count;
+ struct tee_device *teedev;
+ struct tee_context *ctx;
+ u32 max_notif_value;
+ u32 arg_cache_flags;
+ u32 sec_caps;
+ int rc;
+
+ invoke_fn = get_invoke_func(&pdev->dev);
+ if (IS_ERR(invoke_fn))
+ return PTR_ERR(invoke_fn);
+
+ rc = optee_load_fw(pdev, invoke_fn);
+ if (rc)
+ return rc;
+
+ if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
+ pr_warn("api uid mismatch\n");
+ return -EINVAL;
+ }
+
+ optee_msg_get_os_revision(invoke_fn);
+
+ if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
+ pr_warn("api revision mismatch\n");
+ return -EINVAL;
+ }
+
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
+ &max_notif_value,
+ &rpc_param_count)) {
+ pr_warn("capabilities mismatch\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Try to use dynamic shared memory if possible
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+ /*
+ * If we have OPTEE_SMC_SEC_CAP_RPC_ARG we can ask
+ * optee_get_msg_arg() to pre-register (by having
+ * OPTEE_SHM_ARG_ALLOC_PRIV cleared) the page used to pass
+ * an argument struct.
+ *
+		 * With the page pre-registered we can use a non-zero
+		 * offset for the argument struct; this is indicated with
+ * OPTEE_SHM_ARG_SHARED.
+ *
+ * This means that optee_smc_do_call_with_arg() will use
+ * OPTEE_SMC_CALL_WITH_REGD_ARG for pre-registered pages.
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
+ arg_cache_flags = OPTEE_SHM_ARG_SHARED;
+ else
+ arg_cache_flags = OPTEE_SHM_ARG_ALLOC_PRIV;
+
+ pool = optee_shm_pool_alloc_pages();
+ }
+
+ /*
+ * If dynamic shared memory is not available or failed - try static one
+ */
+ if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) {
+ /*
+ * The static memory pool can use non-zero page offsets so
+ * let optee_get_msg_arg() know that with OPTEE_SHM_ARG_SHARED.
+ *
+ * optee_get_msg_arg() should not pre-register the
+ * allocated page used to pass an argument struct, this is
+ * indicated with OPTEE_SHM_ARG_ALLOC_PRIV.
+ *
+ * This means that optee_smc_do_call_with_arg() will use
+ * OPTEE_SMC_CALL_WITH_ARG if rpc_param_count is 0, else
+ * OPTEE_SMC_CALL_WITH_RPC_ARG.
+ */
+ arg_cache_flags = OPTEE_SHM_ARG_SHARED |
+ OPTEE_SHM_ARG_ALLOC_PRIV;
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+ }
+
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee) {
+ rc = -ENOMEM;
+ goto err_free_pool;
+ }
+
+ optee->ops = &optee_ops;
+ optee->smc.invoke_fn = invoke_fn;
+ optee->smc.sec_caps = sec_caps;
+ optee->rpc_param_count = rpc_param_count;
+
+ teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_optee;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_unreg_teedev;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err_unreg_supp_teedev;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err_unreg_supp_teedev;
+
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_supp_init(&optee->supp);
+ optee->smc.memremaped_shm = memremaped_shm;
+ optee->pool = pool;
+ optee_shm_arg_cache_init(optee, arg_cache_flags);
+
+ platform_set_drvdata(pdev, optee);
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ goto err_supp_uninit;
+ }
+ optee->ctx = ctx;
+ rc = optee_notif_init(optee, max_notif_value);
+ if (rc)
+ goto err_close_ctx;
+
+ if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ unsigned int irq;
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0) {
+ pr_err("platform_get_irq: ret %d\n", rc);
+ goto err_notif_uninit;
+ }
+ irq = rc;
+
+ rc = optee_smc_notif_init_irq(optee, irq);
+ if (rc) {
+ irq_dispose_mapping(irq);
+ goto err_notif_uninit;
+ }
+ enable_async_notif(optee->smc.invoke_fn);
+ pr_info("Asynchronous notifications enabled\n");
+ }
+
+ /*
+ * Ensure that there are no pre-existing shm objects before enabling
+ * the shm cache so that there's no chance of receiving an invalid
+ * address during shutdown. This could occur, for example, if we're
+ * kexec booting from an older kernel that did not properly cleanup the
+ * shm cache.
+ */
+ optee_disable_unmapped_shm_cache(optee);
+
+ /*
+ * Only enable the shm cache in case we're not able to pass the RPC
+ * arg struct right after the normal arg struct.
+ */
+ if (!optee->rpc_param_count)
+ optee_enable_shm_cache(optee);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pr_info("dynamic shared memory is enabled\n");
+
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
+ if (rc)
+ goto err_disable_shm_cache;
+
+ pr_info("initialized driver\n");
+ return 0;
+
+err_disable_shm_cache:
+ if (!optee->rpc_param_count)
+ optee_disable_shm_cache(optee);
+ optee_smc_notif_uninit_irq(optee);
+ optee_unregister_devices();
+err_notif_uninit:
+ optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
+err_supp_uninit:
+ optee_shm_arg_cache_uninit(optee);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
+ tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
+ tee_device_unregister(optee->teedev);
+err_free_optee:
+ kfree(optee);
+err_free_pool:
+ tee_shm_pool_free(pool);
+ if (memremaped_shm)
+ memunmap(memremaped_shm);
+ return rc;
+}
+
+static const struct of_device_id optee_dt_match[] = {
+ { .compatible = "linaro,optee-tz" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, optee_dt_match);
+
+static struct platform_driver optee_driver = {
+ .probe = optee_probe,
+ .remove = optee_smc_remove,
+ .shutdown = optee_shutdown,
+ .driver = {
+ .name = "optee",
+ .of_match_table = optee_dt_match,
+ },
+};
+
+int optee_smc_abi_register(void)
+{
+ return platform_driver_register(&optee_driver);
+}
+
+void optee_smc_abi_unregister(void)
+{
+ platform_driver_unregister(&optee_driver);
+}
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
new file mode 100644
index 0000000000..322a543b8c
--- /dev/null
+++ b/drivers/tee/optee/supp.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Linaro Limited
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+
+struct optee_supp_req {
+ struct list_head link;
+
+ bool in_queue;
+ u32 func;
+ u32 ret;
+ size_t num_params;
+ struct tee_param *param;
+
+ struct completion c;
+};
+
+void optee_supp_init(struct optee_supp *supp)
+{
+ memset(supp, 0, sizeof(*supp));
+ mutex_init(&supp->mutex);
+ init_completion(&supp->reqs_c);
+ idr_init(&supp->idr);
+ INIT_LIST_HEAD(&supp->reqs);
+ supp->req_id = -1;
+}
+
+void optee_supp_uninit(struct optee_supp *supp)
+{
+ mutex_destroy(&supp->mutex);
+ idr_destroy(&supp->idr);
+}
+
+void optee_supp_release(struct optee_supp *supp)
+{
+ int id;
+ struct optee_supp_req *req;
+ struct optee_supp_req *req_tmp;
+
+ mutex_lock(&supp->mutex);
+
+	/* Abort all requests retrieved by the supplicant */
+ idr_for_each_entry(&supp->idr, req, id) {
+ idr_remove(&supp->idr, id);
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+ }
+
+ /* Abort all queued requests */
+ list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
+ list_del(&req->link);
+ req->in_queue = false;
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+ }
+
+ supp->ctx = NULL;
+ supp->req_id = -1;
+
+ mutex_unlock(&supp->mutex);
+}
+
+/**
+ * optee_supp_thrd_req() - request service from supplicant
+ * @ctx: context doing the request
+ * @func: function requested
+ * @num_params: number of elements in @param array
+ * @param: parameters for function
+ *
+ * Returns result of operation to be passed to secure world
+ */
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ struct tee_param *param)
+
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req;
+ bool interruptable;
+ u32 ret;
+
+ /*
+	 * Return early in case there is no supplicant available and
+	 * this is a non-blocking request.
+ */
+ if (!supp->ctx && ctx->supp_nowait)
+ return TEEC_ERROR_COMMUNICATION;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return TEEC_ERROR_OUT_OF_MEMORY;
+
+ init_completion(&req->c);
+ req->func = func;
+ req->num_params = num_params;
+ req->param = param;
+
+ /* Insert the request in the request list */
+ mutex_lock(&supp->mutex);
+ list_add_tail(&req->link, &supp->reqs);
+ req->in_queue = true;
+ mutex_unlock(&supp->mutex);
+
+ /* Tell an eventual waiter there's a new request */
+ complete(&supp->reqs_c);
+
+ /*
+ * Wait for supplicant to process and return result, once we've
+ * returned from wait_for_completion(&req->c) successfully we have
+ * exclusive access again.
+ */
+ while (wait_for_completion_interruptible(&req->c)) {
+ mutex_lock(&supp->mutex);
+ interruptable = !supp->ctx;
+ if (interruptable) {
+ /*
+			 * There's no supplicant available and since the
+			 * supp->mutex currently is held none can
+			 * become available until the mutex is released
+			 * again.
+			 *
+			 * Interrupting an RPC to the supplicant is only
+			 * allowed as a way of slightly improving the user
+			 * experience in case the supplicant hasn't been
+			 * started yet. During normal operation the supplicant
+			 * will serve all requests in a timely manner and
+			 * interrupting it then wouldn't make sense.
+ */
+ if (req->in_queue) {
+ list_del(&req->link);
+ req->in_queue = false;
+ }
+ }
+ mutex_unlock(&supp->mutex);
+
+ if (interruptable) {
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ break;
+ }
+ }
+
+ ret = req->ret;
+ kfree(req);
+
+ return ret;
+}
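+
+/*
+ * Editor's sketch of a request's lifecycle through this file:
+ *
+ *   secure world RPC -> optee_supp_thrd_req(): queue req, wake supplicant
+ *   tee-supplicant   -> optee_supp_recv(): pop req, pass params to user space
+ *   tee-supplicant   -> optee_supp_send(): copy back results, complete(&req->c)
+ *   optee_supp_thrd_req() then returns req->ret to the caller
+ */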
+
+static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
+ int num_params, int *id)
+{
+ struct optee_supp_req *req;
+
+ if (supp->req_id != -1) {
+ /*
+		 * Supplicant should not mix synchronous and asynchronous
+ * requests.
+ */
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (list_empty(&supp->reqs))
+ return NULL;
+
+ req = list_first_entry(&supp->reqs, struct optee_supp_req, link);
+
+ if (num_params < req->num_params) {
+ /* Not enough room for parameters */
+ return ERR_PTR(-EINVAL);
+ }
+
+ *id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
+ if (*id < 0)
+ return ERR_PTR(-ENOMEM);
+
+ list_del(&req->link);
+ req->in_queue = false;
+
+ return req;
+}
+
+static int supp_check_recv_params(size_t num_params, struct tee_param *params,
+ size_t *num_meta)
+{
+ size_t n;
+
+ if (!num_params)
+ return -EINVAL;
+
+ /*
+	 * If there are memrefs we need to decrease their reference
+	 * counters, as they were increased earlier, and we'll even
+	 * refuse to accept any below.
+ */
+ for (n = 0; n < num_params; n++)
+ if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+
+ /*
+ * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
+ * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
+ */
+ for (n = 0; n < num_params; n++)
+ if (params[n].attr &&
+ params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
+ return -EINVAL;
+
+ /* At most we'll need one meta parameter so no need to check for more */
+ if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
+ *num_meta = 1;
+ else
+ *num_meta = 0;
+
+ return 0;
+}
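+
+/*
+ * A sketch of a parameter array that would pass the checks above, with
+ * one meta parameter first (the number of slots is illustrative):
+ *
+ *	param[0].attr = TEE_IOCTL_PARAM_ATTR_META;	meta slot, num_meta = 1
+ *	param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;	free slot
+ *	param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;	free slot
+ */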
+
+/**
+ * optee_supp_recv() - receive request for supplicant
+ * @ctx: context receiving the request
+ * @func: requested function in supplicant
+ * @num_params: number of elements allocated in @param, updated with number
+ * used elements
+ * @param: space for parameters for @func
+ *
+ * Returns 0 on success or <0 on failure
+ */
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req = NULL;
+ int id;
+ size_t num_meta;
+ int rc;
+
+ rc = supp_check_recv_params(*num_params, param, &num_meta);
+ if (rc)
+ return rc;
+
+ while (true) {
+ mutex_lock(&supp->mutex);
+ req = supp_pop_entry(supp, *num_params - num_meta, &id);
+ mutex_unlock(&supp->mutex);
+
+ if (req) {
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ break;
+ }
+
+ /*
+ * If we didn't get a request we'll block in
+ * wait_for_completion_interruptible() to avoid needless
+ * spinning.
+ *
+ * This is where the supplicant will be hanging most of
+ * the time; let's make this interruptible so we
+ * can easily restart the supplicant if needed.
+ */
+ if (wait_for_completion_interruptible(&supp->reqs_c))
+ return -ERESTARTSYS;
+ }
+
+ if (num_meta) {
+ /*
+ * tee-supplicant supports meta parameters -> requests can be
+ * processed asynchronously.
+ */
+ param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ TEE_IOCTL_PARAM_ATTR_META;
+ param->u.value.a = id;
+ param->u.value.b = 0;
+ param->u.value.c = 0;
+ } else {
+ mutex_lock(&supp->mutex);
+ supp->req_id = id;
+ mutex_unlock(&supp->mutex);
+ }
+
+ *func = req->func;
+ *num_params = req->num_params + num_meta;
+ memcpy(param + num_meta, req->param,
+ sizeof(struct tee_param) * req->num_params);
+
+ return 0;
+}
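+
+/*
+ * A rough user space counterpart, assuming a tee-supplicant style loop
+ * over the TEE_IOC_SUPPL_RECV and TEE_IOC_SUPPL_SEND ioctls from
+ * <linux/tee.h> (argument marshalling omitted for brevity):
+ *
+ *	for (;;) {
+ *		ioctl(fd, TEE_IOC_SUPPL_RECV, &buf);	blocks in the loop above
+ *		serve the request indicated by func/params
+ *		ioctl(fd, TEE_IOC_SUPPL_SEND, &buf);	completes the request
+ *	}
+ */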
+
+static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
+ size_t num_params,
+ struct tee_param *param,
+ size_t *num_meta)
+{
+ struct optee_supp_req *req;
+ int id;
+ size_t nm;
+ const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ TEE_IOCTL_PARAM_ATTR_META;
+
+ if (!num_params)
+ return ERR_PTR(-EINVAL);
+
+ if (supp->req_id == -1) {
+ if (param->attr != attr)
+ return ERR_PTR(-EINVAL);
+ id = param->u.value.a;
+ nm = 1;
+ } else {
+ id = supp->req_id;
+ nm = 0;
+ }
+
+ req = idr_find(&supp->idr, id);
+ if (!req)
+ return ERR_PTR(-ENOENT);
+
+ if ((num_params - nm) != req->num_params)
+ return ERR_PTR(-EINVAL);
+
+ idr_remove(&supp->idr, id);
+ supp->req_id = -1;
+ *num_meta = nm;
+
+ return req;
+}
+
+/**
+ * optee_supp_send() - send result of request from supplicant
+ * @ctx: context sending result
+ * @ret: return value of request
+ * @num_params: number of parameters returned
+ * @param: returned parameters
+ *
+ * Returns 0 on success or <0 on failure.
+ */
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req;
+ size_t n;
+ size_t num_meta;
+
+ mutex_lock(&supp->mutex);
+ req = supp_pop_req(supp, num_params, param, &num_meta);
+ mutex_unlock(&supp->mutex);
+
+ if (IS_ERR(req)) {
+ /* Something is wrong, let the supplicant restart. */
+ return PTR_ERR(req);
+ }
+
+ /* Update out and in/out parameters */
+ for (n = 0; n < req->num_params; n++) {
+ struct tee_param *p = req->param + n;
+
+ switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ p->u.value.a = param[n + num_meta].u.value.a;
+ p->u.value.b = param[n + num_meta].u.value.b;
+ p->u.value.c = param[n + num_meta].u.value.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ p->u.memref.size = param[n + num_meta].u.memref.size;
+ break;
+ default:
+ break;
+ }
+ }
+ req->ret = ret;
+
+ /* Let the requesting thread continue */
+ complete(&req->c);
+
+ return 0;
+}
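+
+/*
+ * A minimal sketch of the asynchronous answer this function expects,
+ * assuming the supplicant echoes the meta parameter it received from
+ * optee_supp_recv() (field values are illustrative):
+ *
+ *	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ *			TEE_IOCTL_PARAM_ATTR_META;
+ *	param[0].u.value.a = id;	as handed out by optee_supp_recv()
+ *	param[1..] = the request's out/inout results
+ *
+ * supp_pop_req() uses u.value.a to look the request up again in
+ * supp->idr before the results are copied back above.
+ */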