author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
commit 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree 848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/tee
parent Initial commit. (diff)
download linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
download linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/tee')
-rw-r--r-- drivers/tee/Kconfig | 19
-rw-r--r-- drivers/tee/Makefile | 7
-rw-r--r-- drivers/tee/amdtee/Kconfig | 8
-rw-r--r-- drivers/tee/amdtee/Makefile | 5
-rw-r--r-- drivers/tee/amdtee/amdtee_if.h | 185
-rw-r--r-- drivers/tee/amdtee/amdtee_private.h | 172
-rw-r--r-- drivers/tee/amdtee/call.c | 451
-rw-r--r-- drivers/tee/amdtee/core.c | 536
-rw-r--r-- drivers/tee/amdtee/shm_pool.c | 70
-rw-r--r-- drivers/tee/optee/Kconfig | 9
-rw-r--r-- drivers/tee/optee/Makefile | 13
-rw-r--r-- drivers/tee/optee/call.c | 526
-rw-r--r-- drivers/tee/optee/core.c | 226
-rw-r--r-- drivers/tee/optee/device.c | 190
-rw-r--r-- drivers/tee/optee/ffa_abi.c | 922
-rw-r--r-- drivers/tee/optee/notif.c | 125
-rw-r--r-- drivers/tee/optee/optee_ffa.h | 163
-rw-r--r-- drivers/tee/optee/optee_msg.h | 339
-rw-r--r-- drivers/tee/optee/optee_private.h | 325
-rw-r--r-- drivers/tee/optee/optee_rpc_cmd.h | 106
-rw-r--r-- drivers/tee/optee/optee_smc.h | 579
-rw-r--r-- drivers/tee/optee/optee_trace.h | 67
-rw-r--r-- drivers/tee/optee/rpc.c | 279
-rw-r--r-- drivers/tee/optee/smc_abi.c | 1591
-rw-r--r-- drivers/tee/optee/supp.c | 382
-rw-r--r-- drivers/tee/tee_core.c | 1272
-rw-r--r-- drivers/tee/tee_private.h | 64
-rw-r--r-- drivers/tee/tee_shm.c | 529
-rw-r--r-- drivers/tee/tee_shm_pool.c | 92
29 files changed, 9252 insertions, 0 deletions
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
new file mode 100644
index 000000000..73a147202
--- /dev/null
+++ b/drivers/tee/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Generic Trusted Execution Environment Configuration
+menuconfig TEE
+ tristate "Trusted Execution Environment support"
+ depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
+ select CRYPTO
+ select CRYPTO_SHA1
+ select DMA_SHARED_BUFFER
+ select GENERIC_ALLOCATOR
+ help
+ This implements a generic interface towards a Trusted Execution
+ Environment (TEE).
+
+if TEE
+
+source "drivers/tee/optee/Kconfig"
+source "drivers/tee/amdtee/Kconfig"
+
+endif
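
Taken together with the OP-TEE and AMD-TEE Kconfig fragments sourced above, enabling these drivers needs a kernel configuration along the following lines. This is a sketch only: OPTEE and AMDTEE have distinct platform dependencies (Arm SMCCC versus the AMD CCP/PSP driver), so in practice only one is usually selectable on a given machine.

CONFIG_TEE=m
# OP-TEE: requires HAVE_ARM_SMCCC and an MMU (Arm platforms)
CONFIG_OPTEE=m
# AMD-TEE: requires the CCP driver with PSP support (x86 platforms)
CONFIG_CRYPTO_DEV_CCP_DD=m
CONFIG_CRYPTO_DEV_SP_PSP=y
CONFIG_AMDTEE=m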
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
new file mode 100644
index 000000000..68da044af
--- /dev/null
+++ b/drivers/tee/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TEE) += tee.o
+tee-objs += tee_core.o
+tee-objs += tee_shm.o
+tee-objs += tee_shm_pool.o
+obj-$(CONFIG_OPTEE) += optee/
+obj-$(CONFIG_AMDTEE) += amdtee/
diff --git a/drivers/tee/amdtee/Kconfig b/drivers/tee/amdtee/Kconfig
new file mode 100644
index 000000000..191f9715f
--- /dev/null
+++ b/drivers/tee/amdtee/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+# AMD-TEE Trusted Execution Environment Configuration
+config AMDTEE
+ tristate "AMD-TEE"
+ default m
+ depends on CRYPTO_DEV_SP_PSP && CRYPTO_DEV_CCP_DD
+ help
+ This implements AMD's Trusted Execution Environment (TEE) driver.
diff --git a/drivers/tee/amdtee/Makefile b/drivers/tee/amdtee/Makefile
new file mode 100644
index 000000000..ff1485266
--- /dev/null
+++ b/drivers/tee/amdtee/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+obj-$(CONFIG_AMDTEE) += amdtee.o
+amdtee-objs += core.o
+amdtee-objs += call.o
+amdtee-objs += shm_pool.o
diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h
new file mode 100644
index 000000000..e2014e215
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_if.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+/*
+ * This file contains definitions for the interface between the host and the
+ * AMD-TEE Trusted OS. These definitions must match those on the TEE side.
+ */
+
+#ifndef AMDTEE_IF_H
+#define AMDTEE_IF_H
+
+#include <linux/types.h>
+
+/*****************************************************************************
+ ** TEE Param
+ ******************************************************************************/
+#define TEE_MAX_PARAMS 4
+
+/**
+ * struct memref - memory reference structure
+ * @buf_id: buffer ID of the buffer mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ * @offset: offset in bytes from beginning of the buffer
+ * @size: data size in bytes
+ */
+struct memref {
+ u32 buf_id;
+ u32 offset;
+ u32 size;
+};
+
+struct value {
+ u32 a;
+ u32 b;
+};
+
+/*
+ * Parameters passed to open_session or invoke_command
+ */
+union tee_op_param {
+ struct memref mref;
+ struct value val;
+};
+
+struct tee_operation {
+ u32 param_types;
+ union tee_op_param params[TEE_MAX_PARAMS];
+};
+
+/* Must be same as in GP TEE specification */
+#define TEE_OP_PARAM_TYPE_NONE 0
+#define TEE_OP_PARAM_TYPE_VALUE_INPUT 1
+#define TEE_OP_PARAM_TYPE_VALUE_OUTPUT 2
+#define TEE_OP_PARAM_TYPE_VALUE_INOUT 3
+#define TEE_OP_PARAM_TYPE_INVALID 4
+#define TEE_OP_PARAM_TYPE_MEMREF_INPUT 5
+#define TEE_OP_PARAM_TYPE_MEMREF_OUTPUT 6
+#define TEE_OP_PARAM_TYPE_MEMREF_INOUT 7
+
+#define TEE_PARAM_TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF)
+#define TEE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+
+/*****************************************************************************
+ ** TEE Commands
+ *****************************************************************************/
+
+/*
+ * The shared memory between the rich world and the secure world may be
+ * physically non-contiguous. The structures below describe a shared memory
+ * region via a scatter/gather (sg) list.
+ */
+
+/**
+ * struct tee_sg_desc - sg descriptor for a physically contiguous buffer
+ * @low_addr: [in] bits[31:0] of buffer's physical address. Must be 4KB aligned
+ * @hi_addr: [in] bits[63:32] of the buffer's physical address
+ * @size: [in] size in bytes (must be multiple of 4KB)
+ */
+struct tee_sg_desc {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+/**
+ * struct tee_sg_list - structure describing a scatter/gather list
+ * @count: [in] number of sg descriptors
+ * @size: [in] total size of all buffers in the list. Must be multiple of 4KB
+ * @buf: [in] list of sg buffer descriptors
+ */
+#define TEE_MAX_SG_DESC 64
+struct tee_sg_list {
+ u32 count;
+ u32 size;
+ struct tee_sg_desc buf[TEE_MAX_SG_DESC];
+};
+
+/**
+ * struct tee_cmd_map_shared_mem - command to map shared memory
+ * @buf_id: [out] return buffer ID value
+ * @sg_list: [in] list describing memory to be mapped
+ */
+struct tee_cmd_map_shared_mem {
+ u32 buf_id;
+ struct tee_sg_list sg_list;
+};
+
+/**
+ * struct tee_cmd_unmap_shared_mem - command to unmap shared memory
+ * @buf_id: [in] buffer ID of memory to be unmapped
+ */
+struct tee_cmd_unmap_shared_mem {
+ u32 buf_id;
+};
+
+/**
+ * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
+ * @low_addr: [in] bits [31:0] of the physical address of the TA binary
+ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
+ * @size: [in] size of TA binary in bytes
+ * @ta_handle: [out] return handle of the loaded TA
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_load_ta {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+ u32 ta_handle;
+ u32 return_origin;
+};
+
+/**
+ * struct tee_cmd_unload_ta - command to unload TA binary from TEE environment
+ * @ta_handle: [in] handle of the loaded TA to be unloaded
+ */
+struct tee_cmd_unload_ta {
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_open_session - command to call TA_OpenSessionEntryPoint in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [out] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_open_session {
+ u32 ta_handle;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+/**
+ * struct tee_cmd_close_session - command to call TA_CloseSessionEntryPoint()
+ * in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [in] pointer to TA allocated session data
+ */
+struct tee_cmd_close_session {
+ u32 ta_handle;
+ u32 session_info;
+};
+
+/**
+ * struct tee_cmd_invoke_cmd - command to call TA_InvokeCommandEntryPoint() in
+ * TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @cmd_id: [in] TA command ID
+ * @session_info: [in] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_invoke_cmd {
+ u32 ta_handle;
+ u32 cmd_id;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+#endif /*AMDTEE_IF_H*/
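
The param_types packing used by TEE_PARAM_TYPES()/TEE_PARAM_TYPE_GET() above stores one 4-bit type code per parameter slot, four slots to a u32. A minimal standalone sketch of the encode/decode roundtrip (illustration only, not driver code; the macros are copied from the header above):

#include <stdint.h>
#include <stdio.h>

#define TEE_PARAM_TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF)
#define TEE_PARAM_TYPES(t0, t1, t2, t3) \
	((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))

int main(void)
{
	/* Slot 0 carries a value input (1), slot 1 a memref in/out (7),
	 * the remaining slots are unused (0 == NONE). */
	uint32_t types = TEE_PARAM_TYPES(1, 7, 0, 0);

	/* Each parameter occupies its own nibble: types == 0x71 */
	printf("types = 0x%x\n", types);
	printf("slot0 = %u, slot1 = %u\n",
	       TEE_PARAM_TYPE_GET(types, 0), TEE_PARAM_TYPE_GET(types, 1));
	return 0;
}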
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
new file mode 100644
index 000000000..6d0f7062b
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#ifndef AMDTEE_PRIVATE_H
+#define AMDTEE_PRIVATE_H
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include "amdtee_if.h"
+
+#define DRIVER_NAME "amdtee"
+#define DRIVER_AUTHOR "AMD-TEE Linux driver team"
+
+/* Some GlobalPlatform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_GENERIC 0xFFFF0000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+/* Maximum number of sessions which can be opened with a Trusted Application */
+#define TEE_NUM_SESSIONS 32
+
+#define TA_LOAD_PATH "/amdtee"
+#define TA_PATH_MAX 60
+
+/**
+ * struct amdtee - main service struct
+ * @teedev: client device
+ * @pool: shared memory pool
+ */
+struct amdtee {
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+};
+
+/**
+ * struct amdtee_session - Trusted Application (TA) session related information.
+ * @ta_handle: handle to Trusted Application (TA) loaded in TEE environment
+ * @refcount: counter to keep track of sessions opened for the TA instance
+ * @session_info: an array pointing to TA allocated session data.
+ * @sess_mask: session usage bit-mask. If a particular bit is set, then the
+ * corresponding @session_info entry is in use or valid.
+ *
+ * Session structure is updated on open_session and this information is used for
+ * subsequent operations with the Trusted Application.
+ */
+struct amdtee_session {
+ struct list_head list_node;
+ u32 ta_handle;
+ struct kref refcount;
+ u32 session_info[TEE_NUM_SESSIONS];
+ DECLARE_BITMAP(sess_mask, TEE_NUM_SESSIONS);
+ spinlock_t lock; /* synchronizes access to @sess_mask */
+};
+
+/**
+ * struct amdtee_context_data - AMD-TEE driver context data
+ * @sess_list: Keeps track of sessions opened in current TEE context
+ * @shm_list: Keeps track of buffers allocated and mapped in current TEE
+ * context
+ */
+struct amdtee_context_data {
+ struct list_head sess_list;
+ struct list_head shm_list;
+ struct mutex shm_mutex; /* synchronizes access to @shm_list */
+};
+
+struct amdtee_driver_data {
+ struct amdtee *amdtee;
+};
+
+struct shmem_desc {
+ void *kaddr;
+ u64 size;
+};
+
+/**
+ * struct amdtee_shm_data - Shared memory data
+ * @kaddr: Kernel virtual address of shared memory
+ * @buf_id: Buffer id of memory mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ */
+struct amdtee_shm_data {
+ struct list_head shm_node;
+ void *kaddr;
+ u32 buf_id;
+};
+
+/**
+ * struct amdtee_ta_data - Keeps track of all TAs loaded in AMD Secure
+ * Processor
+ * @ta_handle: Handle to TA loaded in TEE
+ * @refcount: Reference count for the loaded TA
+ */
+struct amdtee_ta_data {
+ struct list_head list_node;
+ u32 ta_handle;
+ u32 refcount;
+};
+
+#define LOWER_TWO_BYTE_MASK 0x0000FFFF
+
+/**
+ * set_session_id() - Sets the session identifier.
+ * @ta_handle: [in] handle of the loaded Trusted Application (TA)
+ * @session_index: [in] Session index. Range: 0 to (TEE_NUM_SESSIONS - 1).
+ * @session: [out] Pointer to session id
+ *
+ * The lower two bytes of the session identifier represent the TA handle and
+ * the upper two bytes are the session index.
+ */
+static inline void set_session_id(u32 ta_handle, u32 session_index,
+ u32 *session)
+{
+ *session = (session_index << 16) | (LOWER_TWO_BYTE_MASK & ta_handle);
+}
+
+static inline u32 get_ta_handle(u32 session)
+{
+ return session & LOWER_TWO_BYTE_MASK;
+}
+
+static inline u32 get_session_index(u32 session)
+{
+ return (session >> 16) & LOWER_TWO_BYTE_MASK;
+}
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+
+int amdtee_close_session(struct tee_context *ctx, u32 session);
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+int amdtee_map_shmem(struct tee_shm *shm);
+
+void amdtee_unmap_shmem(struct tee_shm *shm);
+
+int handle_load_ta(void *data, u32 size,
+ struct tee_ioctl_open_session_arg *arg);
+
+int handle_unload_ta(u32 ta_handle);
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p);
+
+int handle_close_session(u32 ta_handle, u32 info);
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id);
+
+void handle_unmap_shmem(u32 buf_id);
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p);
+
+struct tee_shm_pool *amdtee_config_shm(void);
+
+u32 get_buffer_id(struct tee_shm *shm);
+#endif /*AMDTEE_PRIVATE_H*/
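
To make the session-identifier layout above concrete, here is a roundtrip sketch of set_session_id() and its two accessors, written as standalone user-space code with the same bit layout (the handle and index values are made up for illustration):

#include <assert.h>
#include <stdint.h>

#define LOWER_TWO_BYTE_MASK 0x0000FFFF

int main(void)
{
	uint32_t ta_handle = 0x0005;	/* hypothetical TA handle */
	uint32_t index = 3;		/* session slot 3 of TEE_NUM_SESSIONS */

	/* Same packing as set_session_id(): session index in the high
	 * half, TA handle in the low half. */
	uint32_t session = (index << 16) | (ta_handle & LOWER_TWO_BYTE_MASK);

	assert(session == 0x00030005);
	/* get_ta_handle() */
	assert((session & LOWER_TWO_BYTE_MASK) == ta_handle);
	/* get_session_index() */
	assert(((session >> 16) & LOWER_TWO_BYTE_MASK) == index);
	return 0;
}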
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
new file mode 100644
index 000000000..8a02c5fe3
--- /dev/null
+++ b/drivers/tee/amdtee/call.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/tee.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-tee.h>
+#include <linux/slab.h>
+#include <linux/psp-sev.h>
+#include "amdtee_if.h"
+#include "amdtee_private.h"
+
+static int tee_params_to_amd_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ amd->param_types = 0;
+ for (i = 0; i < count; i++) {
+ /* AMD TEE does not support meta parameters */
+ if (tee[i].attr > TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ amd->param_types |= ((tee[i].attr & 0xF) << i * 4);
+ }
+
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE)
+ continue;
+
+ /* It is assumed that all values are within 2^32-1 */
+ if (type > TEE_OP_PARAM_TYPE_VALUE_INOUT) {
+ u32 buf_id = get_buffer_id(tee[i].u.memref.shm);
+
+ amd->params[i].mref.buf_id = buf_id;
+ amd->params[i].mref.offset = tee[i].u.memref.shm_offs;
+ amd->params[i].mref.size = tee[i].u.memref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ if (tee[i].u.value.c)
+ pr_warn("%s: Discarding value c\n", __func__);
+
+ amd->params[i].val.a = tee[i].u.value.a;
+ amd->params[i].val.b = tee[i].u.value.b;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ /* Assumes amd->param_types is valid */
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID ||
+ type > TEE_OP_PARAM_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE ||
+ type == TEE_OP_PARAM_TYPE_VALUE_INPUT ||
+ type == TEE_OP_PARAM_TYPE_MEMREF_INPUT)
+ continue;
+
+ /*
+ * It is assumed that buf_id remains unchanged for
+ * both the open_session and invoke_cmd calls
+ */
+ if (type > TEE_OP_PARAM_TYPE_MEMREF_INPUT) {
+ tee[i].u.memref.shm_offs = amd->params[i].mref.offset;
+ tee[i].u.memref.size = amd->params[i].mref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ /* field 'c' not supported by AMD TEE */
+ tee[i].u.value.a = amd->params[i].val.a;
+ tee[i].u.value.b = amd->params[i].val.b;
+ tee[i].u.value.c = 0;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+static DEFINE_MUTEX(ta_refcount_mutex);
+static LIST_HEAD(ta_list);
+
+static u32 get_ta_refcount(u32 ta_handle)
+{
+ struct amdtee_ta_data *ta_data;
+ u32 count = 0;
+
+ /* Caller must hold ta_refcount_mutex */
+ list_for_each_entry(ta_data, &ta_list, list_node)
+ if (ta_data->ta_handle == ta_handle)
+ return ++ta_data->refcount;
+
+ ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
+ if (ta_data) {
+ ta_data->ta_handle = ta_handle;
+ ta_data->refcount = 1;
+ count = ta_data->refcount;
+ list_add(&ta_data->list_node, &ta_list);
+ }
+
+ return count;
+}
+
+static u32 put_ta_refcount(u32 ta_handle)
+{
+ struct amdtee_ta_data *ta_data;
+ u32 count = 0;
+
+ /* Caller must hold ta_refcount_mutex */
+ list_for_each_entry(ta_data, &ta_list, list_node)
+ if (ta_data->ta_handle == ta_handle) {
+ count = --ta_data->refcount;
+ if (count == 0) {
+ list_del(&ta_data->list_node);
+ kfree(ta_data);
+ break;
+ }
+ }
+
+ return count;
+}
+
+int handle_unload_ta(u32 ta_handle)
+{
+ struct tee_cmd_unload_ta cmd = {0};
+ u32 status, count;
+ int ret;
+
+ if (!ta_handle)
+ return -EINVAL;
+
+ mutex_lock(&ta_refcount_mutex);
+
+ count = put_ta_refcount(ta_handle);
+
+ if (count) {
+ pr_debug("unload ta: not unloading %u count %u\n",
+ ta_handle, count);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ cmd.ta_handle = ta_handle;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("unload ta: status = 0x%x\n", status);
+ ret = -EBUSY;
+ } else {
+ pr_debug("unloaded ta handle %u\n", ta_handle);
+ }
+
+unlock:
+ mutex_unlock(&ta_refcount_mutex);
+ return ret;
+}
+
+int handle_close_session(u32 ta_handle, u32 info)
+{
+ struct tee_cmd_close_session cmd = {0};
+ u32 status;
+ int ret;
+
+ if (ta_handle == 0)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+ cmd.session_info = info;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_CLOSE_SESSION, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("close session: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+void handle_unmap_shmem(u32 buf_id)
+{
+ struct tee_cmd_unmap_shared_mem cmd = {0};
+ u32 status;
+ int ret;
+
+ cmd.buf_id = buf_id;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNMAP_SHARED_MEM, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret)
+ pr_debug("unmap shared memory: buf_id %u status = 0x%x\n",
+ buf_id, status);
+}
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p)
+{
+ struct tee_cmd_invoke_cmd cmd = {0};
+ int ret;
+
+ if (!arg || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort invoke command\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ cmd.cmd_id = arg->func;
+ cmd.session_info = sinfo;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("invoke command: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ pr_debug("invoke command: RO = 0x%x ret = 0x%x\n",
+ arg->ret_origin, arg->ret);
+ }
+
+ return ret;
+}
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id)
+{
+ struct tee_cmd_map_shared_mem *cmd;
+ phys_addr_t paddr;
+ int ret, i;
+ u32 status;
+
+ if (!count || !start || !buf_id)
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /* Size must be page aligned */
+ for (i = 0; i < count ; i++) {
+ if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) {
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+
+ if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) {
+ pr_err("map shared memory: page unaligned. addr 0x%llx",
+ (u64)start[i].kaddr);
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+ }
+
+ cmd->sg_list.count = count;
+
+ /* Create buffer list */
+ for (i = 0; i < count ; i++) {
+ paddr = __psp_pa(start[i].kaddr);
+ cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr);
+ cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr);
+ cmd->sg_list.buf[i].size = start[i].size;
+ cmd->sg_list.size += cmd->sg_list.buf[i].size;
+
+ pr_debug("buf[%d]:hi addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].hi_addr);
+ pr_debug("buf[%d]:low addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].low_addr);
+ pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size);
+ pr_debug("list size = 0x%x\n", cmd->sg_list.size);
+ }
+
+ *buf_id = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_MAP_SHARED_MEM, (void *)cmd,
+ sizeof(*cmd), &status);
+ if (!ret && !status) {
+ *buf_id = cmd->buf_id;
+ pr_debug("mapped buffer ID = 0x%x\n", *buf_id);
+ } else {
+ pr_err("map shared memory: status = 0x%x\n", status);
+ ret = -ENOMEM;
+ }
+
+free_cmd:
+ kfree(cmd);
+
+ return ret;
+}
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p)
+{
+ struct tee_cmd_open_session cmd = {0};
+ int ret;
+
+ if (!arg || !info || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_GENERIC;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort open session\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ *info = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_OPEN_SESSION, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("open session: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ *info = cmd.session_info;
+ pr_debug("open session: session info = 0x%x\n", *info);
+ }
+
+ pr_debug("open session: ret = 0x%x RO = 0x%x\n", arg->ret,
+ arg->ret_origin);
+
+ return ret;
+}
+
+int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
+{
+ struct tee_cmd_unload_ta unload_cmd = {};
+ struct tee_cmd_load_ta load_cmd = {};
+ phys_addr_t blob;
+ int ret;
+
+ if (size == 0 || !data || !arg)
+ return -EINVAL;
+
+ blob = __psp_pa(data);
+ if (blob & (PAGE_SIZE - 1)) {
+ pr_err("load TA: page unaligned. blob 0x%llx", blob);
+ return -EINVAL;
+ }
+
+ load_cmd.hi_addr = upper_32_bits(blob);
+ load_cmd.low_addr = lower_32_bits(blob);
+ load_cmd.size = size;
+
+ mutex_lock(&ta_refcount_mutex);
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
+ sizeof(load_cmd), &arg->ret);
+ if (ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ arg->ret_origin = load_cmd.return_origin;
+
+ if (arg->ret == TEEC_SUCCESS) {
+ ret = get_ta_refcount(load_cmd.ta_handle);
+ if (!ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+
+ /* Unload the TA on error */
+ unload_cmd.ta_handle = load_cmd.ta_handle;
+ psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+ (void *)&unload_cmd,
+ sizeof(unload_cmd), &ret);
+ } else {
+ set_session_id(load_cmd.ta_handle, 0, &arg->session);
+ }
+ }
+ }
+ mutex_unlock(&ta_refcount_mutex);
+
+ pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
+ load_cmd.ta_handle, arg->ret_origin, arg->ret);
+
+ return 0;
+}
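
handle_map_shmem() above splits each physically contiguous, page-aligned chunk into the 32-bit halves expected by struct tee_sg_desc, via upper_32_bits()/lower_32_bits(). A minimal standalone sketch of that encoding for one hypothetical chunk (the physical address and size are invented for illustration):

#include <assert.h>
#include <stdint.h>

struct tee_sg_desc {
	uint32_t low_addr;
	uint32_t hi_addr;
	uint32_t size;
};

int main(void)
{
	/* Hypothetical 4KB-aligned physical address above 4 GiB */
	uint64_t paddr = 0x100200000ULL;
	uint32_t size = 2 * 4096;	/* two pages, multiple of 4KB */

	struct tee_sg_desc d = {
		.low_addr = (uint32_t)paddr,		/* lower_32_bits() */
		.hi_addr  = (uint32_t)(paddr >> 32),	/* upper_32_bits() */
		.size	  = size,
	};

	assert(d.hi_addr == 0x1 && d.low_addr == 0x00200000);
	return 0;
}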
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
new file mode 100644
index 000000000..3c15f6a9e
--- /dev/null
+++ b/drivers/tee/amdtee/core.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include "amdtee_private.h"
+#include "../tee_private.h"
+#include <linux/psp-tee.h>
+
+static struct amdtee_driver_data *drv_data;
+static DEFINE_MUTEX(session_list_mutex);
+
+static void amdtee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_AMDTEE,
+ .impl_caps = 0,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ *vers = v;
+}
+
+static int amdtee_open(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+ INIT_LIST_HEAD(&ctxdata->shm_list);
+ mutex_init(&ctxdata->shm_mutex);
+
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void release_session(struct amdtee_session *sess)
+{
+ int i;
+
+ /* Close any open sessions */
+ for (i = 0; i < TEE_NUM_SESSIONS; ++i) {
+ /* Check if session entry 'i' is valid */
+ if (!test_bit(i, sess->sess_mask))
+ continue;
+
+ handle_close_session(sess->ta_handle, sess->session_info[i]);
+ handle_unload_ta(sess->ta_handle);
+ }
+
+ kfree(sess);
+}
+
+static void amdtee_release(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+
+ if (!ctxdata)
+ return;
+
+ while (true) {
+ struct amdtee_session *sess;
+
+ sess = list_first_entry_or_null(&ctxdata->sess_list,
+ struct amdtee_session,
+ list_node);
+
+ if (!sess)
+ break;
+
+ list_del(&sess->list_node);
+ release_session(sess);
+ }
+ mutex_destroy(&ctxdata->shm_mutex);
+ kfree(ctxdata);
+
+ ctx->data = NULL;
+}
+
+/**
+ * alloc_session() - Allocate a session structure
+ * @ctxdata: TEE Context data structure
+ * @session: Session ID for which 'struct amdtee_session' structure is to be
+ * allocated.
+ *
+ * Scans the TEE context's session list to check whether the TA is already
+ * loaded into the TEE. If so, returns the 'session' structure for that TA.
+ * Otherwise allocates and initializes a new 'session' structure and adds it
+ * to the context's session list.
+ *
+ * The caller must hold a mutex.
+ *
+ * Returns:
+ * 'struct amdtee_session *' on success and NULL on failure.
+ */
+static struct amdtee_session *alloc_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ struct amdtee_session *sess;
+ u32 ta_handle = get_ta_handle(session);
+
+ /* Scan session list to check if TA is already loaded into the TEE */
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->ta_handle == ta_handle) {
+ kref_get(&sess->refcount);
+ return sess;
+ }
+
+ /* Allocate a new session and add to list */
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (sess) {
+ sess->ta_handle = ta_handle;
+ kref_init(&sess->refcount);
+ spin_lock_init(&sess->lock);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ }
+
+ return sess;
+}
+
+/* Requires mutex to be held */
+static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ u32 ta_handle = get_ta_handle(session);
+ u32 index = get_session_index(session);
+ struct amdtee_session *sess;
+
+ if (index >= TEE_NUM_SESSIONS)
+ return NULL;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (ta_handle == sess->ta_handle &&
+ test_bit(index, sess->sess_mask))
+ return sess;
+
+ return NULL;
+}
+
+u32 get_buffer_id(struct tee_shm *shm)
+{
+ struct amdtee_context_data *ctxdata = shm->ctx->data;
+ struct amdtee_shm_data *shmdata;
+ u32 buf_id = 0;
+
+ mutex_lock(&ctxdata->shm_mutex);
+ list_for_each_entry(shmdata, &ctxdata->shm_list, shm_node)
+ if (shmdata->kaddr == shm->kaddr) {
+ buf_id = shmdata->buf_id;
+ break;
+ }
+ mutex_unlock(&ctxdata->shm_mutex);
+
+ return buf_id;
+}
+
+static DEFINE_MUTEX(drv_mutex);
+static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
+ size_t *ta_size)
+{
+ const struct firmware *fw;
+ char fw_name[TA_PATH_MAX];
+ struct {
+ u32 lo;
+ u16 mid;
+ u16 hi_ver;
+ u8 seq_n[8];
+ } *uuid = ptr;
+ int n, rc = 0;
+
+ n = snprintf(fw_name, TA_PATH_MAX,
+ "%s/%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x.bin",
+ TA_LOAD_PATH, uuid->lo, uuid->mid, uuid->hi_ver,
+ uuid->seq_n[0], uuid->seq_n[1],
+ uuid->seq_n[2], uuid->seq_n[3],
+ uuid->seq_n[4], uuid->seq_n[5],
+ uuid->seq_n[6], uuid->seq_n[7]);
+ if (n < 0 || n >= TA_PATH_MAX) {
+ pr_err("failed to get firmware name\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&drv_mutex);
+ n = request_firmware(&fw, fw_name, &ctx->teedev->dev);
+ if (n) {
+ pr_err("failed to load firmware %s\n", fw_name);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ *ta_size = roundup(fw->size, PAGE_SIZE);
+ *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
+ if (!*ta) {
+ pr_err("%s: get_free_pages failed\n", __func__);
+ rc = -ENOMEM;
+ goto rel_fw;
+ }
+
+ memcpy(*ta, fw->data, fw->size);
+rel_fw:
+ release_firmware(fw);
+unlock:
+ mutex_unlock(&drv_mutex);
+ return rc;
+}
+
+/* mutex must be held by caller */
+static void destroy_session(struct kref *ref)
+{
+ struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ refcount);
+
+ list_del(&sess->list_node);
+ mutex_unlock(&session_list_mutex);
+ kfree(sess);
+}
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess = NULL;
+ u32 session_info, ta_handle;
+ size_t ta_size;
+ int rc, i;
+ void *ta;
+
+ if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) {
+ pr_err("unsupported client login method\n");
+ return -EINVAL;
+ }
+
+ rc = copy_ta_binary(ctx, &arg->uuid[0], &ta, &ta_size);
+ if (rc) {
+ pr_err("failed to copy TA binary\n");
+ return rc;
+ }
+
+ /* Load the TA binary into TEE environment */
+ handle_load_ta(ta, ta_size, arg);
+ if (arg->ret != TEEC_SUCCESS)
+ goto out;
+
+ ta_handle = get_ta_handle(arg->session);
+
+ mutex_lock(&session_list_mutex);
+ sess = alloc_session(ctxdata, arg->session);
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess) {
+ handle_unload_ta(ta_handle);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+ if (arg->ret != TEEC_SUCCESS) {
+ pr_err("open_session failed %d\n", arg->ret);
+ handle_unload_ta(ta_handle);
+ kref_put_mutex(&sess->refcount, destroy_session,
+ &session_list_mutex);
+ goto out;
+ }
+
+ /* Find an empty session index for the given TA */
+ spin_lock(&sess->lock);
+ i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
+ if (i < TEE_NUM_SESSIONS) {
+ sess->session_info[i] = session_info;
+ set_session_id(ta_handle, i, &arg->session);
+ set_bit(i, sess->sess_mask);
+ }
+ spin_unlock(&sess->lock);
+
+ if (i >= TEE_NUM_SESSIONS) {
+ pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ handle_close_session(ta_handle, session_info);
+ handle_unload_ta(ta_handle);
+ kref_put_mutex(&sess->refcount, destroy_session,
+ &session_list_mutex);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+out:
+ free_pages((u64)ta, get_order(ta_size));
+ return rc;
+}
+
+int amdtee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ u32 i, ta_handle, session_info;
+ struct amdtee_session *sess;
+
+ pr_debug("%s: sid = 0x%x\n", __func__, session);
+
+ /*
+ * Check that the session is valid and clear the session
+ * usage bit
+ */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, session);
+ if (sess) {
+ ta_handle = get_ta_handle(session);
+ i = get_session_index(session);
+ session_info = sess->session_info[i];
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ /* Close the session */
+ handle_close_session(ta_handle, session_info);
+ handle_unload_ta(ta_handle);
+
+ kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex);
+
+ return 0;
+}
+
+int amdtee_map_shmem(struct tee_shm *shm)
+{
+ struct amdtee_context_data *ctxdata;
+ struct amdtee_shm_data *shmnode;
+ struct shmem_desc shmem;
+ int rc, count;
+ u32 buf_id;
+
+ if (!shm)
+ return -EINVAL;
+
+ shmnode = kmalloc(sizeof(*shmnode), GFP_KERNEL);
+ if (!shmnode)
+ return -ENOMEM;
+
+ count = 1;
+ shmem.kaddr = shm->kaddr;
+ shmem.size = shm->size;
+
+ /*
+ * Send a MAP command to the TEE and get the corresponding
+ * buffer ID
+ */
+ rc = handle_map_shmem(count, &shmem, &buf_id);
+ if (rc) {
+ pr_err("map_shmem failed: ret = %d\n", rc);
+ kfree(shmnode);
+ return rc;
+ }
+
+ shmnode->kaddr = shm->kaddr;
+ shmnode->buf_id = buf_id;
+ ctxdata = shm->ctx->data;
+ mutex_lock(&ctxdata->shm_mutex);
+ list_add(&shmnode->shm_node, &ctxdata->shm_list);
+ mutex_unlock(&ctxdata->shm_mutex);
+
+ pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr);
+
+ return 0;
+}
+
+void amdtee_unmap_shmem(struct tee_shm *shm)
+{
+ struct amdtee_context_data *ctxdata;
+ struct amdtee_shm_data *shmnode;
+ u32 buf_id;
+
+ if (!shm)
+ return;
+
+ buf_id = get_buffer_id(shm);
+ /* Unmap the shared memory from TEE */
+ handle_unmap_shmem(buf_id);
+
+ ctxdata = shm->ctx->data;
+ mutex_lock(&ctxdata->shm_mutex);
+ list_for_each_entry(shmnode, &ctxdata->shm_list, shm_node)
+ if (buf_id == shmnode->buf_id) {
+ list_del(&shmnode->shm_node);
+ kfree(shmnode);
+ break;
+ }
+ mutex_unlock(&ctxdata->shm_mutex);
+}
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess;
+ u32 i, session_info;
+
+ /* Check that the session is valid */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, arg->session);
+ if (sess) {
+ i = get_session_index(arg->session);
+ session_info = sess->session_info[i];
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ handle_invoke_cmd(arg, session_info, param);
+
+ return 0;
+}
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ return -EINVAL;
+}
+
+static const struct tee_driver_ops amdtee_ops = {
+ .get_version = amdtee_get_version,
+ .open = amdtee_open,
+ .release = amdtee_release,
+ .open_session = amdtee_open_session,
+ .close_session = amdtee_close_session,
+ .invoke_func = amdtee_invoke_func,
+ .cancel_req = amdtee_cancel_req,
+};
+
+static const struct tee_desc amdtee_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &amdtee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init amdtee_driver_init(void)
+{
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct amdtee *amdtee;
+ int rc;
+
+ rc = psp_check_tee_status();
+ if (rc) {
+ pr_err("amd-tee driver: tee not present\n");
+ return rc;
+ }
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ amdtee = kzalloc(sizeof(*amdtee), GFP_KERNEL);
+ if (!amdtee) {
+ rc = -ENOMEM;
+ goto err_kfree_drv_data;
+ }
+
+ pool = amdtee_config_shm();
+ if (IS_ERR(pool)) {
+ pr_err("shared pool configuration error\n");
+ rc = PTR_ERR(pool);
+ goto err_kfree_amdtee;
+ }
+
+ teedev = tee_device_alloc(&amdtee_desc, NULL, pool, amdtee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_pool;
+ }
+ amdtee->teedev = teedev;
+
+ rc = tee_device_register(amdtee->teedev);
+ if (rc)
+ goto err_device_unregister;
+
+ amdtee->pool = pool;
+
+ drv_data->amdtee = amdtee;
+
+ pr_info("amd-tee driver initialization successful\n");
+ return 0;
+
+err_device_unregister:
+ tee_device_unregister(amdtee->teedev);
+
+err_free_pool:
+ tee_shm_pool_free(pool);
+
+err_kfree_amdtee:
+ kfree(amdtee);
+
+err_kfree_drv_data:
+ kfree(drv_data);
+ drv_data = NULL;
+
+ pr_err("amd-tee driver initialization failed\n");
+ return rc;
+}
+module_init(amdtee_driver_init);
+
+static void __exit amdtee_driver_exit(void)
+{
+ struct amdtee *amdtee;
+
+ if (!drv_data || !drv_data->amdtee)
+ return;
+
+ amdtee = drv_data->amdtee;
+
+ tee_device_unregister(amdtee->teedev);
+ tee_shm_pool_free(amdtee->pool);
+}
+module_exit(amdtee_driver_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION("AMD-TEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
new file mode 100644
index 000000000..f87f96a29
--- /dev/null
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-sev.h>
+#include "amdtee_private.h"
+
+static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align)
+{
+ unsigned int order = get_order(size);
+ unsigned long va;
+ int rc;
+
+ /*
+ * Ignore alignment since this is already going to be page aligned
+ * and there's no need for any larger alignment.
+ */
+ va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!va)
+ return -ENOMEM;
+
+ shm->kaddr = (void *)va;
+ shm->paddr = __psp_pa((void *)va);
+ shm->size = PAGE_SIZE << order;
+
+ /* Map the allocated memory in to TEE */
+ rc = amdtee_map_shmem(shm);
+ if (rc) {
+ free_pages(va, order);
+ shm->kaddr = NULL;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
+{
+ /* Unmap the shared memory from TEE */
+ amdtee_unmap_shmem(shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
+{
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_pool = pool_op_destroy_pool,
+};
+
+struct tee_shm_pool *amdtee_config_shm(void)
+{
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->ops = &pool_ops;
+
+ return pool;
+}
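
One consequence of using __get_free_pages() in pool_op_alloc() above is that allocations are rounded up to a power-of-two number of pages, and shm->size reflects the rounded size rather than the requested one. A small standalone illustration of the same arithmetic, assuming a 4096-byte PAGE_SIZE:

#include <assert.h>
#include <stddef.h>

/* Same rounding as get_order() followed by PAGE_SIZE << order */
static unsigned int order_for(size_t size)
{
	unsigned int order = 0;

	while ((4096UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	assert(order_for(100) == 0);	/* 1 page,  shm->size = 4096 */
	assert(order_for(6000) == 1);	/* 2 pages, shm->size = 8192 */
	assert(order_for(9000) == 2);	/* 4 pages, shm->size = 16384 */
	return 0;
}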
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
new file mode 100644
index 000000000..f121c224e
--- /dev/null
+++ b/drivers/tee/optee/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# OP-TEE Trusted Execution Environment Configuration
+config OPTEE
+ tristate "OP-TEE"
+ depends on HAVE_ARM_SMCCC
+ depends on MMU
+ help
+ This implements the OP-TEE Trusted Execution Environment (TEE)
+ driver.
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
new file mode 100644
index 000000000..a6eff388d
--- /dev/null
+++ b/drivers/tee/optee/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_OPTEE) += optee.o
+optee-objs += core.o
+optee-objs += call.o
+optee-objs += notif.o
+optee-objs += rpc.o
+optee-objs += supp.o
+optee-objs += device.o
+optee-objs += smc_abi.o
+optee-objs += ffa_abi.o
+
+# for tracing framework to find optee_trace.h
+CFLAGS_smc_abi.o := -I$(src)
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
new file mode 100644
index 000000000..290b1bb0e
--- /dev/null
+++ b/drivers/tee/optee/call.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_private.h"
+
+#define MAX_ARG_PARAM_COUNT 6
+
+/*
+ * How much memory we allocate for each entry. This doesn't have to be a
+ * single page, but it makes sense to keep it at least a multiple of
+ * the page size.
+ */
+#define SHM_ENTRY_SIZE PAGE_SIZE
+
+/*
+ * We need a compile-time constant to be able to determine the
+ * maximum needed size of the bit field.
+ */
+#define MIN_ARG_SIZE OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT)
+#define MAX_ARG_COUNT_PER_ENTRY (SHM_ENTRY_SIZE / MIN_ARG_SIZE)
+
+/*
+ * Shared memory for argument structs is cached here. The number of
+ * argument structs that can fit is determined at runtime depending on the
+ * needed RPC parameter count reported by secure world
+ * (optee->rpc_param_count).
+ */
+struct optee_shm_arg_entry {
+ struct list_head list_node;
+ struct tee_shm *shm;
+ DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
+};
+
+void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ /*
+ * We're preparing to make a call to secure world. In case we can't
+ * allocate a thread in secure world we'll end up waiting in
+ * optee_cq_wait_for_completion().
+ *
+ * Normally if there's no contention in secure world the call will
+ * complete and we can clean up directly with optee_cq_wait_final().
+ */
+ mutex_lock(&cq->mutex);
+
+ /*
+ * We add ourselves to the queue, but we don't wait. This
+ * guarantees that we don't lose a completion if secure world
+ * returns busy while another thread has just exited and tried to
+ * complete someone.
+ */
+ init_completion(&w->c);
+ list_add_tail(&w->list_node, &cq->waiters);
+
+ mutex_unlock(&cq->mutex);
+}
+
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ wait_for_completion(&w->c);
+
+ mutex_lock(&cq->mutex);
+
+ /* Move to end of list to get out of the way for other waiters */
+ list_del(&w->list_node);
+ reinit_completion(&w->c);
+ list_add_tail(&w->list_node, &cq->waiters);
+
+ mutex_unlock(&cq->mutex);
+}
+
+static void optee_cq_complete_one(struct optee_call_queue *cq)
+{
+ struct optee_call_waiter *w;
+
+ list_for_each_entry(w, &cq->waiters, list_node) {
+ if (!completion_done(&w->c)) {
+ complete(&w->c);
+ break;
+ }
+ }
+}
+
+void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ /*
+ * We're done with the call to secure world. The thread in secure
+ * world that was used for this call is now available for some
+ * other task to use.
+ */
+ mutex_lock(&cq->mutex);
+
+ /* Get out of the list */
+ list_del(&w->list_node);
+
+ /* Wake up one waiting task, if any */
+ optee_cq_complete_one(cq);
+
+ /*
+ * If our completion is done, we received a completion from another
+ * task that just finished its call to secure world. Since yet another
+ * thread is now available in secure world, wake up another waiting
+ * task if there is one.
+ */
+ if (completion_done(&w->c))
+ optee_cq_complete_one(cq);
+
+ mutex_unlock(&cq->mutex);
+}
+
+/* Requires the filpstate mutex to be held */
+static struct optee_session *find_session(struct optee_context_data *ctxdata,
+ u32 session_id)
+{
+ struct optee_session *sess;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->session_id == session_id)
+ return sess;
+
+ return NULL;
+}
+
+void optee_shm_arg_cache_init(struct optee *optee, u32 flags)
+{
+ INIT_LIST_HEAD(&optee->shm_arg_cache.shm_args);
+ mutex_init(&optee->shm_arg_cache.mutex);
+ optee->shm_arg_cache.flags = flags;
+}
+
+void optee_shm_arg_cache_uninit(struct optee *optee)
+{
+ struct list_head *head = &optee->shm_arg_cache.shm_args;
+ struct optee_shm_arg_entry *entry;
+
+ mutex_destroy(&optee->shm_arg_cache.mutex);
+ while (!list_empty(head)) {
+ entry = list_first_entry(head, struct optee_shm_arg_entry,
+ list_node);
+ list_del(&entry->list_node);
+ if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) !=
+ MAX_ARG_COUNT_PER_ENTRY) {
+ pr_err("Freeing non-free entry\n");
+ }
+ tee_shm_free(entry->shm);
+ kfree(entry);
+ }
+}
+
+size_t optee_msg_arg_size(size_t rpc_param_count)
+{
+ size_t sz = OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT);
+
+ if (rpc_param_count)
+ sz += OPTEE_MSG_GET_ARG_SIZE(rpc_param_count);
+
+ return sz;
+}
+
+/**
+ * optee_get_msg_arg() - Provide shared memory for argument struct
+ * @ctx: Caller TEE context
+ * @num_params: Number of parameters to store
+ * @entry_ret: Entry pointer, needed when freeing the buffer
+ * @shm_ret: Shared memory buffer
+ * @offs_ret: Offset of argument struct in shared memory buffer
+ *
+ * @returns a pointer to the argument struct in memory, else an ERR_PTR
+ */
+struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
+ size_t num_params,
+ struct optee_shm_arg_entry **entry_ret,
+ struct tee_shm **shm_ret,
+ u_int *offs_ret)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ size_t sz = optee_msg_arg_size(optee->rpc_param_count);
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *ma;
+ size_t args_per_entry;
+ u_long bit;
+ u_int offs;
+ void *res;
+
+ if (num_params > MAX_ARG_PARAM_COUNT)
+ return ERR_PTR(-EINVAL);
+
+ if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_SHARED)
+ args_per_entry = SHM_ENTRY_SIZE / sz;
+ else
+ args_per_entry = 1;
+
+ mutex_lock(&optee->shm_arg_cache.mutex);
+ list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) {
+ bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
+ if (bit < args_per_entry)
+ goto have_entry;
+ }
+
+ /*
+ * No entry was found, let's allocate a new one.
+ */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ res = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV)
+ res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE);
+ else
+ res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE);
+
+ if (IS_ERR(res)) {
+ kfree(entry);
+ goto out;
+ }
+ entry->shm = res;
+ list_add(&entry->list_node, &optee->shm_arg_cache.shm_args);
+ bit = 0;
+
+have_entry:
+ offs = bit * sz;
+ res = tee_shm_get_va(entry->shm, offs);
+ if (IS_ERR(res))
+ goto out;
+ ma = res;
+ set_bit(bit, entry->map);
+ memset(ma, 0, sz);
+ ma->num_params = num_params;
+ *entry_ret = entry;
+ *shm_ret = entry->shm;
+ *offs_ret = offs;
+out:
+ mutex_unlock(&optee->shm_arg_cache.mutex);
+ return res;
+}
+
+/**
+ * optee_free_msg_arg() - Free previously obtained shared memory
+ * @ctx: Caller TEE context
+ * @entry: Pointer returned when the shared memory was obtained
+ * @offs: Offset of shared memory buffer to free
+ *
+ * This function frees the shared memory obtained with optee_get_msg_arg().
+ */
+void optee_free_msg_arg(struct tee_context *ctx,
+ struct optee_shm_arg_entry *entry, u_int offs)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ size_t sz = optee_msg_arg_size(optee->rpc_param_count);
+ u_long bit;
+
+ if (offs > SHM_ENTRY_SIZE || offs % sz) {
+ pr_err("Invalid offs %u\n", offs);
+ return;
+ }
+ bit = offs / sz;
+
+ mutex_lock(&optee->shm_arg_cache.mutex);
+
+ if (!test_bit(bit, entry->map))
+ pr_err("Bit pos %lu is already free\n", bit);
+ clear_bit(bit, entry->map);
+
+ mutex_unlock(&optee->shm_arg_cache.mutex);
+}
+
+int optee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_shm_arg_entry *entry;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ struct optee_session *sess = NULL;
+ uuid_t client_uuid;
+ u_int offs;
+ int rc;
+
+ /* +2 for the meta parameters added below */
+ msg_arg = optee_get_msg_arg(ctx, arg->num_params + 2,
+ &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
+ msg_arg->cancel_id = arg->cancel_id;
+
+ /*
+ * Initialize and add the meta parameters needed when opening a
+ * session.
+ */
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ OPTEE_MSG_ATTR_META;
+ msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ OPTEE_MSG_ATTR_META;
+ memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
+ msg_arg->params[1].u.value.c = arg->clnt_login;
+
+ rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
+ arg->clnt_uuid);
+ if (rc)
+ goto out;
+ export_uuid(msg_arg->params[1].u.octets, &client_uuid);
+
+ rc = optee->ops->to_msg_param(optee, msg_arg->params + 2,
+ arg->num_params, param);
+ if (rc)
+ goto out;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ if (msg_arg->ret == TEEC_SUCCESS) {
+ /* A new session has been created, add it to the list. */
+ sess->session_id = msg_arg->session;
+ mutex_lock(&ctxdata->mutex);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ mutex_unlock(&ctxdata->mutex);
+ } else {
+ kfree(sess);
+ }
+
+ if (optee->ops->from_msg_param(optee, param, arg->num_params,
+ msg_arg->params + 2)) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ /* Close session again to avoid leakage */
+ optee_close_session(ctx, msg_arg->session);
+ } else {
+ arg->session = msg_arg->session;
+ arg->ret = msg_arg->ret;
+ arg->ret_origin = msg_arg->ret_origin;
+ }
+out:
+ optee_free_msg_arg(ctx, entry, offs);
+
+ return rc;
+}
+
+int optee_close_session_helper(struct tee_context *ctx, u32 session)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+ msg_arg->session = session;
+ optee->ops->do_call_with_arg(ctx, shm, offs);
+
+ optee_free_msg_arg(ctx, entry, offs);
+
+ return 0;
+}
+
+int optee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_session *sess;
+
+ /* Check that the session is valid and remove it from the list */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, session);
+ if (sess)
+ list_del(&sess->list_node);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+ kfree(sess);
+
+ return optee_close_session_helper(ctx, session);
+}
+
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct optee_session *sess;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ /* Check that the session is valid */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, arg->session);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+
+ msg_arg = optee_get_msg_arg(ctx, arg->num_params,
+ &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+ msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
+ msg_arg->func = arg->func;
+ msg_arg->session = arg->session;
+ msg_arg->cancel_id = arg->cancel_id;
+
+ rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params,
+ param);
+ if (rc)
+ goto out;
+
+ if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ if (optee->ops->from_msg_param(optee, param, arg->num_params,
+ msg_arg->params)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ arg->ret = msg_arg->ret;
+ arg->ret_origin = msg_arg->ret_origin;
+out:
+ optee_free_msg_arg(ctx, entry, offs);
+ return rc;
+}
+
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct optee_session *sess;
+ struct tee_shm *shm;
+ u_int offs;
+
+ /* Check that the session is valid */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, session);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
+ msg_arg->session = session;
+ msg_arg->cancel_id = cancel_id;
+ optee->ops->do_call_with_arg(ctx, shm, offs);
+
+ optee_free_msg_arg(ctx, entry, offs);
+ return 0;
+}
+
+static bool is_normal_memory(pgprot_t p)
+{
+#if defined(CONFIG_ARM)
+ return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
+ ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
+#elif defined(CONFIG_ARM64)
+ return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
+#else
+#error "Unuspported architecture"
+#endif
+}
+
+static int __check_mem_type(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, start);
+
+ for_each_vma_range(vmi, vma, end) {
+ if (!is_normal_memory(vma->vm_page_prot))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int optee_check_mem_type(unsigned long start, size_t num_pages)
+{
+ struct mm_struct *mm = current->mm;
+ int rc;
+
+ /*
+ * Allow kernel addresses to be registered with OP-TEE, as kernel
+ * pages are configured as normal memory only.
+ */
+ if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
+ return 0;
+
+ mmap_read_lock(mm);
+ rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE);
+ mmap_read_unlock(mm);
+
+ return rc;
+}
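
To make the shared-memory argument cache sizing above concrete: OPTEE_MSG_GET_ARG_SIZE(n) expands to sizeof(struct optee_msg_arg) plus n * sizeof(struct optee_msg_param), which with the 6.1 optee_msg.h layout works out to 32 + n * 32 bytes (the 32-byte sizes are an assumption stated here for illustration). A worked sketch of optee_msg_arg_size() and the resulting args_per_entry computed in optee_get_msg_arg():

#include <assert.h>
#include <stddef.h>

#define SHM_ENTRY_SIZE		4096	/* PAGE_SIZE on common configs */
#define MAX_ARG_PARAM_COUNT	6
/* Assumed sizes: 32-byte header plus 32 bytes per param, see optee_msg.h */
#define ARG_SIZE(n)		(32 + (n) * 32)

static size_t msg_arg_size(size_t rpc_param_count)
{
	size_t sz = ARG_SIZE(MAX_ARG_PARAM_COUNT);

	if (rpc_param_count)
		sz += ARG_SIZE(rpc_param_count);
	return sz;
}

int main(void)
{
	/* No RPC params: 224 bytes per argument struct, 18 per 4K entry */
	assert(msg_arg_size(0) == 224 && SHM_ENTRY_SIZE / msg_arg_size(0) == 18);
	/* Two RPC params: 224 + 96 = 320 bytes, 12 per 4K entry */
	assert(msg_arg_size(2) == 320 && SHM_ENTRY_SIZE / msg_arg_size(2) == 12);
	return 0;
}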
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
new file mode 100644
index 000000000..daf07737c
--- /dev/null
+++ b/drivers/tee/optee/core.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crash_dump.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "optee_private.h"
+
+int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start))
+{
+ unsigned int order = get_order(size);
+ struct page *page;
+ int rc = 0;
+
+ /*
+ * Ignore alignment since this is already going to be page aligned
+ * and there's no need for any larger alignment.
+ */
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return -ENOMEM;
+
+ shm->kaddr = page_address(page);
+ shm->paddr = page_to_phys(page);
+ shm->size = PAGE_SIZE << order;
+
+ if (shm_register) {
+ unsigned int nr_pages = 1 << order, i;
+ struct page **pages;
+
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
+
+ rc = shm_register(shm->ctx, shm, pages, nr_pages,
+ (unsigned long)shm->kaddr);
+ kfree(pages);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ free_pages((unsigned long)shm->kaddr, order);
+ return rc;
+}
+
+void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm))
+{
+ if (shm_unregister)
+ shm_unregister(shm->ctx, shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void optee_bus_scan(struct work_struct *work)
+{
+ WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
+}
+
+int optee_open(struct tee_context *ctx, bool cap_memref_null)
+{
+ struct optee_context_data *ctxdata;
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ if (teedev == optee->supp_teedev) {
+ bool busy = true;
+
+ mutex_lock(&optee->supp.mutex);
+ if (!optee->supp.ctx) {
+ busy = false;
+ optee->supp.ctx = ctx;
+ }
+ mutex_unlock(&optee->supp.mutex);
+ if (busy) {
+ kfree(ctxdata);
+ return -EBUSY;
+ }
+
+ if (!optee->scan_bus_done) {
+ INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
+ optee->scan_bus_wq = create_workqueue("optee_bus_scan");
+ if (!optee->scan_bus_wq) {
+ kfree(ctxdata);
+ return -ECHILD;
+ }
+ queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
+ optee->scan_bus_done = true;
+ }
+ }
+ mutex_init(&ctxdata->mutex);
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+
+ ctx->cap_memref_null = cap_memref_null;
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void optee_release_helper(struct tee_context *ctx,
+ int (*close_session)(struct tee_context *ctx,
+ u32 session))
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_session *sess;
+ struct optee_session *sess_tmp;
+
+ if (!ctxdata)
+ return;
+
+ list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
+ list_node) {
+ list_del(&sess->list_node);
+ close_session(ctx, sess->session_id);
+ kfree(sess);
+ }
+ kfree(ctxdata);
+ ctx->data = NULL;
+}
+
+void optee_release(struct tee_context *ctx)
+{
+ optee_release_helper(ctx, optee_close_session_helper);
+}
+
+void optee_release_supp(struct tee_context *ctx)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+
+ optee_release_helper(ctx, optee_close_session_helper);
+ if (optee->scan_bus_wq) {
+ destroy_workqueue(optee->scan_bus_wq);
+ optee->scan_bus_wq = NULL;
+ }
+ optee_supp_release(&optee->supp);
+}
+
+void optee_remove_common(struct optee *optee)
+{
+ /* Unregister OP-TEE specific client devices on TEE bus */
+ optee_unregister_devices();
+
+ optee_notif_uninit(optee);
+ optee_shm_arg_cache_uninit(optee);
+ teedev_close_context(optee->ctx);
+ /*
+ * The two devices have to be unregistered before we can free the
+ * other resources.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+
+ tee_shm_pool_free(optee->pool);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+}
+
+static int smc_abi_rc;
+static int ffa_abi_rc;
+
+static int optee_core_init(void)
+{
+ /*
+ * The kernel may have crashed at the same time that all available
+ * secure world threads were suspended and we cannot reschedule the
+ * suspended threads without access to the crashed kernel's wait_queue.
+ * Therefore, we cannot reliably initialize the OP-TEE driver in the
+ * kdump kernel.
+ */
+ if (is_kdump_kernel())
+ return -ENODEV;
+
+ smc_abi_rc = optee_smc_abi_register();
+ ffa_abi_rc = optee_ffa_abi_register();
+
+ /* If both failed there's no point with this module */
+ if (smc_abi_rc && ffa_abi_rc)
+ return smc_abi_rc;
+ return 0;
+}
+module_init(optee_core_init);
+
+static void optee_core_exit(void)
+{
+ if (!smc_abi_rc)
+ optee_smc_abi_unregister();
+ if (!ffa_abi_rc)
+ optee_ffa_abi_unregister();
+}
+module_exit(optee_core_exit);
+
+MODULE_AUTHOR("Linaro");
+MODULE_DESCRIPTION("OP-TEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:optee");
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
new file mode 100644
index 000000000..4b1092127
--- /dev/null
+++ b/drivers/tee/optee/device.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include "optee_private.h"
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int get_devices(struct tee_context *ctx, u32 session,
+ struct tee_shm *device_shm, u32 *shm_size,
+ u32 func)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ inv_arg.func = func;
+ inv_arg.session = session;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = device_shm;
+ param[0].u.memref.size = *shm_size;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(ctx, &inv_arg, param);
+ if ((ret < 0) || ((inv_arg.ret != TEEC_SUCCESS) &&
+ (inv_arg.ret != TEEC_ERROR_SHORT_BUFFER))) {
+ pr_err("PTA_CMD_GET_DEVICES invoke function err: %x\n",
+ inv_arg.ret);
+ return -EINVAL;
+ }
+
+ *shm_size = param[0].u.memref.size;
+
+ return 0;
+}
+
+static void optee_release_device(struct device *dev)
+{
+ struct tee_client_device *optee_device = to_tee_client_device(dev);
+
+ kfree(optee_device);
+}
+
+static ssize_t need_supplicant_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static DEVICE_ATTR_RO(need_supplicant);
+
+static int optee_register_device(const uuid_t *device_uuid, u32 func)
+{
+ struct tee_client_device *optee_device = NULL;
+ int rc;
+
+ optee_device = kzalloc(sizeof(*optee_device), GFP_KERNEL);
+ if (!optee_device)
+ return -ENOMEM;
+
+ optee_device->dev.bus = &tee_bus_type;
+ optee_device->dev.release = optee_release_device;
+ if (dev_set_name(&optee_device->dev, "optee-ta-%pUb", device_uuid)) {
+ kfree(optee_device);
+ return -ENOMEM;
+ }
+ uuid_copy(&optee_device->id.uuid, device_uuid);
+
+ rc = device_register(&optee_device->dev);
+ if (rc) {
+ pr_err("device registration failed, err: %d\n", rc);
+ put_device(&optee_device->dev);
+ }
+
+ if (func == PTA_CMD_GET_DEVICES_SUPP)
+ device_create_file(&optee_device->dev,
+ &dev_attr_need_supplicant);
+
+ return rc;
+}
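+
+/*
+ * Illustrative sketch (hypothetical driver name, UUID and callbacks):
+ * client drivers bind to the devices registered above by matching on the
+ * TA UUID via the TEE bus:
+ *
+ *	static const struct tee_client_device_id example_id_table[] = {
+ *		{ UUID_INIT(0x00112233, 0x4455, 0x6677, 0x88, 0x99,
+ *			    0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff) },
+ *		{}
+ *	};
+ *	MODULE_DEVICE_TABLE(tee, example_id_table);
+ *
+ *	static struct tee_client_driver example_driver = {
+ *		.id_table = example_id_table,
+ *		.driver = {
+ *			.name = "example-ta",
+ *			.bus = &tee_bus_type,
+ *			.probe = example_probe,
+ *			.remove = example_remove,
+ *		},
+ *	};
+ *
+ * and are registered with driver_register(&example_driver.driver).
+ */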
+
+static int __optee_enumerate_devices(u32 func)
+{
+ const uuid_t pta_uuid =
+ UUID_INIT(0x7011a688, 0xddde, 0x4053,
+ 0xa5, 0xa9, 0x7b, 0x3c, 0x4d, 0xdf, 0x13, 0xb8);
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *device_shm = NULL;
+ const uuid_t *device_uuid = NULL;
+ struct tee_context *ctx = NULL;
+ u32 shm_size = 0, idx, num_devices = 0;
+ int rc;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with OP-TEE driver */
+ ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+ if (IS_ERR(ctx))
+ return -ENODEV;
+
+ /* Open session with device enumeration pseudo TA */
+ export_uuid(sess_arg.uuid, &pta_uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != TEEC_SUCCESS)) {
+ /* Device enumeration pseudo TA not found */
+ rc = 0;
+ goto out_ctx;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, NULL, &shm_size, func);
+ if (rc < 0 || !shm_size)
+ goto out_sess;
+
+ device_shm = tee_shm_alloc_kernel_buf(ctx, shm_size);
+ if (IS_ERR(device_shm)) {
+ pr_err("tee_shm_alloc_kernel_buf failed\n");
+ rc = PTR_ERR(device_shm);
+ goto out_sess;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, device_shm, &shm_size, func);
+ if (rc < 0)
+ goto out_shm;
+
+ device_uuid = tee_shm_get_va(device_shm, 0);
+ if (IS_ERR(device_uuid)) {
+ pr_err("tee_shm_get_va failed\n");
+ rc = PTR_ERR(device_uuid);
+ goto out_shm;
+ }
+
+ num_devices = shm_size / sizeof(uuid_t);
+
+ for (idx = 0; idx < num_devices; idx++) {
+ rc = optee_register_device(&device_uuid[idx], func);
+ if (rc)
+ goto out_shm;
+ }
+
+out_shm:
+ tee_shm_free(device_shm);
+out_sess:
+ tee_client_close_session(ctx, sess_arg.session);
+out_ctx:
+ tee_client_close_context(ctx);
+
+ return rc;
+}
+
+int optee_enumerate_devices(u32 func)
+{
+ return __optee_enumerate_devices(func);
+}
+
+static int __optee_unregister_device(struct device *dev, void *data)
+{
+ if (!strncmp(dev_name(dev), "optee-ta", strlen("optee-ta")))
+ device_unregister(dev);
+
+ return 0;
+}
+
+void optee_unregister_devices(void)
+{
+ bus_for_each_dev(&tee_bus_type, NULL, NULL,
+ __optee_unregister_device);
+}
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
new file mode 100644
index 000000000..0828240f2
--- /dev/null
+++ b/drivers/tee/optee/ffa_abi.c
@@ -0,0 +1,922 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/errno.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_private.h"
+#include "optee_ffa.h"
+#include "optee_rpc_cmd.h"
+
+/*
+ * This file implements the FF-A ABI used when communicating with secure world
+ * OP-TEE OS via FF-A.
+ * This file is divided into the following sections:
+ * 1. Maintain a hash table for lookup of a global FF-A memory handle
+ * 2. Convert between struct tee_param and struct optee_msg_param
+ * 3. Low level support functions to register shared memory in secure world
+ * 4. Dynamic shared memory pool based on alloc_pages()
+ * 5. Do a normal scheduled call into secure world
+ * 6. Driver initialization.
+ */
+
+/*
+ * 1. Maintain a hash table for lookup of a global FF-A memory handle
+ *
+ * FF-A assigns a global memory handle for each piece of shared memory.
+ * This handle is then used when communicating with secure world.
+ *
+ * Main functions are optee_shm_add_ffa_handle() and optee_shm_rem_ffa_handle()
+ */
+struct shm_rhash {
+ struct tee_shm *shm;
+ u64 global_id;
+ struct rhash_head linkage;
+};
+
+static void rh_free_fn(void *ptr, void *arg)
+{
+ kfree(ptr);
+}
+
+static const struct rhashtable_params shm_rhash_params = {
+ .head_offset = offsetof(struct shm_rhash, linkage),
+ .key_len = sizeof(u64),
+ .key_offset = offsetof(struct shm_rhash, global_id),
+ .automatic_shrinking = true,
+};
+
+static struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee,
+ u64 global_id)
+{
+ struct tee_shm *shm = NULL;
+ struct shm_rhash *r;
+
+ mutex_lock(&optee->ffa.mutex);
+ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
+ shm_rhash_params);
+ if (r)
+ shm = r->shm;
+ mutex_unlock(&optee->ffa.mutex);
+
+ return shm;
+}
+
+static int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm,
+ u64 global_id)
+{
+ struct shm_rhash *r;
+ int rc;
+
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+ r->shm = shm;
+ r->global_id = global_id;
+
+ mutex_lock(&optee->ffa.mutex);
+ rc = rhashtable_lookup_insert_fast(&optee->ffa.global_ids, &r->linkage,
+ shm_rhash_params);
+ mutex_unlock(&optee->ffa.mutex);
+
+ if (rc)
+ kfree(r);
+
+ return rc;
+}
+
+static int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id)
+{
+ struct shm_rhash *r;
+ int rc = -ENOENT;
+
+ mutex_lock(&optee->ffa.mutex);
+ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
+ shm_rhash_params);
+ if (r)
+ rc = rhashtable_remove_fast(&optee->ffa.global_ids,
+ &r->linkage, shm_rhash_params);
+ mutex_unlock(&optee->ffa.mutex);
+
+ if (!rc)
+ kfree(r);
+
+ return rc;
+}
+
+/*
+ * 2. Convert between struct tee_param and struct optee_msg_param
+ *
+ * optee_ffa_from_msg_param() and optee_ffa_to_msg_param() are the main
+ * functions.
+ */
+
+static void from_msg_param_ffa_mem(struct optee *optee, struct tee_param *p,
+ u32 attr, const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm = NULL;
+ u64 offs_high = 0;
+ u64 offs_low = 0;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
+ p->u.memref.size = mp->u.fmem.size;
+
+ if (mp->u.fmem.global_id != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
+ shm = optee_shm_from_ffa_handle(optee, mp->u.fmem.global_id);
+ p->u.memref.shm = shm;
+
+ if (shm) {
+ offs_low = mp->u.fmem.offs_low;
+ offs_high = mp->u.fmem.offs_high;
+ }
+ p->u.memref.shm_offs = offs_low | offs_high << 32;
+}
+
+/**
+ * optee_ffa_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @optee: main service struct
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ *
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_ffa_from_msg_param(struct optee *optee,
+ struct tee_param *params, size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ optee_from_msg_param_value(p, attr, mp);
+ break;
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT:
+ from_msg_param_ffa_mem(optee, p, attr, mp);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int to_msg_param_ffa_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ struct tee_shm *shm = p->u.memref.shm;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_FMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ if (shm) {
+ u64 shm_offs = p->u.memref.shm_offs;
+
+ mp->u.fmem.internal_offs = shm->offset;
+
+ mp->u.fmem.offs_low = shm_offs;
+ mp->u.fmem.offs_high = shm_offs >> 32;
+ /* Check that the entire offset could be stored. */
+ if (mp->u.fmem.offs_high != shm_offs >> 32)
+ return -EINVAL;
+
+ mp->u.fmem.global_id = shm->sec_world_id;
+ } else {
+ memset(&mp->u, 0, sizeof(mp->u));
+ mp->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
+ }
+ mp->u.fmem.size = p->u.memref.size;
+
+ return 0;
+}
+
+/**
+ * optee_ffa_to_msg_param() - convert from struct tee_params to OPTEE_MSG
+ * parameters
+ * @optee: main service struct
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params:	subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_ffa_to_msg_param(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params,
+ const struct tee_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ optee_to_msg_param_value(mp, p);
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (to_msg_param_ffa_mem(mp, p))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * 3. Low level support functions to register shared memory in secure world
+ *
+ * Functions to register and unregister shared memory both for normal
+ * clients and for tee-supplicant.
+ */
+
+static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ struct ffa_mem_region_attributes mem_attr = {
+ .receiver = ffa_dev->vm_id,
+ .attrs = FFA_MEM_RW,
+ };
+ struct ffa_mem_ops_args args = {
+ .use_txbuf = true,
+ .attrs = &mem_attr,
+ .nattrs = 1,
+ };
+ struct sg_table sgt;
+ int rc;
+
+ rc = optee_check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
+ num_pages * PAGE_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+ args.sg = sgt.sgl;
+ rc = mem_ops->memory_share(&args);
+ sg_free_table(&sgt);
+ if (rc)
+ return rc;
+
+ rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle);
+ if (rc) {
+ mem_ops->memory_reclaim(args.g_handle, 0);
+ return rc;
+ }
+
+ shm->sec_world_id = args.g_handle;
+
+ return 0;
+}
+
+static int optee_ffa_shm_unregister(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ u64 global_handle = shm->sec_world_id;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_UNREGISTER_SHM,
+ .data1 = (u32)global_handle,
+ .data2 = (u32)(global_handle >> 32)
+ };
+ int rc;
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ shm->sec_world_id = 0;
+
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc);
+
+ rc = mem_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+		pr_err("mem_reclaim: 0x%llx %d\n", global_handle, rc);
+
+ return rc;
+}
+
+static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_mem_ops *mem_ops;
+ u64 global_handle = shm->sec_world_id;
+ int rc;
+
+ /*
+	 * We're skipping the OPTEE_FFA_UNREGISTER_SHM call
+ * since this is OP-TEE freeing via RPC so it has already retired
+ * this ID.
+ */
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ mem_ops = optee->ffa.ffa_dev->ops->mem_ops;
+ rc = mem_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+		pr_err("mem_reclaim: 0x%llx %d\n", global_handle, rc);
+
+ shm->sec_world_id = 0;
+
+ return rc;
+}
+
+/*
+ * 4. Dynamic shared memory pool based on alloc_pages()
+ *
+ * Implements an OP-TEE specific shared memory pool.
+ * The main function is optee_ffa_shm_pool_alloc_pages().
+ */
+
+static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
+ struct tee_shm *shm, size_t size, size_t align)
+{
+ return optee_pool_op_alloc_helper(pool, shm, size, align,
+ optee_ffa_shm_register);
+}
+
+static void pool_ffa_op_free(struct tee_shm_pool *pool,
+ struct tee_shm *shm)
+{
+ optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister);
+}
+
+static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
+{
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ffa_ops = {
+ .alloc = pool_ffa_op_alloc,
+ .free = pool_ffa_op_free,
+ .destroy_pool = pool_ffa_op_destroy_pool,
+};
+
+/**
+ * optee_ffa_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used with OP-TEE over FF-A. In this case command buffers
+ * and such are allocated from the kernel's own memory.
+ */
+static struct tee_shm_pool *optee_ffa_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->ops = &pool_ffa_ops;
+
+ return pool;
+}
+
+/*
+ * 5. Do a normal scheduled call into secure world
+ *
+ * The function optee_ffa_do_call_with_arg() performs a normal scheduled
+ * call into secure world. During this call secure world may request help
+ * from normal world using RPCs, Remote Procedure Calls. This includes
+ * delivery of non-secure interrupts to, for instance, allow rescheduling
+ * of the current task.
+ */
+
+static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc_priv_buf(optee->ctx,
+ arg->params[0].u.value.b);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ arg->params[0] = (struct optee_msg_param){
+ .attr = OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT,
+ .u.fmem.size = tee_shm_get_size(shm),
+ .u.fmem.global_id = shm->sec_world_id,
+ .u.fmem.internal_offs = shm->offset,
+ };
+
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_ffa_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto err_bad_param;
+
+ shm = optee_shm_from_ffa_handle(optee, arg->params[0].u.value.b);
+ if (!shm)
+ goto err_bad_param;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ optee_rpc_cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+ default:
+ goto err_bad_param;
+ }
+ arg->ret = TEEC_SUCCESS;
+ return;
+
+err_bad_param:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_SHM_ALLOC:
+ handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
+ break;
+ case OPTEE_RPC_CMD_SHM_FREE:
+ handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
+ break;
+ default:
+ optee_rpc_cmd(ctx, optee, arg);
+ }
+}
+
+static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
+ u32 cmd, struct optee_msg_arg *arg)
+{
+ switch (cmd) {
+ case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
+ handle_ffa_rpc_func_cmd(ctx, optee, arg);
+ break;
+ case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
+ /* Interrupt delivered by now */
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n", cmd);
+ break;
+ }
+}
+
+static int optee_ffa_yielding_call(struct tee_context *ctx,
+ struct ffa_send_direct_data *data,
+ struct optee_msg_arg *rpc_arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ struct optee_call_waiter w;
+ u32 cmd = data->data0;
+ u32 w4 = data->data1;
+ u32 w5 = data->data2;
+ u32 w6 = data->data3;
+ int rc;
+
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ rc = msg_ops->sync_send_receive(ffa_dev, data);
+ if (rc)
+ goto done;
+
+ switch ((int)data->data0) {
+ case TEEC_SUCCESS:
+ break;
+ case TEEC_ERROR_BUSY:
+ if (cmd == OPTEE_FFA_YIELDING_CALL_RESUME) {
+ rc = -EIO;
+ goto done;
+ }
+
+ /*
+			 * Out of threads in secure world, wait for a
+			 * thread to become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ data->data0 = cmd;
+ data->data1 = w4;
+ data->data2 = w5;
+ data->data3 = w6;
+ continue;
+ default:
+ rc = -EIO;
+ goto done;
+ }
+
+ if (data->data1 == OPTEE_FFA_YIELDING_CALL_RETURN_DONE)
+ goto done;
+
+ /*
+ * OP-TEE has returned with a RPC request.
+ *
+ * Note that data->data4 (passed in register w7) is already
+		 * filled in by msg_ops->sync_send_receive() returning
+ * above.
+ */
+ cond_resched();
+ optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
+ cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
+ data->data0 = cmd;
+ data->data1 = 0;
+ data->data2 = 0;
+ data->data3 = 0;
+ }
+done:
+ /*
+	 * We're done with our thread in secure world; if there are any
+	 * waiters, wake one up.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return rc;
+}
+
+/**
+ * optee_ffa_do_call_with_arg() - Do an FF-A call to enter OP-TEE in secure world
+ * @ctx: calling context
+ * @shm: shared memory holding the message to pass to secure world
+ * @offs: offset of the message in @shm
+ *
+ * Does an FF-A call to OP-TEE in secure world and handles any resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns return code from FF-A, 0 is OK
+ */
+static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
+ struct tee_shm *shm, u_int offs)
+{
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
+ .data1 = (u32)shm->sec_world_id,
+ .data2 = (u32)(shm->sec_world_id >> 32),
+ .data3 = offs,
+ };
+ struct optee_msg_arg *arg;
+ unsigned int rpc_arg_offs;
+ struct optee_msg_arg *rpc_arg;
+
+ /*
+ * The shared memory object has to start on a page when passed as
+ * an argument struct. This is also what the shm pool allocator
+ * returns, but check this before calling secure world to catch
+	 * potential errors early in case something changes.
+ */
+ if (shm->offset)
+ return -EINVAL;
+
+ arg = tee_shm_get_va(shm, offs);
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+
+ rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
+
+ return optee_ffa_yielding_call(ctx, &data, rpc_arg);
+}
+
+/*
+ * 6. Driver initialization
+ *
+ * During driver initialization the OP-TEE Secure Partition is probed to
+ * find out which features it supports so that the driver can be
+ * initialized with a matching configuration.
+ */
+
+static bool optee_ffa_api_is_compatible(struct ffa_device *ffa_dev,
+ const struct ffa_ops *ops)
+{
+ const struct ffa_msg_ops *msg_ops = ops->msg_ops;
+ struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
+ int rc;
+
+ msg_ops->mode_32bit_set(ffa_dev);
+
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d\n", rc);
+ return false;
+ }
+ if (data.data0 != OPTEE_FFA_VERSION_MAJOR ||
+ data.data1 < OPTEE_FFA_VERSION_MINOR) {
+		pr_err("Incompatible OP-TEE API version %lu.%lu\n",
+ data.data0, data.data1);
+ return false;
+ }
+
+ data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d\n", rc);
+ return false;
+ }
+ if (data.data2)
+		pr_info("revision %lu.%lu (%08lx)\n",
+ data.data0, data.data1, data.data2);
+ else
+		pr_info("revision %lu.%lu\n", data.data0, data.data1);
+
+ return true;
+}
+
+static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
+ const struct ffa_ops *ops,
+ u32 *sec_caps,
+ unsigned int *rpc_param_count)
+{
+ struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
+ int rc;
+
+ rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+		pr_err("Unexpected error %d\n", rc);
+ return false;
+ }
+ if (data.data0) {
+		pr_err("Unexpected exchange error %lu\n", data.data0);
+ return false;
+ }
+
+ *rpc_param_count = (u8)data.data1;
+ *sec_caps = data.data2;
+
+ return true;
+}
+
+static void optee_ffa_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM |
+ TEE_GEN_CAP_MEMREF_NULL,
+ };
+
+ *vers = v;
+}
+
+static int optee_ffa_open(struct tee_context *ctx)
+{
+ return optee_open(ctx, true);
+}
+
+static const struct tee_driver_ops optee_ffa_clnt_ops = {
+ .get_version = optee_ffa_get_version,
+ .open = optee_ffa_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+ .shm_register = optee_ffa_shm_register,
+ .shm_unregister = optee_ffa_shm_unregister,
+};
+
+static const struct tee_desc optee_ffa_clnt_desc = {
+ .name = DRIVER_NAME "-ffa-clnt",
+ .ops = &optee_ffa_clnt_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_ffa_supp_ops = {
+ .get_version = optee_ffa_get_version,
+ .open = optee_ffa_open,
+ .release = optee_release_supp,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+ .shm_register = optee_ffa_shm_register, /* same as for clnt ops */
+ .shm_unregister = optee_ffa_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_ffa_supp_desc = {
+ .name = DRIVER_NAME "-ffa-supp",
+ .ops = &optee_ffa_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static const struct optee_ops optee_ffa_ops = {
+ .do_call_with_arg = optee_ffa_do_call_with_arg,
+ .to_msg_param = optee_ffa_to_msg_param,
+ .from_msg_param = optee_ffa_from_msg_param,
+};
+
+static void optee_ffa_remove(struct ffa_device *ffa_dev)
+{
+ struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
+
+ optee_remove_common(optee);
+
+ mutex_destroy(&optee->ffa.mutex);
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+
+ kfree(optee);
+}
+
+static int optee_ffa_probe(struct ffa_device *ffa_dev)
+{
+ const struct ffa_ops *ffa_ops;
+ unsigned int rpc_param_count;
+ struct tee_shm_pool *pool;
+ struct tee_device *teedev;
+ struct tee_context *ctx;
+ u32 arg_cache_flags = 0;
+ struct optee *optee;
+ u32 sec_caps;
+ int rc;
+
+ ffa_ops = ffa_dev->ops;
+
+	if (!optee_ffa_api_is_compatible(ffa_dev, ffa_ops))
+ return -EINVAL;
+
+ if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
+ &rpc_param_count))
+ return -EINVAL;
+ if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
+ arg_cache_flags |= OPTEE_SHM_ARG_SHARED;
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee)
+ return -ENOMEM;
+
+ pool = optee_ffa_shm_pool_alloc_pages();
+ if (IS_ERR(pool)) {
+ rc = PTR_ERR(pool);
+ goto err_free_optee;
+ }
+ optee->pool = pool;
+
+ optee->ops = &optee_ffa_ops;
+ optee->ffa.ffa_dev = ffa_dev;
+ optee->rpc_param_count = rpc_param_count;
+
+ teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
+ optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_pool;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_ffa_supp_desc, NULL, optee->pool,
+ optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_unreg_teedev;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err_unreg_supp_teedev;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err_unreg_supp_teedev;
+
+ rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
+ if (rc)
+ goto err_unreg_supp_teedev;
+ mutex_init(&optee->ffa.mutex);
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_supp_init(&optee->supp);
+ optee_shm_arg_cache_init(optee, arg_cache_flags);
+ ffa_dev_set_drvdata(ffa_dev, optee);
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ goto err_rhashtable_free;
+ }
+ optee->ctx = ctx;
+ rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
+ if (rc)
+ goto err_close_ctx;
+
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
+ if (rc)
+ goto err_unregister_devices;
+
+ pr_info("initialized driver\n");
+ return 0;
+
+err_unregister_devices:
+ optee_unregister_devices();
+ optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
+err_rhashtable_free:
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+ mutex_destroy(&optee->ffa.mutex);
+err_unreg_supp_teedev:
+ tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
+ tee_device_unregister(optee->teedev);
+err_free_pool:
+ tee_shm_pool_free(pool);
+err_free_optee:
+ kfree(optee);
+ return rc;
+}
+
+static const struct ffa_device_id optee_ffa_device_id[] = {
+	/* 486178e0-e7f8-11e3-bc5e-0002a5d5c51b */
+ { UUID_INIT(0x486178e0, 0xe7f8, 0x11e3,
+ 0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b) },
+ {}
+};
+
+static struct ffa_driver optee_ffa_driver = {
+ .name = "optee",
+ .probe = optee_ffa_probe,
+ .remove = optee_ffa_remove,
+ .id_table = optee_ffa_device_id,
+};
+
+int optee_ffa_abi_register(void)
+{
+ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
+ return ffa_register(&optee_ffa_driver);
+ else
+ return -EOPNOTSUPP;
+}
+
+void optee_ffa_abi_unregister(void)
+{
+ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
+ ffa_unregister(&optee_ffa_driver);
+}
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
new file mode 100644
index 000000000..05212842b
--- /dev/null
+++ b/drivers/tee/optee/notif.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+
+struct notif_entry {
+ struct list_head link;
+ struct completion c;
+ u_int key;
+};
+
+static bool have_key(struct optee *optee, u_int key)
+{
+ struct notif_entry *entry;
+
+ list_for_each_entry(entry, &optee->notif.db, link)
+ if (entry->key == key)
+ return true;
+
+ return false;
+}
+
+int optee_notif_wait(struct optee *optee, u_int key)
+{
+ unsigned long flags;
+ struct notif_entry *entry;
+ int rc = 0;
+
+ if (key > optee->notif.max_key)
+ return -EINVAL;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ init_completion(&entry->c);
+ entry->key = key;
+
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ /*
+ * If the bit is already set it means that the key has already
+ * been posted and we must not wait.
+ */
+ if (test_bit(key, optee->notif.bitmap)) {
+ clear_bit(key, optee->notif.bitmap);
+ goto out;
+ }
+
+ /*
+ * Check if someone is already waiting for this key. If there is
+	 * Check if someone is already waiting for this key. If there is,
+ */
+ if (have_key(optee, key)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ list_add_tail(&entry->link, &optee->notif.db);
+
+ /*
+ * Unlock temporarily and wait for completion.
+ */
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+ wait_for_completion(&entry->c);
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ list_del(&entry->link);
+out:
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+
+ kfree(entry);
+
+ return rc;
+}
+
+int optee_notif_send(struct optee *optee, u_int key)
+{
+ unsigned long flags;
+ struct notif_entry *entry;
+
+ if (key > optee->notif.max_key)
+ return -EINVAL;
+
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ list_for_each_entry(entry, &optee->notif.db, link)
+ if (entry->key == key) {
+ complete(&entry->c);
+ goto out;
+ }
+
+	/* Only set the bit in case there was nobody waiting */
+ set_bit(key, optee->notif.bitmap);
+out:
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+
+ return 0;
+}
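+
+/*
+ * Illustrative flow (sketch only, not a complete example): a secure world
+ * thread waiting on a notification value K ends up, via RPC, in
+ * optee_notif_wait(optee, K), while the sender side (for instance the
+ * notification interrupt handling in the SMC ABI) later calls
+ * optee_notif_send(optee, K) to complete it:
+ *
+ *	// waiter, called from the RPC handler
+ *	rc = optee_notif_wait(optee, key);
+ *
+ *	// sender, called when the value is retrieved from secure world
+ *	rc = optee_notif_send(optee, key);
+ *
+ * The bitmap above lets a send that arrives before the matching wait
+ * succeed without blocking.
+ */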
+
+int optee_notif_init(struct optee *optee, u_int max_key)
+{
+ spin_lock_init(&optee->notif.lock);
+ INIT_LIST_HEAD(&optee->notif.db);
+ optee->notif.bitmap = bitmap_zalloc(max_key, GFP_KERNEL);
+ if (!optee->notif.bitmap)
+ return -ENOMEM;
+
+ optee->notif.max_key = max_key;
+
+ return 0;
+}
+
+void optee_notif_uninit(struct optee *optee)
+{
+ bitmap_free(optee->notif.bitmap);
+}
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
new file mode 100644
index 000000000..97266243d
--- /dev/null
+++ b/drivers/tee/optee/optee_ffa.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2019-2021, Linaro Limited
+ */
+
+/*
+ * This file is exported by OP-TEE and is kept in sync between secure world
+ * and normal world drivers. We're using the ARM FF-A 1.0 specification.
+ */
+
+#ifndef __OPTEE_FFA_H
+#define __OPTEE_FFA_H
+
+#include <linux/arm_ffa.h>
+
+/*
+ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and
+ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal
+ * messages.
+ *
+ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
+ * are using the AArch32 SMC calling convention with register usage as
+ * defined in FF-A specification:
+ * w0: Function ID (0x8400006F or 0x84000070)
+ * w1: Source/Destination IDs
+ * w2: Reserved (MBZ)
+ * w3-w7: Implementation defined, free to be used below
+ */
+
+#define OPTEE_FFA_VERSION_MAJOR 1
+#define OPTEE_FFA_VERSION_MINOR 0
+
+#define OPTEE_FFA_BLOCKING_CALL(id) (id)
+#define OPTEE_FFA_YIELDING_CALL_BIT 31
+#define OPTEE_FFA_YIELDING_CALL(id) ((id) | BIT(OPTEE_FFA_YIELDING_CALL_BIT))
+
+/*
+ * Returns the API version implemented, currently follows the FF-A version.
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_GET_API_VERSION
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: OPTEE_FFA_VERSION_MAJOR
+ * w4: OPTEE_FFA_VERSION_MINOR
+ * w5-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_GET_API_VERSION OPTEE_FFA_BLOCKING_CALL(0)
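+
+/*
+ * Illustrative sketch: with the Linux FF-A transport, the w3-w7 registers
+ * above map to struct ffa_send_direct_data::data0-data4, so this call can
+ * be issued as:
+ *
+ *	struct ffa_send_direct_data data = {
+ *		.data0 = OPTEE_FFA_GET_API_VERSION,
+ *	};
+ *
+ *	rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
+ *	// on success data.data0/data.data1 hold the major/minor version
+ */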
+
+/*
+ * Returns the revision of OP-TEE.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_GET_OS_VERSION
+ * w4-w7: Unused (MBZ)
+ *
+ * Return register usage:
+ * w3: CFG_OPTEE_REVISION_MAJOR
+ * w4: CFG_OPTEE_REVISION_MINOR
+ * w5: TEE_IMPL_GIT_SHA1 (or zero if not supported)
+ */
+#define OPTEE_FFA_GET_OS_VERSION OPTEE_FFA_BLOCKING_CALL(1)
+
+/*
+ * Exchange capabilities between normal world and secure world.
+ *
+ * Currently there are no defined capabilities. When features are added new
+ * capabilities may be added.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_EXCHANGE_CAPABILITIES
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4: Bit[7:0]: Number of parameters needed for RPC to be supplied
+ * as the second MSG arg struct for
+ * OPTEE_FFA_YIELDING_CALL_WITH_ARG.
+ * Bit[31:8]: Reserved (MBZ)
+ * w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
+ * unused bits MBZ.
+ * w6-w7: Not used (MBZ)
+ */
+/*
+ * Secure world supports giving an offset into the argument shared memory
+ * object, see also OPTEE_FFA_YIELDING_CALL_WITH_ARG
+ */
+#define OPTEE_FFA_SEC_CAP_ARG_OFFSET BIT(0)
+
+#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
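+
+/*
+ * Illustrative sketch: with the same w-register to ffa_send_direct_data
+ * mapping (w4/w5 returned in data1/data2), the results above are parsed
+ * as:
+ *
+ *	rpc_param_count = (u8)data.data1;	// w4 bits [7:0]
+ *	sec_caps = data.data2;			// w5 capability bits
+ */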
+
+/*
+ * Unregister shared memory
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM
+ * w4: Shared memory handle, lower bits
+ * w5: Shared memory handle, higher bits
+ * w6-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3)
+
+/*
+ * Call with struct optee_msg_arg as argument in the supplied shared memory
+ * with a zero internal offset and normal cached memory attributes.
+ * Register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_WITH_ARG
+ * w4: Lower 32 bits of a 64-bit Shared memory handle
+ * w5: Upper 32 bits of a 64-bit Shared memory handle
+ * w6:    Offset into shared memory pointing to a struct optee_msg_arg.
+ *	   Right after the parameters of this struct (at offset
+ *	   OPTEE_MSG_GET_ARG_SIZE(num_params)) follows a struct optee_msg_arg
+ *	   for RPC; this struct has reserved space for the number of RPC
+ *	   parameters as returned by OPTEE_FFA_EXCHANGE_CAPABILITIES.
+ *	   MBZ unless the bit OPTEE_FFA_SEC_CAP_ARG_OFFSET is received with
+ *	   OPTEE_FFA_EXCHANGE_CAPABILITIES.
+ * w7: Not used (MBZ)
+ * Resume from RPC. Register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_RESUME
+ * w4-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Normal return (yielding call is completed). Register usage:
+ * w3: Error code, 0 on success
+ * w4: OPTEE_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w7: Not used (MBZ)
+ *
+ * RPC interrupt return (RPC from secure world). Register usage:
+ * w3: Error code == 0
+ * w4: Any defined RPC code but OPTEE_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Possible error codes in register w3:
+ * 0: Success
+ * FFA_DENIED: w4 isn't one of OPTEE_FFA_YIELDING_CALL_START or
+ * OPTEE_FFA_YIELDING_CALL_RESUME
+ *
+ * Possible error codes for OPTEE_FFA_YIELDING_CALL_START,
+ * FFA_BUSY: Number of OP-TEE OS threads exceeded,
+ * try again later
+ * FFA_DENIED: RPC shared memory object not found
+ * FFA_INVALID_PARAMETER: Bad shared memory handle or offset into the memory
+ *
+ * Possible error codes for OPTEE_FFA_YIELDING_CALL_RESUME
+ * FFA_INVALID_PARAMETER: Bad resume info
+ */
+#define OPTEE_FFA_YIELDING_CALL_WITH_ARG OPTEE_FFA_YIELDING_CALL(0)
+#define OPTEE_FFA_YIELDING_CALL_RESUME OPTEE_FFA_YIELDING_CALL(1)
+
+#define OPTEE_FFA_YIELDING_CALL_RETURN_DONE 0
+#define OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD 1
+#define OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT 2
+
+#endif /*__OPTEE_FFA_H*/
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
new file mode 100644
index 000000000..70e9cc2ee
--- /dev/null
+++ b/drivers/tee/optee/optee_msg.h
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+#ifndef _OPTEE_MSG_H
+#define _OPTEE_MSG_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/*
+ * This file defines the OP-TEE message protocol (ABI) used to communicate
+ * with an instance of OP-TEE running in secure world.
+ *
+ * This file is divided into two sections.
+ * 1. Formatting of messages.
+ * 2. Requests from normal world
+ */
+
+/*****************************************************************************
+ * Part 1 - formatting of messages
+ *****************************************************************************/
+
+#define OPTEE_MSG_ATTR_TYPE_NONE 0x0
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1
+#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5
+#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7
+#define OPTEE_MSG_ATTR_TYPE_FMEM_INPUT OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+#define OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT
+#define OPTEE_MSG_ATTR_TYPE_FMEM_INOUT OPTEE_MSG_ATTR_TYPE_RMEM_INOUT
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9
+#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb
+
+#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0)
+
+/*
+ * Meta parameter to be absorbed by the Secure OS and not passed
+ * to the Trusted Application.
+ *
+ * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION.
+ */
+#define OPTEE_MSG_ATTR_META BIT(8)
+
+/*
+ * Pointer to a list of pages used to register user-defined SHM buffer.
+ * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
+ * buf_ptr should point to the beginning of the buffer. The buffer will
+ * contain a list of page addresses. OP-TEE core can reconstruct a contiguous
+ * buffer from that list of page addresses. Page addresses are stored as
+ * 64-bit values. The last entry on a page should point to the next page of
+ * the buffer. Every entry in the buffer should point to the beginning of a
+ * 4k page (the 12 least significant bits must be zero).
+ *
+ * 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should hold
+ * page offset of user buffer.
+ *
+ * So, entries should be placed like members of this structure:
+ *
+ * struct page_data {
+ * uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
+ * uint64_t next_page_data;
+ * };
+ *
+ * Structure is designed to exactly fit into the page size
+ * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page.
+ *
+ * The size of 4KB is chosen because this is the smallest page size for ARM
+ * architectures. If REE uses larger pages, it should divide them into 4KB ones.
+ */
+#define OPTEE_MSG_ATTR_NONCONTIG BIT(9)
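+
+/*
+ * Illustrative sketch (hypothetical variable names, assuming all pages fit
+ * in a single page_data block): filling the list described above could
+ * look like:
+ *
+ *	struct page_data *pd = list_buf;	// zeroed 4KB buffer
+ *	size_t n;
+ *
+ *	for (n = 0; n < num_pages; n++)
+ *		pd->pages_array[n] = page_to_phys(pages[n]);
+ *	pd->next_page_data = 0;	// no chained block in this sketch
+ */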
+
+/*
+ * Memory attributes for caching passed with temp memrefs. The actual value
+ * used is defined outside the message protocol with the exception of
+ * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already
+ * defined for the memory range should be used. If optee_smc.h is used as
+ * bearer of this protocol OPTEE_SMC_SHM_* is used for values.
+ */
+#define OPTEE_MSG_ATTR_CACHE_SHIFT 16
+#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0)
+#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0
+
+/*
+ * Same values as TEE_LOGIN_* from TEE Internal API
+ */
+#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000
+#define OPTEE_MSG_LOGIN_USER 0x00000001
+#define OPTEE_MSG_LOGIN_GROUP 0x00000002
+#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004
+#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005
+#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006
+
+/*
+ * Page size used in non-contiguous buffer entries
+ */
+#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096
+
+#define OPTEE_MSG_FMEM_INVALID_GLOBAL_ID 0xffffffffffffffff
+
+/**
+ * struct optee_msg_param_tmem - temporary memory reference parameter
+ * @buf_ptr: Address of the buffer
+ * @size: Size of the buffer
+ * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm
+ *
+ * Secure and normal world communicate pointers as physical addresses
+ * instead of virtual addresses. This is because secure and normal world
+ * have completely independent memory mappings. Normal world can even have
+ * a hypervisor which needs to translate the guest physical address (AKA
+ * IPA in ARM documentation) to a real physical address before passing the
+ * structure to secure world.
+ */
+struct optee_msg_param_tmem {
+ u64 buf_ptr;
+ u64 size;
+ u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_rmem - registered memory reference parameter
+ * @offs: Offset into shared memory reference
+ * @size: Size of the buffer
+ * @shm_ref: Shared memory reference, pointer to a struct tee_shm
+ */
+struct optee_msg_param_rmem {
+ u64 offs;
+ u64 size;
+ u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_fmem - FF-A memory reference parameter
+ * @offs_low:	   Lower bits of offset into shared memory reference
+ * @offs_high:	   Upper bits of offset into shared memory reference
+ * @internal_offs: Internal offset into the first page of shared memory
+ * reference
+ * @size: Size of the buffer
+ * @global_id: Global identifier of Shared memory
+ */
+struct optee_msg_param_fmem {
+ u32 offs_low;
+ u16 offs_high;
+ u16 internal_offs;
+ u64 size;
+ u64 global_id;
+};
+
+/**
+ * struct optee_msg_param_value - opaque value parameter
+ *
+ * Value parameters are passed unchecked between normal and secure world.
+ */
+struct optee_msg_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
+};
+
+/**
+ * struct optee_msg_param - parameter used together with struct optee_msg_arg
+ * @attr: attributes
+ * @tmem: parameter by temporary memory reference
+ * @rmem: parameter by registered memory reference
+ * @fmem:	parameter by FF-A registered memory reference
+ * @value: parameter by opaque value
+ * @octets: parameter by octet string
+ *
+ * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* or the alias OPTEE_MSG_ATTR_TYPE_FMEM_* indicates
+ * @rmem or @fmem depending on the conduit.
+ * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
+ */
+struct optee_msg_param {
+ u64 attr;
+ union {
+ struct optee_msg_param_tmem tmem;
+ struct optee_msg_param_rmem rmem;
+ struct optee_msg_param_fmem fmem;
+ struct optee_msg_param_value value;
+ u8 octets[24];
+ } u;
+};
+
+/**
+ * struct optee_msg_arg - call argument
+ * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_*
+ * @func: Trusted Application function, specific to the Trusted Application,
+ * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND
+ * @session: In parameter for all OPTEE_MSG_CMD_* except
+ * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead
+ * @cancel_id: Cancellation id, a unique value to identify this request
+ * @ret: return value
+ * @ret_origin: origin of the return value
+ * @num_params: number of parameters supplied to the OS Command
+ * @params: the parameters supplied to the OS Command
+ *
+ * All normal calls to the Trusted OS use this struct. If cmd requires further
+ * information than what these fields hold it can be passed as a parameter
+ * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding
+ * attrs field). All parameters tagged as meta have to come first.
+ */
+struct optee_msg_arg {
+ u32 cmd;
+ u32 func;
+ u32 session;
+ u32 cancel_id;
+ u32 pad;
+ u32 ret;
+ u32 ret_origin;
+ u32 num_params;
+
+	/* num_params tells the actual number of elements in params */
+ struct optee_msg_param params[];
+};
+
+/**
+ * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg
+ *
+ * @num_params: Number of parameters embedded in the struct optee_msg_arg
+ *
+ * Returns the size of the struct optee_msg_arg together with the number
+ * of embedded parameters.
+ */
+#define OPTEE_MSG_GET_ARG_SIZE(num_params) \
+ (sizeof(struct optee_msg_arg) + \
+ sizeof(struct optee_msg_param) * (num_params))
+
+/*****************************************************************************
+ * Part 2 - requests from normal world
+ *****************************************************************************/
+
+/*
+ * Return the following UID if using API specified in this file without
+ * further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
+ * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
+ */
+#define OPTEE_MSG_UID_0 0x384fb3e0
+#define OPTEE_MSG_UID_1 0xe7f811e3
+#define OPTEE_MSG_UID_2 0xaf630002
+#define OPTEE_MSG_UID_3 0xa5d5c51b
+#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01
+
+/*
+ * Returns 2.0 if using API specified in this file without further
+ * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR
+ * and OPTEE_MSG_REVISION_MINOR
+ */
+#define OPTEE_MSG_REVISION_MAJOR 2
+#define OPTEE_MSG_REVISION_MINOR 0
+#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that the returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in 4 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_UID described above.
+ */
+#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0
+#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3
+#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002
+#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b
+#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in 2 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_REVISION described above.
+ */
+#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
+
+/*
+ * Do a secure call with struct optee_msg_arg as argument
+ * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
+ *
+ * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application.
+ * The first two parameters are tagged as meta, holding two value
+ * parameters to pass the following information:
+ * param[0].u.value.a-b uuid of Trusted Application
+ * param[1].u.value.a-b uuid of Client
+ * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_*
+ *
+ * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command in a previously opened
+ * session to a Trusted Application. struct optee_msg_arg::func is the
+ * Trusted Application function, specific to the Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to
+ * a Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command.
+ *
+ * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
+ * information is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ * [| OPTEE_MSG_ATTR_NONCONTIG]
+ * [in] param[0].u.tmem.buf_ptr physical address (of first fragment)
+ * [in] param[0].u.tmem.size size (of first fragment)
+ * [in] param[0].u.tmem.shm_ref holds shared memory reference
+ *
+ * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
+ * memory reference. The information is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+ * [in] param[0].u.rmem.shm_ref holds shared memory reference
+ * [in] param[0].u.rmem.offs 0
+ * [in] param[0].u.rmem.size 0
+ *
+ * OPTEE_MSG_CMD_DO_BOTTOM_HALF does the scheduled bottom half processing
+ * of a driver.
+ *
+ * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that normal world
+ * is from now on unable to process asynchronous notifications. Typically
+ * used when the driver is shut down.
+ */
+#define OPTEE_MSG_CMD_OPEN_SESSION 0
+#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
+#define OPTEE_MSG_CMD_CLOSE_SESSION 2
+#define OPTEE_MSG_CMD_CANCEL 3
+#define OPTEE_MSG_CMD_REGISTER_SHM 4
+#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
+#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
+#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
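+
+/*
+ * Illustrative sketch (not normative): a minimal OPTEE_MSG_CMD_OPEN_SESSION
+ * argument with the two meta value parameters described above, assuming arg
+ * points to shared memory with room for at least two parameters:
+ *
+ *	arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
+ *	arg->num_params = 2;
+ *	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ *			      OPTEE_MSG_ATTR_META;
+ *	arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ *			      OPTEE_MSG_ATTR_META;
+ *	// params[0].u.value.a/b carry the TA UUID, params[1].u.value.a/b
+ *	// the client UUID and params[1].u.value.c the login class
+ */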
+
+#endif /* _OPTEE_MSG_H */
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
new file mode 100644
index 000000000..04ae58892
--- /dev/null
+++ b/drivers/tee/optee/optee_private.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+
+#ifndef OPTEE_PRIVATE_H
+#define OPTEE_PRIVATE_H
+
+#include <linux/arm-smccc.h>
+#include <linux/rhashtable.h>
+#include <linux/semaphore.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_msg.h"
+
+#define DRIVER_NAME "optee"
+
+#define OPTEE_MAX_ARG_SIZE 1024
+
+/* Some Global Platform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define TEEC_ERROR_BUSY 0xFFFF000D
+#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+/*
+ * This value should be larger than the number of threads in secure world
+ * to meet the need from secure world. The number of threads in secure
+ * world is usually not even close to 255 so we should be safe for now.
+ */
+#define OPTEE_DEFAULT_MAX_NOTIF_VALUE 255
+
+typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ struct arm_smccc_res *);
+
+struct optee_call_waiter {
+ struct list_head list_node;
+ struct completion c;
+};
+
+struct optee_call_queue {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head waiters;
+};
+
+struct optee_notif {
+ u_int max_key;
+ /* Serializes access to the elements below in this struct */
+ spinlock_t lock;
+ struct list_head db;
+ u_long *bitmap;
+};
+
+#define OPTEE_SHM_ARG_ALLOC_PRIV BIT(0)
+#define OPTEE_SHM_ARG_SHARED BIT(1)
+struct optee_shm_arg_entry;
+struct optee_shm_arg_cache {
+ u32 flags;
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head shm_args;
+};
+
+/**
+ * struct optee_supp - supplicant synchronization struct
+ * @ctx:		the context of the currently connected supplicant.
+ * if !NULL the supplicant device is available for use,
+ * else busy
+ * @mutex: held while accessing content of this struct
+ * @req_id: current request id if supplicant is doing synchronous
+ * communication, else -1
+ * @reqs: queued request not yet retrieved by supplicant
+ * @idr: IDR holding all requests currently being processed
+ * by supplicant
+ * @reqs_c: completion used by supplicant when waiting for a
+ * request to be queued.
+ */
+struct optee_supp {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct tee_context *ctx;
+
+ int req_id;
+ struct list_head reqs;
+ struct idr idr;
+ struct completion reqs_c;
+};
+
+struct optee_smc {
+ optee_invoke_fn *invoke_fn;
+ void *memremaped_shm;
+ u32 sec_caps;
+ unsigned int notif_irq;
+};
+
+/**
+ * struct optee_ffa - FF-A communication struct
+ * @ffa_dev:		FF-A device, contains the destination id, the id of
+ *			OP-TEE in secure world
+ * @mutex:		Serializes access to @global_ids
+ * @global_ids:		FF-A shared memory global handle translation
+ */
+struct optee_ffa {
+ struct ffa_device *ffa_dev;
+ /* Serializes access to @global_ids */
+ struct mutex mutex;
+ struct rhashtable global_ids;
+};
+
+struct optee;
+
+/**
+ * struct optee_ops - OP-TEE driver internal operations
+ * @do_call_with_arg: enters OP-TEE in secure world
+ * @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters
+ * @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param
+ *
+ * These OPs are only supposed to be used internally in the OP-TEE driver
+ * as a way of abstracting the different methods of entering OP-TEE in
+ * secure world.
+ */
+struct optee_ops {
+ int (*do_call_with_arg)(struct tee_context *ctx,
+ struct tee_shm *shm_arg, u_int offs);
+ int (*to_msg_param)(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params, const struct tee_param *params);
+ int (*from_msg_param)(struct optee *optee, struct tee_param *params,
+ size_t num_params,
+ const struct optee_msg_param *msg_params);
+};
+
+/**
+ * struct optee - main service struct
+ * @supp_teedev: supplicant device
+ * @teedev: client device
+ * @ops: internal callbacks for different ways to reach secure
+ * world
+ * @ctx: driver internal TEE context
+ * @smc: specific to SMC ABI
+ * @ffa: specific to FF-A ABI
+ * @shm_arg_cache: cache of shared memory objects carrying argument structs
+ * @call_queue: queue of threads waiting to call @invoke_fn
+ * @notif: notification synchronization struct
+ * @supp: supplicant synchronization struct for RPC to supplicant
+ * @pool: shared memory pool
+ * @rpc_param_count: if > 0, the number of RPC parameters to make room for
+ * @scan_bus_done: flag if device registration was already done
+ * @scan_bus_wq: workqueue to scan the OP-TEE bus and register OP-TEE drivers
+ * @scan_bus_work: work to scan the OP-TEE bus and register OP-TEE drivers
+ */
+struct optee {
+ struct tee_device *supp_teedev;
+ struct tee_device *teedev;
+ const struct optee_ops *ops;
+ struct tee_context *ctx;
+ union {
+ struct optee_smc smc;
+ struct optee_ffa ffa;
+ };
+ struct optee_shm_arg_cache shm_arg_cache;
+ struct optee_call_queue call_queue;
+ struct optee_notif notif;
+ struct optee_supp supp;
+ struct tee_shm_pool *pool;
+ unsigned int rpc_param_count;
+ bool scan_bus_done;
+ struct workqueue_struct *scan_bus_wq;
+ struct work_struct scan_bus_work;
+};
+
+struct optee_session {
+ struct list_head list_node;
+ u32 session_id;
+};
+
+struct optee_context_data {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head sess_list;
+};
+
+struct optee_rpc_param {
+ u32 a0;
+ u32 a1;
+ u32 a2;
+ u32 a3;
+ u32 a4;
+ u32 a5;
+ u32 a6;
+ u32 a7;
+};
+
+/* Holds context that is preserved during one STD call */
+struct optee_call_ctx {
+ /* information about pages list used in last allocation */
+ void *pages_list;
+ size_t num_entries;
+};
+
+int optee_notif_init(struct optee *optee, u_int max_key);
+void optee_notif_uninit(struct optee *optee);
+int optee_notif_wait(struct optee *optee, u_int key);
+int optee_notif_send(struct optee *optee, u_int key);
+
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ struct tee_param *param);
+
+int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
+int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
+void optee_supp_init(struct optee_supp *supp);
+void optee_supp_uninit(struct optee_supp *supp);
+void optee_supp_release(struct optee_supp *supp);
+
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+
+int optee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+int optee_close_session_helper(struct tee_context *ctx, u32 session);
+int optee_close_session(struct tee_context *ctx, u32 session);
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+#define PTA_CMD_GET_DEVICES 0x0
+#define PTA_CMD_GET_DEVICES_SUPP 0x1
+int optee_enumerate_devices(u32 func);
+void optee_unregister_devices(void);
+
+int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start));
+void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm));
+
+void optee_remove_common(struct optee *optee);
+int optee_open(struct tee_context *ctx, bool cap_memref_null);
+void optee_release(struct tee_context *ctx);
+void optee_release_supp(struct tee_context *ctx);
+
+static inline void optee_from_msg_param_value(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ p->u.value.a = mp->u.value.a;
+ p->u.value.b = mp->u.value.b;
+ p->u.value.c = mp->u.value.c;
+}
+
+static inline void optee_to_msg_param_value(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ mp->u.value.a = p->u.value.a;
+ mp->u.value.b = p->u.value.b;
+ mp->u.value.c = p->u.value.c;
+}
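+
+/*
+ * Editor's note (illustrative sketch, not part of the upstream file):
+ * both helpers above map direction by plain offset arithmetic, relying
+ * on the TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_* and OPTEE_MSG_ATTR_TYPE_VALUE_*
+ * constants forming parallel INPUT/OUTPUT/INOUT runs. For example:
+ *
+ *	struct optee_msg_param mp = {
+ *		.attr = OPTEE_MSG_ATTR_TYPE_VALUE_INOUT,
+ *	};
+ *	struct tee_param p;
+ *
+ *	optee_from_msg_param_value(&p, mp.attr, &mp);
+ *	// now p.attr == TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT
+ */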
+
+void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+int optee_check_mem_type(unsigned long start, size_t num_pages);
+
+void optee_shm_arg_cache_init(struct optee *optee, u32 flags);
+void optee_shm_arg_cache_uninit(struct optee *optee);
+struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
+ size_t num_params,
+ struct optee_shm_arg_entry **entry,
+ struct tee_shm **shm_ret,
+ u_int *offs);
+void optee_free_msg_arg(struct tee_context *ctx,
+ struct optee_shm_arg_entry *entry, u_int offs);
+size_t optee_msg_arg_size(size_t rpc_param_count);
+
+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz);
+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg);
+
+/*
+ * Small helpers
+ */
+
+static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1)
+{
+ return (void *)(unsigned long)(((u64)reg0 << 32) | reg1);
+}
+
+static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
+{
+ *reg0 = val >> 32;
+ *reg1 = val;
+}
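+
+/*
+ * Editor's sketch (illustrative only): the pair of helpers above is a
+ * lossless round trip for any 64-bit value, e.g. a kernel pointer passed
+ * through two 32-bit SMC registers:
+ *
+ *	u32 hi, lo;
+ *	struct tee_shm *shm = ...;
+ *
+ *	reg_pair_from_64(&hi, &lo, (u64)(unsigned long)shm);
+ *	// hi/lo travel through the SMC interface
+ *	shm = reg_pair_to_ptr(hi, lo);
+ */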
+
+/* Registration of the ABIs */
+int optee_smc_abi_register(void);
+void optee_smc_abi_unregister(void);
+int optee_ffa_abi_register(void);
+void optee_ffa_abi_unregister(void);
+
+#endif /*OPTEE_PRIVATE_H*/
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
new file mode 100644
index 000000000..f3f06e099
--- /dev/null
+++ b/drivers/tee/optee/optee_rpc_cmd.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2016-2021, Linaro Limited
+ */
+
+#ifndef __OPTEE_RPC_CMD_H
+#define __OPTEE_RPC_CMD_H
+
+/*
+ * All RPC is done with a struct optee_msg_arg as bearer of information,
+ * struct optee_msg_arg::cmd holds values defined by OPTEE_RPC_CMD_* below.
+ * Only the commands handled by the kernel driver are defined here.
+ *
+ * RPC communication with tee-supplicant is reversed compared to normal
+ * client communication described above. The supplicant receives requests
+ * and sends responses.
+ */
+
+/*
+ * Get time
+ *
+ * Returns the number of seconds and nanoseconds since the Epoch,
+ * 1970-01-01 00:00:00 +0000 (UTC).
+ *
+ * [out] value[0].a Number of seconds
+ * [out] value[0].b Number of nanoseconds.
+ */
+#define OPTEE_RPC_CMD_GET_TIME 3
+
+/*
+ * Notification from/to secure world.
+ *
+ * If secure world needs to wait for something, for instance a mutex, it
+ * does a notification wait request instead of spinning in secure world.
+ * Conversely, a synchronous notification can be sent when a secure
+ * world mutex with a waiting thread is unlocked.
+ *
+ * This interface can also be used to wait for an asynchronous
+ * notification which instead is sent via a non-secure interrupt.
+ *
+ * Waiting on notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT
+ * [in] value[0].b notification value
+ *
+ * Sending a synchronous notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND
+ * [in] value[0].b notification value
+ */
+#define OPTEE_RPC_CMD_NOTIFICATION 4
+#define OPTEE_RPC_NOTIFICATION_WAIT 0
+#define OPTEE_RPC_NOTIFICATION_SEND 1
+
+/*
+ * Suspend execution
+ *
+ * [in] value[0].a Number of milliseconds to suspend
+ */
+#define OPTEE_RPC_CMD_SUSPEND 5
+
+/*
+ * Allocate a piece of shared memory
+ *
+ * [in] value[0].a Type of memory, one of
+ * OPTEE_RPC_SHM_TYPE_* below
+ * [in] value[0].b Requested size
+ * [in] value[0].c Required alignment
+ * [out] memref[0] Buffer
+ */
+#define OPTEE_RPC_CMD_SHM_ALLOC 6
+/* Memory that can be shared with a non-secure user space application */
+#define OPTEE_RPC_SHM_TYPE_APPL 0
+/* Memory only shared with non-secure kernel */
+#define OPTEE_RPC_SHM_TYPE_KERNEL 1
+
+/*
+ * Free shared memory previously allocated with OPTEE_RPC_CMD_SHM_ALLOC
+ *
+ * [in] value[0].a Type of memory, one of
+ * OPTEE_RPC_SHM_TYPE_* above
+ * [in] value[0].b Value of shared memory reference or cookie
+ */
+#define OPTEE_RPC_CMD_SHM_FREE 7
+
+/*
+ * Issue master requests (read and write operations) to an I2C chip.
+ *
+ * [in] value[0].a Transfer mode (OPTEE_RPC_I2C_TRANSFER_*)
+ * [in] value[0].b The I2C bus (a.k.a. adapter).
+ * 16 bit field.
+ * [in] value[0].c The I2C chip (a.k.a. address).
+ * 16 bit field (either 7 or 10 bit effective).
+ * [in] value[1].a The I2C master control flags (e.g., 10 bit address).
+ * 16 bit field.
+ * [in/out] memref[2] Buffer used for data transfers.
+ * [out] value[3].a Number of bytes transferred by the REE.
+ */
+#define OPTEE_RPC_CMD_I2C_TRANSFER 21
+
+/* I2C master transfer modes */
+#define OPTEE_RPC_I2C_TRANSFER_RD 0
+#define OPTEE_RPC_I2C_TRANSFER_WR 1
+
+/* I2C master control flags */
+#define OPTEE_RPC_I2C_FLAGS_TEN_BIT BIT(0)
+
+#endif /*__OPTEE_RPC_CMD_H*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
new file mode 100644
index 000000000..73b5e7760
--- /dev/null
+++ b/drivers/tee/optee/optee_smc.h
@@ -0,0 +1,579 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+#ifndef OPTEE_SMC_H
+#define OPTEE_SMC_H
+
+#include <linux/arm-smccc.h>
+#include <linux/bitops.h>
+
+#define OPTEE_SMC_STD_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
+#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
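+
+/*
+ * Editor's note (illustrative, not part of the upstream file): both
+ * wrappers expand to ARM_SMCCC_CALL_VAL() with the SMC32 calling
+ * convention and Trusted OS ownership fixed; only the call type and
+ * function number vary. For example,
+ *
+ *	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
+ *
+ * yields the fast-call function ID with function number 7 in its low
+ * bits, used for OPTEE_SMC_GET_SHM_CONFIG below.
+ */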
+
+/*
+ * Function specified by SMC Calling convention.
+ */
+#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00
+#define OPTEE_SMC_CALLS_COUNT \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_COUNT)
+
+/*
+ * Normal cached memory (write-back), shareable for SMP systems and not
+ * shareable for UP systems.
+ */
+#define OPTEE_SMC_SHM_CACHED 1
+
+/*
+ * a0..a7 are used as register names in the descriptions below; on arm32
+ * that translates to r0..r7 and on arm64 to w0..w7. In both cases they
+ * are 32-bit registers.
+ */
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns the following UID if using the API specified in this file
+ * without further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * see also OPTEE_MSG_UID_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
+#define OPTEE_SMC_CALLS_UID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_UID)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns 2.0 if using API specified in this file without further extensions.
+ * see also OPTEE_MSG_REVISION_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
+#define OPTEE_SMC_CALLS_REVISION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_REVISION)
+
+struct optee_smc_calls_revision_result {
+ unsigned long major;
+ unsigned long minor;
+ unsigned long reserved0;
+ unsigned long reserved1;
+};
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that the returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
+#define OPTEE_SMC_CALL_GET_OS_UUID \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
+ * described above. May optionally return a 32-bit build identifier in a2,
+ * with zero meaning unspecified.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
+#define OPTEE_SMC_CALL_GET_OS_REVISION \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
+
+struct optee_smc_call_get_os_revision_result {
+ unsigned long major;
+ unsigned long minor;
+ unsigned long build_id;
+ unsigned long reserved1;
+};
+
+/*
+ * Call with struct optee_msg_arg as argument
+ *
+ * When called with OPTEE_SMC_CALL_WITH_RPC_ARG or
+ * OPTEE_SMC_CALL_WITH_REGD_ARG in a0 there is one RPC struct optee_msg_arg
+ * following after the first struct optee_msg_arg. The RPC struct
+ * optee_msg_arg has reserved space for the number of RPC parameters as
+ * returned by OPTEE_SMC_EXCHANGE_CAPABILITIES.
+ *
+ * When calling these functions, normal world has a few responsibilities:
+ * 1. It must be able to handle any RPCs that may occur
+ * 2. Non-secure interrupts should not be masked
+ * 3. If asynchronous notifications have been negotiated successfully, then
+ * the interrupt for asynchronous notifications should be unmasked
+ * during this call.
+ *
+ * Call register usage, OPTEE_SMC_CALL_WITH_ARG and
+ * OPTEE_SMC_CALL_WITH_RPC_ARG:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_WITH_ARG or OPTEE_SMC_CALL_WITH_RPC_ARG
+ * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
+ * a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
+ * a3 Cache settings, not used if physical pointer is in a predefined shared
+ * memory area else per OPTEE_SMC_SHM_*
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Call register usage, OPTEE_SMC_CALL_WITH_REGD_ARG:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_WITH_REGD_ARG
+ * a1 Upper 32 bits of a 64-bit shared memory cookie
+ * a2 Lower 32 bits of a 64-bit shared memory cookie
+ * a3 Offset of the struct optee_msg_arg in the shared memory with the
+ * supplied cookie
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_*
+ * a1-3 Not used
+ * a4-7 Preserved
+ *
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
+ * a1-3 Preserved
+ * a4-7 Preserved
+ *
+ * RPC return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val)
+ * a1-2 RPC parameters
+ * a3-7 Resume information, must be preserved
+ *
+ * Possible return values:
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Call completed, result updated in
+ * the previously supplied struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded,
+ * try again later.
+ * OPTEE_SMC_RETURN_EBADADDR Bad physical pointer to struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg
+ * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal
+ * world.
+ */
+#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
+#define OPTEE_SMC_CALL_WITH_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
+#define OPTEE_SMC_CALL_WITH_RPC_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG)
+#define OPTEE_SMC_CALL_WITH_REGD_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG)
+
+/*
+ * Get Shared Memory Config
+ *
+ * Returns the Secure/Non-secure shared memory config.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Physical address of start of SHM
+ * a2 Size of SHM
+ * a3 Cache settings of memory, as defined by the
+ * OPTEE_SMC_SHM_* values above
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
+#define OPTEE_SMC_GET_SHM_CONFIG \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
+
+struct optee_smc_get_shm_config_result {
+ unsigned long status;
+ unsigned long start;
+ unsigned long size;
+ unsigned long settings;
+};
+
+/*
+ * Exchanges capabilities between normal world and secure world
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
+ * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
+ * a2-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2 The maximum secure world notification number
+ * a3 Bit[7:0]: Number of parameters needed for RPC to be supplied
+ * as the second MSG arg struct for
+ * OPTEE_SMC_CALL_WITH_ARG
+ * Bit[31:8]: Reserved (MBZ)
+ * a4-7 Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ */
+/* Normal world works as a uniprocessor system */
+#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0)
+/* Secure world has reserved shared memory for normal world to use */
+#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
+/* Secure world can communicate via previously unregistered shared memory */
+#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
+
+/*
+ * Secure world supports commands "register/unregister shared memory",
+ * secure world accepts command buffers located in any parts of non-secure RAM
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2)
+/* Secure world is built with virtualization support */
+#define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3)
+/* Secure world supports Shared Memory with a NULL reference */
+#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
+/* Secure world supports asynchronous notification of normal world */
+#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
+/* Secure world supports pre-allocating RPC arg struct */
+#define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6)
+
+#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
+#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
+
+struct optee_smc_exchange_capabilities_result {
+ unsigned long status;
+ unsigned long capabilities;
+ unsigned long max_notif_value;
+ unsigned long data;
+};
+
+/*
+ * Disables and empties the cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns one shared memory reference to free. To disable the
+ * cache and free all cached objects this function has to be called until
+ * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie
+ * a3-7 Preserved
+ *
+ * Cache empty return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10
+#define OPTEE_SMC_DISABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
+
+struct optee_smc_disable_shm_cache_result {
+ unsigned long status;
+ unsigned long shm_upper32;
+ unsigned long shm_lower32;
+ unsigned long reserved0;
+};
+
+/*
+ * Enable cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
+ * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11
+#define OPTEE_SMC_ENABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
+
+/*
+ * Query OP-TEE about number of supported threads
+ *
+ * Normal World OS or Hypervisor issues this call to find out how many
+ * threads OP-TEE supports. That is how many standard calls can be issued
+ * in parallel before OP-TEE will return OPTEE_SMC_RETURN_ETHREAD_LIMIT.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_THREAD_COUNT
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Number of threads
+ * a2-7 Preserved
+ *
+ * Error return:
+ * a0 OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Requested call is not implemented
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_THREAD_COUNT 15
+#define OPTEE_SMC_GET_THREAD_COUNT \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
+
+/*
+ * Inform OP-TEE that normal world is able to receive asynchronous
+ * notifications.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_ASYNC_NOTIF
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not supported return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF 16
+#define OPTEE_SMC_ENABLE_ASYNC_NOTIF \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF)
+
+/*
+ * Retrieve a value of notifications pending since the last call of this
+ * function.
+ *
+ * OP-TEE keeps a record of all posted values. When an interrupt is
+ * received which indicates that there are posted values this function
+ * should be called until all pending values have been retrieved. When a
+ * value is retrieved, it's cleared from the record in secure world.
+ *
+ * It is expected that this function is called from an interrupt handler
+ * in normal world.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 value
+ * a2 Bit[0]: OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID if the value in a1 is
+ * valid, else 0 if no values were pending
+ * a2 Bit[1]: OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING if another value is
+ * pending, else 0.
+ * Bit[31:2]: MBZ
+ * a3-7 Preserved
+ *
+ * Not supported return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID BIT(0)
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING BIT(1)
+
+/*
+ * Notification that OP-TEE expects a yielding call to do some bottom half
+ * work in a driver.
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF 0
+
+#define OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE 17
+#define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
+
+/* See OPTEE_SMC_CALL_WITH_RPC_ARG above */
+#define OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG 18
+
+/* See OPTEE_SMC_CALL_WITH_REGD_ARG above */
+#define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG 19
+
+/*
+ * Resume from RPC (for example after processing a foreign interrupt)
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
+ * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
+ * OPTEE_SMC_RETURN_RPC in a0
+ *
+ * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
+ *
+ * Possible return values:
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Original call completed, result
+ * updated in the previously supplied
+ * struct optee_msg_arg.
+ * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal
+ * world.
+ * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume
+ * information was corrupt.
+ */
+#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3
+#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
+
+#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
+
+#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
+ ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
+
+#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
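+
+/*
+ * Editor's note (worked example, not part of the upstream file):
+ *
+ *	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)	== 0xFFFF0002
+ *	OPTEE_SMC_RETURN_GET_RPC_FUNC(0xFFFF0002)	== OPTEE_SMC_RPC_FUNC_FREE
+ *
+ * i.e. an RPC return value is just the 16-bit RPC function number tagged
+ * with the 0xFFFF0000 prefix.
+ */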
+
+/*
+ * Allocate memory for RPC parameter passing. The memory is used to hold a
+ * struct optee_msg_arg.
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC
+ * a1 Size in bytes of required argument memory
+ * a2 Not used
+ * a3 Resume information, must be preserved
+ * a4-5 Not used
+ * a6-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1 Upper 32 bits of 64-bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated.
+ * a2 Lower 32 bits of 64-bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated
+ * a3 Preserved
+ * a4 Upper 32 bits of 64-bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a5 Lower 32 bits of 64-bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a6-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_ALLOC 0
+#define OPTEE_SMC_RETURN_RPC_ALLOC \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
+
+/*
+ * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_FREE
+ * a1 Upper 32 bits of 64-bit shared memory cookie belonging to this
+ * argument memory
+ * a2 Lower 32 bits of 64-bit shared memory cookie belonging to this
+ * argument memory
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FREE 2
+#define OPTEE_SMC_RETURN_RPC_FREE \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
+
+/*
+ * Deliver a foreign interrupt in normal world.
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
+ * a1-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4
+#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR)
+
+/*
+ * Do an RPC request. The supplied struct optee_msg_arg tells which
+ * request to do and the parameters for the request. The following fields
+ * are used (the rest are unused):
+ * - cmd the Request ID
+ * - ret return value of the request, filled in by normal world
+ * - num_params number of parameters for the request
+ * - params the parameters
+ * - param_attrs attributes of the parameters
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_CMD
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_CMD 5
+#define OPTEE_SMC_RETURN_RPC_CMD \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
+
+/* Returned in a0 */
+#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+
+/* Returned in a0 only from Trusted OS functions */
+#define OPTEE_SMC_RETURN_OK 0x0
+#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1
+#define OPTEE_SMC_RETURN_EBUSY 0x2
+#define OPTEE_SMC_RETURN_ERESUME 0x3
+#define OPTEE_SMC_RETURN_EBADADDR 0x4
+#define OPTEE_SMC_RETURN_EBADCMD 0x5
+#define OPTEE_SMC_RETURN_ENOMEM 0x6
+#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7
+#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret))
+
+static inline bool __optee_smc_return_is_rpc(u32 ret)
+{
+ return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION &&
+ (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) ==
+ OPTEE_SMC_RETURN_RPC_PREFIX;
+}
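+
+/*
+ * Editor's sketch (illustrative, assuming caller-defined do_smc() and
+ * handle_rpc() helpers along the lines of optee_handle_rpc() in
+ * smc_abi.c): a standard call keeps re-entering secure world until the
+ * return code is no longer an RPC request:
+ *
+ *	do_smc(&param);
+ *	while (OPTEE_SMC_RETURN_IS_RPC(param.a0)) {
+ *		handle_rpc(&param);
+ *		param.a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
+ *		do_smc(&param);
+ *	}
+ */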
+
+#endif /* OPTEE_SMC_H */
diff --git a/drivers/tee/optee/optee_trace.h b/drivers/tee/optee/optee_trace.h
new file mode 100644
index 000000000..7c954eefa
--- /dev/null
+++ b/drivers/tee/optee/optee_trace.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * optee trace points
+ *
+ * Copyright (C) 2021 Synaptics Incorporated
+ * Author: Jisheng Zhang <jszhang@kernel.org>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM optee
+
+#if !defined(_TRACE_OPTEE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_OPTEE_H
+
+#include <linux/arm-smccc.h>
+#include <linux/tracepoint.h>
+#include "optee_private.h"
+
+TRACE_EVENT(optee_invoke_fn_begin,
+ TP_PROTO(struct optee_rpc_param *param),
+ TP_ARGS(param),
+
+ TP_STRUCT__entry(
+ __field(void *, param)
+ __array(u32, args, 8)
+ ),
+
+ TP_fast_assign(
+ __entry->param = param;
+ BUILD_BUG_ON(sizeof(*param) < sizeof(__entry->args));
+ memcpy(__entry->args, param, sizeof(__entry->args));
+ ),
+
+ TP_printk("param=%p (%x, %x, %x, %x, %x, %x, %x, %x)", __entry->param,
+ __entry->args[0], __entry->args[1], __entry->args[2],
+ __entry->args[3], __entry->args[4], __entry->args[5],
+ __entry->args[6], __entry->args[7])
+);
+
+TRACE_EVENT(optee_invoke_fn_end,
+ TP_PROTO(struct optee_rpc_param *param, struct arm_smccc_res *res),
+ TP_ARGS(param, res),
+
+ TP_STRUCT__entry(
+ __field(void *, param)
+ __array(unsigned long, rets, 4)
+ ),
+
+ TP_fast_assign(
+ __entry->param = param;
+ BUILD_BUG_ON(sizeof(*res) < sizeof(__entry->rets));
+ memcpy(__entry->rets, res, sizeof(__entry->rets));
+ ),
+
+ TP_printk("param=%p ret (%lx, %lx, %lx, %lx)", __entry->param,
+ __entry->rets[0], __entry->rets[1], __entry->rets[2],
+ __entry->rets[3])
+);
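+
+/*
+ * Editor's note (illustrative, not part of the upstream file): these two
+ * events are meant to bracket the actual SMC, roughly as
+ * optee_smc_do_call_with_arg() in smc_abi.c does:
+ *
+ *	trace_optee_invoke_fn_begin(&param);
+ *	optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
+ *			     param.a4, param.a5, param.a6, param.a7, &res);
+ *	trace_optee_invoke_fn_end(&param, &res);
+ */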
+#endif /* _TRACE_OPTEE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE optee_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
new file mode 100644
index 000000000..e69bc6380
--- /dev/null
+++ b/drivers/tee/optee/rpc.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_rpc_cmd.h"
+
+static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
+{
+ struct timespec64 ts;
+
+ if (arg->num_params != 1)
+ goto bad;
+ if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+ OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
+ goto bad;
+
+ ktime_get_real_ts64(&ts);
+ arg->params[0].u.value.a = ts.tv_sec;
+ arg->params[0].u.value.b = ts.tv_nsec;
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+#if IS_REACHABLE(CONFIG_I2C)
+static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct tee_param *params;
+ struct i2c_adapter *adapter;
+ struct i2c_msg msg = { };
+ size_t i;
+ int ret = -EOPNOTSUPP;
+ u8 attr[] = {
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT,
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT,
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT,
+ };
+
+ if (arg->num_params != ARRAY_SIZE(attr)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params))
+ goto bad;
+
+ for (i = 0; i < arg->num_params; i++) {
+ if (params[i].attr != attr[i])
+ goto bad;
+ }
+
+ adapter = i2c_get_adapter(params[0].u.value.b);
+ if (!adapter)
+ goto bad;
+
+ if (params[1].u.value.a & OPTEE_RPC_I2C_FLAGS_TEN_BIT) {
+ if (!i2c_check_functionality(adapter,
+ I2C_FUNC_10BIT_ADDR)) {
+ i2c_put_adapter(adapter);
+ goto bad;
+ }
+
+ msg.flags = I2C_M_TEN;
+ }
+
+ msg.addr = params[0].u.value.c;
+ msg.buf = params[2].u.memref.shm->kaddr;
+ msg.len = params[2].u.memref.size;
+
+ switch (params[0].u.value.a) {
+ case OPTEE_RPC_I2C_TRANSFER_RD:
+ msg.flags |= I2C_M_RD;
+ break;
+ case OPTEE_RPC_I2C_TRANSFER_WR:
+ break;
+ default:
+ i2c_put_adapter(adapter);
+ goto bad;
+ }
+
+ ret = i2c_transfer(adapter, &msg, 1);
+
+ if (ret < 0) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ params[3].u.value.a = msg.len;
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params))
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ else
+ arg->ret = TEEC_SUCCESS;
+ }
+
+ i2c_put_adapter(adapter);
+ kfree(params);
+ return;
+bad:
+ kfree(params);
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+#else
+static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ arg->ret = TEEC_ERROR_NOT_SUPPORTED;
+}
+#endif
+
+static void handle_rpc_func_cmd_wq(struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ if (arg->num_params != 1)
+ goto bad;
+
+ if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto bad;
+
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_NOTIFICATION_WAIT:
+ if (optee_notif_wait(optee, arg->params[0].u.value.b))
+ goto bad;
+ break;
+ case OPTEE_RPC_NOTIFICATION_SEND:
+ if (optee_notif_send(optee, arg->params[0].u.value.b))
+ goto bad;
+ break;
+ default:
+ goto bad;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
+{
+ u32 msec_to_wait;
+
+ if (arg->num_params != 1)
+ goto bad;
+
+ if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto bad;
+
+ msec_to_wait = arg->params[0].u.value.a;
+
+ /* Go to interruptible sleep */
+ msleep_interruptible(msec_to_wait);
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_rpc_supp_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_param *params;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+
+ arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);
+
+ if (optee->ops->to_msg_param(optee, arg->params, arg->num_params,
+ params))
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+out:
+ kfree(params);
+}
+
+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
+{
+ u32 ret;
+ struct tee_param param;
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct tee_shm *shm;
+
+ param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ param.u.value.a = OPTEE_RPC_SHM_TYPE_APPL;
+ param.u.value.b = sz;
+ param.u.value.c = 0;
+
+ ret = optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_ALLOC, 1, &param);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&optee->supp.mutex);
+ /* Increases count as secure world doesn't have a reference */
+ shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
+ mutex_unlock(&optee->supp.mutex);
+ return shm;
+}
+
+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct tee_param param;
+
+ param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ param.u.value.a = OPTEE_RPC_SHM_TYPE_APPL;
+ param.u.value.b = tee_shm_get_id(shm);
+ param.u.value.c = 0;
+
+ /*
+ * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
+ * world has released its reference.
+ *
+ * It's better to do this before sending the request to the supplicant,
+ * as we'd like to let the process that did the initial allocation
+ * release the last reference too in order to avoid stacking many
+ * pending fput() on the client process. This could otherwise happen
+ * if secure world does many allocations and frees in a single
+ * invoke.
+ */
+ tee_shm_put(shm);
+
+ optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, &param);
+}
+
+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_GET_TIME:
+ handle_rpc_func_cmd_get_time(arg);
+ break;
+ case OPTEE_RPC_CMD_NOTIFICATION:
+ handle_rpc_func_cmd_wq(optee, arg);
+ break;
+ case OPTEE_RPC_CMD_SUSPEND:
+ handle_rpc_func_cmd_wait(arg);
+ break;
+ case OPTEE_RPC_CMD_I2C_TRANSFER:
+ handle_rpc_func_cmd_i2c_transfer(ctx, arg);
+ break;
+ default:
+ handle_rpc_supp_cmd(ctx, optee, arg);
+ }
+}
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
new file mode 100644
index 000000000..e6e0428f8
--- /dev/null
+++ b/drivers/tee/optee/smc_abi.c
@@ -0,0 +1,1591 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "optee_rpc_cmd.h"
+#include <linux/kmemleak.h>
+#define CREATE_TRACE_POINTS
+#include "optee_trace.h"
+
+/*
+ * This file implements the SMC ABI used when communicating with secure world
+ * OP-TEE OS via raw SMCs.
+ * This file is divided into the following sections:
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ * 2. Low level support functions to register shared memory in secure world
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ * 4. Do a normal scheduled call into secure world
+ * 5. Asynchronous notification
+ * 6. Driver initialization.
+ */
+
+/*
+ * A typical OP-TEE private shm allocation is 224 bytes (argument struct
+ * with 6 parameters, needed for open session). So with an alignment of 512
+ * we'll waste a bit more than 50%. However, it's only expected that we'll
+ * have a handful of these structs allocated at a time. Most memory will
+ * be allocated aligned to the page size, so all in all this should scale
+ * up and down quite well.
+ */
+#define OPTEE_MIN_STATIC_POOL_ALIGN 9 /* 512 bytes aligned */
+
+/*
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ *
+ * optee_from_msg_param() and optee_to_msg_param() are the main
+ * functions.
+ */
+
+static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+ phys_addr_t pa;
+ int rc;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ p->u.memref.size = mp->u.tmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
+ if (!shm) {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(shm, 0, &pa);
+ if (rc)
+ return rc;
+
+ p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+ p->u.memref.shm = shm;
+
+ return 0;
+}
+
+static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ p->u.memref.size = mp->u.rmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
+
+ if (shm) {
+ p->u.memref.shm_offs = mp->u.rmem.offs;
+ p->u.memref.shm = shm;
+ } else {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ }
+}
+
+/**
+ * optee_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @optee: main service struct
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
+ size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ optee_from_msg_param_value(p, attr, mp);
+ break;
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ rc = from_msg_param_tmp_mem(p, attr, mp);
+ if (rc)
+ return rc;
+ break;
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+ from_msg_param_reg_mem(p, attr, mp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ int rc;
+ phys_addr_t pa;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.tmem.size = p->u.memref.size;
+
+ if (!p->u.memref.shm) {
+ mp->u.tmem.buf_ptr = 0;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+ if (rc)
+ return rc;
+
+ mp->u.tmem.buf_ptr = pa;
+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+ OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+ return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.rmem.size = p->u.memref.size;
+ mp->u.rmem.offs = p->u.memref.shm_offs;
+ return 0;
+}
+
+/**
+ * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
+ * @optee: main service struct
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params: subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_to_msg_param(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params, const struct tee_param *params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = OPTEE_MSG_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ optee_to_msg_param_value(mp, p);
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (tee_shm_is_dynamic(p->u.memref.shm))
+ rc = to_msg_param_reg_mem(mp, p);
+ else
+ rc = to_msg_param_tmp_mem(mp, p);
+ if (rc)
+ return rc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * 2. Low level support functions to register shared memory in secure world
+ *
+ * Functions to enable/disable shared memory caching in secure world, that
+ * is, lazy freeing of previously allocated shared memory. Freeing is
+ * performed when a request has been completed.
+ *
+ * Functions to register and unregister shared memory both for normal
+ * clients and for tee-supplicant.
+ */
+
+/**
+ * optee_enable_shm_cache() - Enables caching of some shared memory allocations
+ * in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_enable_shm_cache(struct optee *optee)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == OPTEE_SMC_RETURN_OK)
+ break;
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * __optee_disable_shm_cache() - Disables caching of some shared memory
+ * allocation in OP-TEE
+ * @optee: main service struct
+ * @is_mapped: true if the cached shared memory addresses were mapped by this
+ * kernel, are safe to dereference, and should be freed
+ */
+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_disable_shm_cache_result result;
+ } res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
+ break; /* All shm's freed */
+ if (res.result.status == OPTEE_SMC_RETURN_OK) {
+ struct tee_shm *shm;
+
+ /*
+ * Shared memory references that were not mapped by
+ * this kernel must be ignored to prevent a crash.
+ */
+ if (!is_mapped)
+ continue;
+
+ shm = reg_pair_to_ptr(res.result.shm_upper32,
+ res.result.shm_lower32);
+ tee_shm_free(shm);
+ } else {
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
+ * allocations in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_disable_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, true);
+}
+
+/**
+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
+ * allocations in OP-TEE which are not
+ * currently mapped
+ * @optee: main service struct
+ */
+static void optee_disable_unmapped_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, false);
+}
+
+#define PAGELIST_ENTRIES_PER_PAGE \
+ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+ int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+ return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
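+
+/*
+ * Editor's note (worked example, not part of the upstream file): with
+ * 4 KiB non-contiguous pages and 8-byte entries,
+ * PAGELIST_ENTRIES_PER_PAGE is 511. Registering e.g. 1024 pages
+ * therefore needs DIV_ROUND_UP(1024, 511) = 3 pagelist pages, i.e.
+ * 12 KiB returned by get_pages_list_size().
+ */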
+
+static u64 *optee_allocate_pages_list(size_t num_entries)
+{
+ return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+static void optee_free_pages_list(void *list, size_t num_entries)
+{
+ free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold list of user page addresses and
+ * links to the next pages of buffer
+ */
+static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+ size_t page_offset)
+{
+ int n = 0;
+ phys_addr_t optee_page;
+ /*
+ * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+ * for details.
+ */
+ struct {
+ u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+ u64 next_page_data;
+ } *pages_data;
+
+ /*
+ * Currently OP-TEE uses 4k page size and it does not look like this
+ * will change in the future. On the other hand, there are no known ARM
+ * architectures with page size < 4k. Thus the next build assert looks
+ * redundant. But the following code heavily relies on this assumption,
+ * so it is better to be safe than sorry.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ pages_data = (void *)dst;
+ /*
+ * If the Linux page is bigger than 4k, and the user buffer offset is
+ * larger than 4k/8k/12k/etc, this will skip the first 4k pages,
+ * because they bear no valuable data for OP-TEE.
+ */
+ optee_page = page_to_phys(*pages) +
+ round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ while (true) {
+ pages_data->pages_list[n++] = optee_page;
+
+ if (n == PAGELIST_ENTRIES_PER_PAGE) {
+ pages_data->next_page_data =
+ virt_to_phys(pages_data + 1);
+ pages_data++;
+ n = 0;
+ }
+
+ optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+ if (!(optee_page & ~PAGE_MASK)) {
+ if (!--num_pages)
+ break;
+ pages++;
+ optee_page = page_to_phys(*pages);
+ }
+ }
+}
+
+static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ u64 *pages_list;
+ size_t sz;
+ int rc;
+
+ if (!num_pages)
+ return -EINVAL;
+
+ rc = optee_check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ pages_list = optee_allocate_pages_list(num_pages);
+ if (!pages_list)
+ return -ENOMEM;
+
+ /*
+ * We're about to register shared memory, so we can't use registered
+ * shared memory for this request itself or there would be a catch-22.
+ *
+ * So in this case we'll have to do the good old temporary private
+ * allocation instead of using optee_get_msg_arg().
+ */
+ sz = optee_msg_arg_size(optee->rpc_param_count);
+ shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
+ if (IS_ERR(shm_arg)) {
+ rc = PTR_ERR(shm_arg);
+ goto out;
+ }
+ msg_arg = tee_shm_get_va(shm_arg, 0);
+ if (IS_ERR(msg_arg)) {
+ rc = PTR_ERR(msg_arg);
+ goto out;
+ }
+
+ optee_fill_pages_list(pages_list, pages, num_pages,
+ tee_shm_get_page_offset(shm));
+
+ memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+ msg_arg->num_params = 1;
+ msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+ msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+ msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+ /*
+ * In the least bits of msg_arg->params->u.tmem.buf_ptr we
+ * store buffer offset from 4k page, as described in OP-TEE ABI.
+ */
+ msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+ tee_shm_free(shm_arg);
+out:
+ optee_free_pages_list(pages_list, num_pages);
+ return rc;
+}
+
+static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ int rc = 0;
+ size_t sz;
+
+ /*
+ * We're about to unregister shared memory and we may not be able
+ * to register shared memory for this request in case we're called
+ * from optee_shm_arg_cache_uninit().
+ *
+ * So in order to keep things simple in this function just as in
+ * optee_shm_register() we'll use temporary private allocation
+ * instead of using optee_get_msg_arg().
+ */
+ sz = optee_msg_arg_size(optee->rpc_param_count);
+ shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
+ if (IS_ERR(shm_arg))
+ return PTR_ERR(shm_arg);
+ msg_arg = tee_shm_get_va(shm_arg, 0);
+ if (IS_ERR(msg_arg)) {
+ rc = PTR_ERR(msg_arg);
+ goto out;
+ }
+
+ memset(msg_arg, 0, sz);
+ msg_arg->num_params = 1;
+ msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+out:
+ tee_shm_free(shm_arg);
+ return rc;
+}
+
+static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ /*
+ * We don't want to register supplicant memory in OP-TEE.
+ * Instead, information about it will be passed in RPC code.
+ */
+ return optee_check_mem_type(start, num_pages);
+}
+
+static int optee_shm_unregister_supp(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ return 0;
+}
+
+/*
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ *
+ * Implements an OP-TEE specific shared memory pool which is used
+ * when dynamic shared memory is supported by secure world.
+ *
+ * The main function is optee_shm_pool_alloc_pages().
+ */
+
+static int pool_op_alloc(struct tee_shm_pool *pool,
+ struct tee_shm *shm, size_t size, size_t align)
+{
+ /*
+ * Shared memory private to the OP-TEE driver doesn't need
+ * to be registered with OP-TEE.
+ */
+ if (shm->flags & TEE_SHM_PRIV)
+ return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);
+
+ return optee_pool_op_alloc_helper(pool, shm, size, align,
+ optee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool *pool,
+ struct tee_shm *shm)
+{
+ if (!(shm->flags & TEE_SHM_PRIV))
+ optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
+ else
+ optee_pool_op_free_helper(pool, shm, NULL);
+}
+
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
+{
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_pool = pool_op_destroy_pool,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from kernel's own memory.
+ */
+static struct tee_shm_pool *optee_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->ops = &pool_ops;
+
+ return pool;
+}
+
+/*
+ * 4. Do a normal scheduled call into secure world
+ *
+ * The function optee_smc_do_call_with_arg() performs a normal scheduled
+ * call into secure world. During this call, secure world may request help
+ * from normal world using RPCs, Remote Procedure Calls. This includes
+ * delivery of non-secure interrupts to, for instance, allow rescheduling
+ * of the current task.
+ */
+
+static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ optee_rpc_cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+ default:
+ /* Return here so the error isn't overwritten below */
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
+{
+ phys_addr_t pa;
+ struct tee_shm *shm;
+ size_t sz;
+ size_t n;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (!arg->num_params ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ for (n = 1; n < arg->num_params; n++) {
+ if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ }
+
+ sz = arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (tee_shm_get_pa(shm, 0, &pa)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto bad;
+ }
+
+ sz = tee_shm_get_size(shm);
+
+ if (tee_shm_is_dynamic(shm)) {
+ struct page **pages;
+ u64 *pages_list;
+ size_t page_num;
+
+ pages = tee_shm_get_pages(shm, &page_num);
+ if (!pages || !page_num) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ pages_list = optee_allocate_pages_list(page_num);
+ if (!pages_list) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ call_ctx->pages_list = pages_list;
+ call_ctx->num_entries = page_num;
+
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ /*
+ * In the least significant bits of u.tmem.buf_ptr we store the
+ * buffer offset from the 4k page, as described in the OP-TEE ABI.
+ */
+ arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) &
+ (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+ optee_fill_pages_list(pages_list, pages, page_num,
+ tee_shm_get_page_offset(shm));
+ } else {
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ arg->params[0].u.tmem.buf_ptr = pa;
+ arg->params[0].u.tmem.size = sz;
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ tee_shm_free(shm);
+}
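+
+/*
+ * Sketch of the OPTEE_MSG_ATTR_NONCONTIG encoding used above, pulled
+ * out for illustration (example_noncontig_buf_ptr() is hypothetical):
+ * the physical address of the page list is combined with the buffer's
+ * offset within the first 4k page.
+ */
+static u64 __maybe_unused example_noncontig_buf_ptr(u64 *pages_list,
+ size_t page_offset)
+{
+ return virt_to_phys(pages_list) |
+ (page_offset & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+}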
+
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+ if (call_ctx->pages_list) {
+ optee_free_pages_list(call_ctx->pages_list,
+ call_ctx->num_entries);
+ call_ctx->pages_list = NULL;
+ call_ctx->num_entries = 0;
+ }
+}
+
+static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+ free_pages_list(call_ctx);
+}
+
+static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
+{
+
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_SHM_ALLOC:
+ free_pages_list(call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
+ break;
+ case OPTEE_RPC_CMD_SHM_FREE:
+ handle_rpc_func_cmd_shm_free(ctx, arg);
+ break;
+ default:
+ optee_rpc_cmd(ctx, optee, arg);
+ }
+}
+
+/**
+ * optee_handle_rpc() - handle RPC from secure world
+ * @ctx: context doing the RPC
+ * @rpc_arg: pre-registered RPC argument structure, or NULL if not used
+ * @param: value of registers for the RPC
+ * @call_ctx: call context. Preserved during one OP-TEE invocation
+ *
+ * Result of RPC is written back into @param.
+ */
+static void optee_handle_rpc(struct tee_context *ctx,
+ struct optee_msg_arg *rpc_arg,
+ struct optee_rpc_param *param,
+ struct optee_call_ctx *call_ctx)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct optee_msg_arg *arg;
+ struct tee_shm *shm;
+ phys_addr_t pa;
+
+ switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+ case OPTEE_SMC_RPC_FUNC_ALLOC:
+ shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
+ if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+ reg_pair_from_64(&param->a1, &param->a2, pa);
+ reg_pair_from_64(&param->a4, &param->a5,
+ (unsigned long)shm);
+ } else {
+ param->a1 = 0;
+ param->a2 = 0;
+ param->a4 = 0;
+ param->a5 = 0;
+ }
+ kmemleak_not_leak(shm);
+ break;
+ case OPTEE_SMC_RPC_FUNC_FREE:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ tee_shm_free(shm);
+ break;
+ case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
+ /*
+ * A foreign interrupt was raised while secure world was
+ * executing. Since such interrupts are handled in Linux, a
+ * dummy RPC is performed to let Linux take the interrupt
+ * through the normal vector.
+ */
+ break;
+ case OPTEE_SMC_RPC_FUNC_CMD:
+ if (rpc_arg) {
+ arg = rpc_arg;
+ } else {
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg)) {
+ pr_err("%s: tee_shm_get_va %p failed\n",
+ __func__, shm);
+ break;
+ }
+ }
+
+ handle_rpc_func_cmd(ctx, optee, arg, call_ctx);
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n",
+ (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
+ break;
+ }
+
+ param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
+}
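+
+/*
+ * For reference, the register-pair split used above behaves as sketched
+ * below (example_reg_pair() is hypothetical; the real helpers are
+ * reg_pair_from_64() and reg_pair_to_ptr() in optee_private.h):
+ */
+static void __maybe_unused example_reg_pair(u32 *reg0, u32 *reg1, u64 val)
+{
+ *reg0 = upper_32_bits(val); /* high word, e.g. param->a1 */
+ *reg1 = lower_32_bits(val); /* low word, e.g. param->a2 */
+}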
+
+/**
+ * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
+ * @ctx: calling context
+ * @shm: shared memory holding the message to pass to secure world
+ * @offs: offset of the message in @shm
+ *
+ * Does an SMC to OP-TEE in secure world and handles any resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns the return code from secure world; 0 is OK.
+ */
+static int optee_smc_do_call_with_arg(struct tee_context *ctx,
+ struct tee_shm *shm, u_int offs)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_call_waiter w;
+ struct optee_rpc_param param = { };
+ struct optee_call_ctx call_ctx = { };
+ struct optee_msg_arg *rpc_arg = NULL;
+ int rc;
+
+ if (optee->rpc_param_count) {
+ struct optee_msg_arg *arg;
+ unsigned int rpc_arg_offs;
+
+ arg = tee_shm_get_va(shm, offs);
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+
+ rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
+ }
+
+ if (rpc_arg && tee_shm_is_dynamic(shm)) {
+ param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
+ param.a3 = offs;
+ } else {
+ phys_addr_t parg;
+
+ rc = tee_shm_get_pa(shm, offs, &parg);
+ if (rc)
+ return rc;
+
+ if (rpc_arg)
+ param.a0 = OPTEE_SMC_CALL_WITH_RPC_ARG;
+ else
+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, parg);
+ }
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ trace_optee_invoke_fn_begin(&param);
+ optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
+ param.a4, param.a5, param.a6, param.a7,
+ &res);
+ trace_optee_invoke_fn_end(&param, &res);
+
+ if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
+ /*
+ * Out of threads in secure world, wait for a thread
+ * to become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
+ cond_resched();
+ param.a0 = res.a0;
+ param.a1 = res.a1;
+ param.a2 = res.a2;
+ param.a3 = res.a3;
+ optee_handle_rpc(ctx, rpc_arg, &param, &call_ctx);
+ } else {
+ rc = res.a0;
+ break;
+ }
+ }
+
+ optee_rpc_finalize_call(&call_ctx);
+ /*
+ * We're done with our thread in secure world. If there are
+ * any waiters, wake up one.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return rc;
+}
+
+static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
+{
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = cmd;
+ optee_smc_do_call_with_arg(ctx, shm, offs);
+
+ optee_free_msg_arg(ctx, entry, offs);
+ return 0;
+}
+
+static int optee_smc_do_bottom_half(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
+}
+
+static int optee_smc_stop_async_notif(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
+}
+
+/*
+ * 5. Asynchronous notification
+ */
+
+static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
+ bool *value_pending)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0) {
+ *value_valid = false;
+ return 0;
+ }
+ *value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
+ *value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
+ return res.a1;
+}
+
+static irqreturn_t notif_irq_handler(int irq, void *dev_id)
+{
+ struct optee *optee = dev_id;
+ bool do_bottom_half = false;
+ bool value_valid;
+ bool value_pending;
+ u32 value;
+
+ do {
+ value = get_async_notif_value(optee->smc.invoke_fn,
+ &value_valid, &value_pending);
+ if (!value_valid)
+ break;
+
+ if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
+ do_bottom_half = true;
+ else
+ optee_notif_send(optee, value);
+ } while (value_pending);
+
+ if (do_bottom_half)
+ return IRQ_WAKE_THREAD;
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
+{
+ struct optee *optee = dev_id;
+
+ optee_smc_do_bottom_half(optee->ctx);
+
+ return IRQ_HANDLED;
+}
+
+static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
+{
+ int rc;
+
+ rc = request_threaded_irq(irq, notif_irq_handler,
+ notif_irq_thread_fn,
+ 0, "optee_notification", optee);
+ if (rc)
+ return rc;
+
+ optee->smc.notif_irq = irq;
+
+ return 0;
+}
+
+static void optee_smc_notif_uninit_irq(struct optee *optee)
+{
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ optee_smc_stop_async_notif(optee->ctx);
+ if (optee->smc.notif_irq) {
+ free_irq(optee->smc.notif_irq, optee);
+ irq_dispose_mapping(optee->smc.notif_irq);
+ }
+ }
+}
+
+/*
+ * 6. Driver initialization
+ *
+ * During driver initialization the secure world is probed to find out
+ * which features it supports so the driver can be initialized with a
+ * matching configuration. This involves, for instance, support for
+ * dynamic shared memory instead of a static memory carveout.
+ */
+
+static void optee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ v.gen_caps |= TEE_GEN_CAP_REG_MEM;
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
+ v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
+ *vers = v;
+}
+
+static int optee_smc_open(struct tee_context *ctx)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ u32 sec_caps = optee->smc.sec_caps;
+
+ return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
+}
+
+static const struct tee_driver_ops optee_clnt_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+ .shm_register = optee_shm_register,
+ .shm_unregister = optee_shm_unregister,
+};
+
+static const struct tee_desc optee_clnt_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &optee_clnt_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_supp_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release_supp,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+ .shm_register = optee_shm_register_supp,
+ .shm_unregister = optee_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_supp_desc = {
+ .name = DRIVER_NAME "-supp",
+ .ops = &optee_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static const struct optee_ops optee_ops = {
+ .do_call_with_arg = optee_smc_do_call_with_arg,
+ .to_msg_param = optee_to_msg_param,
+ .from_msg_param = optee_from_msg_param,
+};
+
+static int enable_async_notif(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0)
+ return -EINVAL;
+ return 0;
+}
+
+static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
+ res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
+ return true;
+ return false;
+}
+
+static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_call_get_os_revision_result result;
+ } res = {
+ .result = {
+ .build_id = 0
+ }
+ };
+
+ invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.build_id)
+ pr_info("revision %lu.%lu (%08lx)", res.result.major,
+ res.result.minor, res.result.build_id);
+ else
+ pr_info("revision %lu.%lu", res.result.major, res.result.minor);
+}
+
+static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_calls_revision_result result;
+ } res;
+
+ invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+
+ if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
+ (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
+ return true;
+ return false;
+}
+
+static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
+ u32 *sec_caps, u32 *max_notif_value,
+ unsigned int *rpc_param_count)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_exchange_capabilities_result result;
+ } res;
+ u32 a1 = 0;
+
+ /*
+ * TODO This isn't enough to tell if it's a UP system (from the
+ * kernel's point of view) or not; is_smp() returns the information
+ * needed, but can't be called directly from here.
+ */
+ if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
+ a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
+
+ invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return false;
+
+ *sec_caps = res.result.capabilities;
+ if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
+ *max_notif_value = res.result.max_notif_value;
+ else
+ *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
+ if (*sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
+ *rpc_param_count = (u8)res.result.data;
+ else
+ *rpc_param_count = 0;
+
+ return true;
+}
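+
+/*
+ * Sketch of how the capability bits negotiated above map to driver
+ * features (example_report_caps() is hypothetical; the bit names come
+ * from optee_smc.h):
+ */
+static void __maybe_unused example_report_caps(u32 sec_caps)
+{
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pr_info("dynamic shared memory supported\n");
+ if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
+ pr_info("asynchronous notifications supported\n");
+ if (sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
+ pr_info("RPC argument struct may follow the call argument\n");
+}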
+
+static struct tee_shm_pool *
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_shm_config_result result;
+ } res;
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t begin;
+ phys_addr_t end;
+ void *va;
+ void *rc;
+
+ invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK) {
+ pr_err("static shm service not available\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
+ pr_err("only normal cached shared memory supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ begin = roundup(res.result.start, PAGE_SIZE);
+ end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
+ paddr = begin;
+ size = end - begin;
+
+ va = memremap(paddr, size, MEMREMAP_WB);
+ if (!va) {
+ pr_err("shared memory ioremap failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vaddr = (unsigned long)va;
+
+ rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
+ OPTEE_MIN_STATIC_POOL_ALIGN);
+ if (IS_ERR(rc))
+ memunmap(va);
+ else
+ *memremaped_shm = va;
+
+ return rc;
+}
+
+/* Simple wrapper functions to be able to use a function pointer */
+static void optee_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static optee_invoke_fn *get_invoke_func(struct device *dev)
+{
+ const char *method;
+
+ pr_info("probing for conduit method.\n");
+
+ if (device_property_read_string(dev, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ if (!strcmp("hvc", method))
+ return optee_smccc_hvc;
+ else if (!strcmp("smc", method))
+ return optee_smccc_smc;
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return ERR_PTR(-EINVAL);
+}
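+
+/*
+ * For reference, a device tree node matched by this driver looks
+ * roughly like this (illustrative values; "method" selects the
+ * conduit probed above):
+ *
+ * firmware {
+ * optee {
+ * compatible = "linaro,optee-tz";
+ * method = "smc";
+ * };
+ * };
+ */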
+
+/* optee_smc_remove - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * optee_smc_remove is called by the platform subsystem to alert the
+ * driver that it should release the device.
+ */
+static int optee_smc_remove(struct platform_device *pdev)
+{
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+ * into the old shared memory range.
+ */
+ if (!optee->rpc_param_count)
+ optee_disable_shm_cache(optee);
+
+ optee_smc_notif_uninit_irq(optee);
+
+ optee_remove_common(optee);
+
+ if (optee->smc.memremaped_shm)
+ memunmap(optee->smc.memremaped_shm);
+
+ kfree(optee);
+
+ return 0;
+}
+
+/* optee_shutdown - Device Shutdown Routine
+ * @pdev: platform device information struct
+ *
+ * optee_shutdown is called by the platform subsystem to alert
+ * the driver that a shutdown, reboot, or kexec is happening and
+ * the device must be disabled.
+ */
+static void optee_shutdown(struct platform_device *pdev)
+{
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ if (!optee->rpc_param_count)
+ optee_disable_shm_cache(optee);
+}
+
+static int optee_probe(struct platform_device *pdev)
+{
+ optee_invoke_fn *invoke_fn;
+ struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
+ struct optee *optee = NULL;
+ void *memremaped_shm = NULL;
+ unsigned int rpc_param_count;
+ struct tee_device *teedev;
+ struct tee_context *ctx;
+ u32 max_notif_value;
+ u32 arg_cache_flags;
+ u32 sec_caps;
+ int rc;
+
+ invoke_fn = get_invoke_func(&pdev->dev);
+ if (IS_ERR(invoke_fn))
+ return PTR_ERR(invoke_fn);
+
+ if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
+ pr_warn("api uid mismatch\n");
+ return -EINVAL;
+ }
+
+ optee_msg_get_os_revision(invoke_fn);
+
+ if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
+ pr_warn("api revision mismatch\n");
+ return -EINVAL;
+ }
+
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
+ &max_notif_value,
+ &rpc_param_count)) {
+ pr_warn("capabilities mismatch\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Try to use dynamic shared memory if possible
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+ /*
+ * If we have OPTEE_SMC_SEC_CAP_RPC_ARG we can ask
+ * optee_get_msg_arg() to pre-register (by having
+ * OPTEE_SHM_ARG_ALLOC_PRIV cleared) the page used to pass
+ * an argument struct.
+ *
+ * With the page pre-registered we can use a non-zero
+ * offset for the argument struct; this is indicated with
+ * OPTEE_SHM_ARG_SHARED.
+ *
+ * This means that optee_smc_do_call_with_arg() will use
+ * OPTEE_SMC_CALL_WITH_REGD_ARG for pre-registered pages.
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
+ arg_cache_flags = OPTEE_SHM_ARG_SHARED;
+ else
+ arg_cache_flags = OPTEE_SHM_ARG_ALLOC_PRIV;
+
+ pool = optee_shm_pool_alloc_pages();
+ }
+
+ /*
+ * If dynamic shared memory is not available or allocation failed,
+ * try the static pool instead.
+ */
+ if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) {
+ /*
+ * The static memory pool can use non-zero page offsets so
+ * let optee_get_msg_arg() know that with OPTEE_SHM_ARG_SHARED.
+ *
+ * optee_get_msg_arg() should not pre-register the
+ * allocated page used to pass an argument struct, this is
+ * indicated with OPTEE_SHM_ARG_ALLOC_PRIV.
+ *
+ * This means that optee_smc_do_call_with_arg() will use
+ * OPTEE_SMC_CALL_WITH_ARG if rpc_param_count is 0, else
+ * OPTEE_SMC_CALL_WITH_RPC_ARG.
+ */
+ arg_cache_flags = OPTEE_SHM_ARG_SHARED |
+ OPTEE_SHM_ARG_ALLOC_PRIV;
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+ }
+
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee) {
+ rc = -ENOMEM;
+ goto err_free_pool;
+ }
+
+ optee->ops = &optee_ops;
+ optee->smc.invoke_fn = invoke_fn;
+ optee->smc.sec_caps = sec_caps;
+ optee->rpc_param_count = rpc_param_count;
+
+ teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_optee;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_unreg_teedev;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err_unreg_supp_teedev;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err_unreg_supp_teedev;
+
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_supp_init(&optee->supp);
+ optee->smc.memremaped_shm = memremaped_shm;
+ optee->pool = pool;
+ optee_shm_arg_cache_init(optee, arg_cache_flags);
+
+ platform_set_drvdata(pdev, optee);
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ goto err_supp_uninit;
+ }
+ optee->ctx = ctx;
+ rc = optee_notif_init(optee, max_notif_value);
+ if (rc)
+ goto err_close_ctx;
+
+ if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ unsigned int irq;
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0) {
+ pr_err("platform_get_irq: ret %d\n", rc);
+ goto err_notif_uninit;
+ }
+ irq = rc;
+
+ rc = optee_smc_notif_init_irq(optee, irq);
+ if (rc) {
+ irq_dispose_mapping(irq);
+ goto err_notif_uninit;
+ }
+ enable_async_notif(optee->smc.invoke_fn);
+ pr_info("Asynchronous notifications enabled\n");
+ }
+
+ /*
+ * Ensure that there are no pre-existing shm objects before enabling
+ * the shm cache so that there's no chance of receiving an invalid
+ * address during shutdown. This could occur, for example, if we're
+ * kexec booting from an older kernel that did not properly cleanup the
+ * shm cache.
+ */
+ optee_disable_unmapped_shm_cache(optee);
+
+ /*
+ * Only enable the shm cache in case we're not able to pass the RPC
+ * arg struct right after the normal arg struct.
+ */
+ if (!optee->rpc_param_count)
+ optee_enable_shm_cache(optee);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pr_info("dynamic shared memory is enabled\n");
+
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
+ if (rc)
+ goto err_disable_shm_cache;
+
+ pr_info("initialized driver\n");
+ return 0;
+
+err_disable_shm_cache:
+ if (!optee->rpc_param_count)
+ optee_disable_shm_cache(optee);
+ optee_smc_notif_uninit_irq(optee);
+ optee_unregister_devices();
+err_notif_uninit:
+ optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
+err_supp_uninit:
+ optee_shm_arg_cache_uninit(optee);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
+ tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
+ tee_device_unregister(optee->teedev);
+err_free_optee:
+ kfree(optee);
+err_free_pool:
+ tee_shm_pool_free(pool);
+ if (memremaped_shm)
+ memunmap(memremaped_shm);
+ return rc;
+}
+
+static const struct of_device_id optee_dt_match[] = {
+ { .compatible = "linaro,optee-tz" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, optee_dt_match);
+
+static struct platform_driver optee_driver = {
+ .probe = optee_probe,
+ .remove = optee_smc_remove,
+ .shutdown = optee_shutdown,
+ .driver = {
+ .name = "optee",
+ .of_match_table = optee_dt_match,
+ },
+};
+
+int optee_smc_abi_register(void)
+{
+ return platform_driver_register(&optee_driver);
+}
+
+void optee_smc_abi_unregister(void)
+{
+ platform_driver_unregister(&optee_driver);
+}
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
new file mode 100644
index 000000000..322a543b8
--- /dev/null
+++ b/drivers/tee/optee/supp.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Linaro Limited
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+
+struct optee_supp_req {
+ struct list_head link;
+
+ bool in_queue;
+ u32 func;
+ u32 ret;
+ size_t num_params;
+ struct tee_param *param;
+
+ struct completion c;
+};
+
+void optee_supp_init(struct optee_supp *supp)
+{
+ memset(supp, 0, sizeof(*supp));
+ mutex_init(&supp->mutex);
+ init_completion(&supp->reqs_c);
+ idr_init(&supp->idr);
+ INIT_LIST_HEAD(&supp->reqs);
+ supp->req_id = -1;
+}
+
+void optee_supp_uninit(struct optee_supp *supp)
+{
+ mutex_destroy(&supp->mutex);
+ idr_destroy(&supp->idr);
+}
+
+void optee_supp_release(struct optee_supp *supp)
+{
+ int id;
+ struct optee_supp_req *req;
+ struct optee_supp_req *req_tmp;
+
+ mutex_lock(&supp->mutex);
+
+ /* Abort all requests retrieved by the supplicant */
+ idr_for_each_entry(&supp->idr, req, id) {
+ idr_remove(&supp->idr, id);
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+ }
+
+ /* Abort all queued requests */
+ list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
+ list_del(&req->link);
+ req->in_queue = false;
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+ }
+
+ supp->ctx = NULL;
+ supp->req_id = -1;
+
+ mutex_unlock(&supp->mutex);
+}
+
+/**
+ * optee_supp_thrd_req() - request service from supplicant
+ * @ctx: context doing the request
+ * @func: function requested
+ * @num_params: number of elements in @param array
+ * @param: parameters for function
+ *
+ * Returns result of operation to be passed to secure world
+ */
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ struct tee_param *param)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req;
+ bool interruptable;
+ u32 ret;
+
+ /*
+ * Return early if there is no supplicant available and this is
+ * a non-blocking request.
+ */
+ if (!supp->ctx && ctx->supp_nowait)
+ return TEEC_ERROR_COMMUNICATION;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return TEEC_ERROR_OUT_OF_MEMORY;
+
+ init_completion(&req->c);
+ req->func = func;
+ req->num_params = num_params;
+ req->param = param;
+
+ /* Insert the request in the request list */
+ mutex_lock(&supp->mutex);
+ list_add_tail(&req->link, &supp->reqs);
+ req->in_queue = true;
+ mutex_unlock(&supp->mutex);
+
+ /* Tell a possible waiter there's a new request */
+ complete(&supp->reqs_c);
+
+ /*
+ * Wait for supplicant to process and return the result. Once we've
+ * returned from wait_for_completion(&req->c) successfully we have
+ * exclusive access again.
+ */
+ while (wait_for_completion_interruptible(&req->c)) {
+ mutex_lock(&supp->mutex);
+ interruptable = !supp->ctx;
+ if (interruptable) {
+ /*
+ * There's no supplicant available, and since
+ * supp->mutex is currently held none can
+ * become available until the mutex is released
+ * again.
+ *
+ * Interrupting an RPC to the supplicant is only
+ * allowed as a way of slightly improving the user
+ * experience in case the supplicant hasn't been
+ * started yet. During normal operation the supplicant
+ * will serve all requests in a timely manner and
+ * interrupting it then wouldn't make sense.
+ */
+ if (req->in_queue) {
+ list_del(&req->link);
+ req->in_queue = false;
+ }
+ }
+ mutex_unlock(&supp->mutex);
+
+ if (interruptable) {
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ break;
+ }
+ }
+
+ ret = req->ret;
+ kfree(req);
+
+ return ret;
+}
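+
+/*
+ * Sketch of a typical caller (modelled on the driver's rpc.c): asking
+ * tee-supplicant to allocate shared memory. example_supp_alloc() is
+ * hypothetical and assumes the OPTEE_RPC_* names from optee_rpc_cmd.h
+ * are visible; error handling is trimmed.
+ */
+static u32 __maybe_unused example_supp_alloc(struct tee_context *ctx,
+ size_t size)
+{
+ struct tee_param param = {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT,
+ .u.value.a = OPTEE_RPC_SHM_TYPE_APPL,
+ .u.value.b = size,
+ };
+
+ /* Blocks until tee-supplicant answers (or is interrupted) */
+ return optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_ALLOC, 1, &param);
+}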
+
+static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
+ int num_params, int *id)
+{
+ struct optee_supp_req *req;
+
+ if (supp->req_id != -1) {
+ /*
+ * Supplicant should not mix synchronous and asynchronous
+ * requests.
+ */
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (list_empty(&supp->reqs))
+ return NULL;
+
+ req = list_first_entry(&supp->reqs, struct optee_supp_req, link);
+
+ if (num_params < req->num_params) {
+ /* Not enough room for parameters */
+ return ERR_PTR(-EINVAL);
+ }
+
+ *id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
+ if (*id < 0)
+ return ERR_PTR(-ENOMEM);
+
+ list_del(&req->link);
+ req->in_queue = false;
+
+ return req;
+}
+
+static int supp_check_recv_params(size_t num_params, struct tee_param *params,
+ size_t *num_meta)
+{
+ size_t n;
+
+ if (!num_params)
+ return -EINVAL;
+
+ /*
+ * If there are memrefs we need to decrease their reference counts,
+ * as they were increased earlier; we'll even refuse to accept any below.
+ */
+ for (n = 0; n < num_params; n++)
+ if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+
+ /*
+ * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
+ * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
+ */
+ for (n = 0; n < num_params; n++)
+ if (params[n].attr &&
+ params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
+ return -EINVAL;
+
+ /* At most we'll need one meta parameter so no need to check for more */
+ if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
+ *num_meta = 1;
+ else
+ *num_meta = 0;
+
+ return 0;
+}
+
+/**
+ * optee_supp_recv() - receive request for supplicant
+ * @ctx: context receiving the request
+ * @func: requested function in supplicant
+ * @num_params: number of elements allocated in @param, updated with the
+ * number of used elements
+ * @param: space for parameters for @func
+ *
+ * Returns 0 on success or <0 on failure
+ */
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req = NULL;
+ int id;
+ size_t num_meta;
+ int rc;
+
+ rc = supp_check_recv_params(*num_params, param, &num_meta);
+ if (rc)
+ return rc;
+
+ while (true) {
+ mutex_lock(&supp->mutex);
+ req = supp_pop_entry(supp, *num_params - num_meta, &id);
+ mutex_unlock(&supp->mutex);
+
+ if (req) {
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ break;
+ }
+
+ /*
+ * If we didn't get a request we'll block in
+ * wait_for_completion() to avoid needless spinning.
+ *
+ * This is where the supplicant will be hanging most of
+ * the time, so let's make this interruptible so that the
+ * supplicant can easily be restarted if needed.
+ */
+ if (wait_for_completion_interruptible(&supp->reqs_c))
+ return -ERESTARTSYS;
+ }
+
+ if (num_meta) {
+ /*
+ * tee-supplicant supports meta parameters -> requests can be
+ * processed asynchronously.
+ */
+ param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ TEE_IOCTL_PARAM_ATTR_META;
+ param->u.value.a = id;
+ param->u.value.b = 0;
+ param->u.value.c = 0;
+ } else {
+ mutex_lock(&supp->mutex);
+ supp->req_id = id;
+ mutex_unlock(&supp->mutex);
+ }
+
+ *func = req->func;
+ *num_params = req->num_params + num_meta;
+ memcpy(param + num_meta, req->param,
+ sizeof(struct tee_param) * req->num_params);
+
+ return 0;
+}
+
+static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
+ size_t num_params,
+ struct tee_param *param,
+ size_t *num_meta)
+{
+ struct optee_supp_req *req;
+ int id;
+ size_t nm;
+ const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ TEE_IOCTL_PARAM_ATTR_META;
+
+ if (!num_params)
+ return ERR_PTR(-EINVAL);
+
+ if (supp->req_id == -1) {
+ if (param->attr != attr)
+ return ERR_PTR(-EINVAL);
+ id = param->u.value.a;
+ nm = 1;
+ } else {
+ id = supp->req_id;
+ nm = 0;
+ }
+
+ req = idr_find(&supp->idr, id);
+ if (!req)
+ return ERR_PTR(-ENOENT);
+
+ if ((num_params - nm) != req->num_params)
+ return ERR_PTR(-EINVAL);
+
+ idr_remove(&supp->idr, id);
+ supp->req_id = -1;
+ *num_meta = nm;
+
+ return req;
+}
+
+/**
+ * optee_supp_send() - send result of request from supplicant
+ * @ctx: context sending result
+ * @ret: return value of request
+ * @num_params: number of parameters returned
+ * @param: returned parameters
+ *
+ * Returns 0 on success or <0 on failure.
+ */
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req;
+ size_t n;
+ size_t num_meta;
+
+ mutex_lock(&supp->mutex);
+ req = supp_pop_req(supp, num_params, param, &num_meta);
+ mutex_unlock(&supp->mutex);
+
+ if (IS_ERR(req)) {
+ /* Something is wrong, let the supplicant restart. */
+ return PTR_ERR(req);
+ }
+
+ /* Update out and in/out parameters */
+ for (n = 0; n < req->num_params; n++) {
+ struct tee_param *p = req->param + n;
+
+ switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ p->u.value.a = param[n + num_meta].u.value.a;
+ p->u.value.b = param[n + num_meta].u.value.b;
+ p->u.value.c = param[n + num_meta].u.value.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ p->u.memref.size = param[n + num_meta].u.memref.size;
+ break;
+ default:
+ break;
+ }
+ }
+ req->ret = ret;
+
+ /* Let the requesting thread continue */
+ complete(&req->c);
+
+ return 0;
+}
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
new file mode 100644
index 000000000..98da206cd
--- /dev/null
+++ b/drivers/tee/tee_core.c
@@ -0,0 +1,1272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/cdev.h>
+#include <linux/cred.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
+#include <crypto/hash.h>
+#include <crypto/sha1.h>
+#include "tee_private.h"
+
+#define TEE_NUM_DEVICES 32
+
+#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
+
+#define TEE_UUID_NS_NAME_SIZE 128
+
+/*
+ * TEE Client UUID name space identifier (UUIDv4)
+ *
+ * The value here is a random UUID allocated as the name space identifier
+ * for forming client UUIDs for the TEE environment using the UUIDv5 scheme.
+ */
+static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
+ 0xa1, 0xb8, 0xec, 0x4b,
+ 0xc0, 0x8e, 0x01, 0xb6);
+
+/*
+ * Unprivileged devices in the lower half range and privileged devices in
+ * the upper half range.
+ */
+static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
+static DEFINE_SPINLOCK(driver_lock);
+
+static struct class *tee_class;
+static dev_t tee_devt;
+
+struct tee_context *teedev_open(struct tee_device *teedev)
+{
+ int rc;
+ struct tee_context *ctx;
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ kref_init(&ctx->refcount);
+ ctx->teedev = teedev;
+ rc = teedev->desc->ops->open(ctx);
+ if (rc)
+ goto err;
+
+ return ctx;
+err:
+ kfree(ctx);
+ tee_device_put(teedev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(teedev_open);
+
+void teedev_ctx_get(struct tee_context *ctx)
+{
+ if (ctx->releasing)
+ return;
+
+ kref_get(&ctx->refcount);
+}
+
+static void teedev_ctx_release(struct kref *ref)
+{
+ struct tee_context *ctx = container_of(ref, struct tee_context,
+ refcount);
+ ctx->releasing = true;
+ ctx->teedev->desc->ops->release(ctx);
+ kfree(ctx);
+}
+
+void teedev_ctx_put(struct tee_context *ctx)
+{
+ if (ctx->releasing)
+ return;
+
+ kref_put(&ctx->refcount, teedev_ctx_release);
+}
+
+void teedev_close_context(struct tee_context *ctx)
+{
+ struct tee_device *teedev = ctx->teedev;
+
+ teedev_ctx_put(ctx);
+ tee_device_put(teedev);
+}
+EXPORT_SYMBOL_GPL(teedev_close_context);
+
+static int tee_open(struct inode *inode, struct file *filp)
+{
+ struct tee_context *ctx;
+
+ ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ /*
+ * Default user-space behaviour is to wait for tee-supplicant,
+ * if it is not present, for any requests in this context.
+ */
+ ctx->supp_nowait = false;
+ filp->private_data = ctx;
+ return 0;
+}
+
+static int tee_release(struct inode *inode, struct file *filp)
+{
+ teedev_close_context(filp->private_data);
+ return 0;
+}
+
+/**
+ * uuid_v5() - Calculate UUIDv5
+ * @uuid: Resulting UUID
+ * @ns: Name space ID for UUIDv5 function
+ * @name: Name for UUIDv5 function
+ * @size: Size of name
+ *
+ * UUIDv5 is specified in RFC 4122.
+ *
+ * This implements section (for SHA-1):
+ * 4.3. Algorithm for Creating a Name-Based UUID
+ */
+static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
+ size_t size)
+{
+ unsigned char hash[SHA1_DIGEST_SIZE];
+ struct crypto_shash *shash = NULL;
+ struct shash_desc *desc = NULL;
+ int rc;
+
+ shash = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(shash)) {
+ rc = PTR_ERR(shash);
+ pr_err("shash(sha1) allocation failed\n");
+ return rc;
+ }
+
+ desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!desc) {
+ rc = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ desc->tfm = shash;
+
+ rc = crypto_shash_init(desc);
+ if (rc < 0)
+ goto out_free_desc;
+
+ rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
+ if (rc < 0)
+ goto out_free_desc;
+
+ rc = crypto_shash_update(desc, (const u8 *)name, size);
+ if (rc < 0)
+ goto out_free_desc;
+
+ rc = crypto_shash_final(desc, hash);
+ if (rc < 0)
+ goto out_free_desc;
+
+ memcpy(uuid->b, hash, UUID_SIZE);
+
+ /* Tag for version 5 */
+ uuid->b[6] = (hash[6] & 0x0F) | 0x50;
+ uuid->b[8] = (hash[8] & 0x3F) | 0x80;
+
+out_free_desc:
+ kfree(desc);
+
+out_free_shash:
+ crypto_free_shash(shash);
+ return rc;
+}
+
+int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ const u8 connection_data[TEE_IOCTL_UUID_LEN])
+{
+ gid_t ns_grp = (gid_t)-1;
+ kgid_t grp = INVALID_GID;
+ char *name = NULL;
+ int name_len;
+ int rc;
+
+ if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
+ connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
+ /* Nil UUID to be passed to TEE environment */
+ uuid_copy(uuid, &uuid_null);
+ return 0;
+ }
+
+ /*
+ * In the Linux environment the client UUID is based on UUIDv5.
+ *
+ * Determine client UUID with following semantics for 'name':
+ *
+ * For TEEC_LOGIN_USER:
+ * uid=<uid>
+ *
+ * For TEEC_LOGIN_GROUP:
+ * gid=<gid>
+ *
+ */
+
+ name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ switch (connection_method) {
+ case TEE_IOCTL_LOGIN_USER:
+ name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
+ current_euid().val);
+ if (name_len >= TEE_UUID_NS_NAME_SIZE) {
+ rc = -E2BIG;
+ goto out_free_name;
+ }
+ break;
+
+ case TEE_IOCTL_LOGIN_GROUP:
+ memcpy(&ns_grp, connection_data, sizeof(gid_t));
+ grp = make_kgid(current_user_ns(), ns_grp);
+ if (!gid_valid(grp) || !in_egroup_p(grp)) {
+ rc = -EPERM;
+ goto out_free_name;
+ }
+
+ name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
+ grp.val);
+ if (name_len >= TEE_UUID_NS_NAME_SIZE) {
+ rc = -E2BIG;
+ goto out_free_name;
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ goto out_free_name;
+ }
+
+ rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
+out_free_name:
+ kfree(name);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
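+
+/*
+ * Sketch of a caller (example_login_uuid() is hypothetical): deriving
+ * the client UUID for the current user. TEE_IOCTL_LOGIN_USER ignores
+ * @connection_data, so NULL is fine here.
+ */
+static int __maybe_unused example_login_uuid(uuid_t *uuid)
+{
+ return tee_session_calc_client_uuid(uuid, TEE_IOCTL_LOGIN_USER, NULL);
+}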
+
+static int tee_ioctl_version(struct tee_context *ctx,
+ struct tee_ioctl_version_data __user *uvers)
+{
+ struct tee_ioctl_version_data vers;
+
+ ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
+
+ if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
+ vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;
+
+ if (copy_to_user(uvers, &vers, sizeof(vers)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int tee_ioctl_shm_alloc(struct tee_context *ctx,
+ struct tee_ioctl_shm_alloc_data __user *udata)
+{
+ long ret;
+ struct tee_ioctl_shm_alloc_data data;
+ struct tee_shm *shm;
+
+ if (copy_from_user(&data, udata, sizeof(data)))
+ return -EFAULT;
+
+ /* Currently no input flags are supported */
+ if (data.flags)
+ return -EINVAL;
+
+ shm = tee_shm_alloc_user_buf(ctx, data.size);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ data.id = shm->id;
+ data.size = shm->size;
+
+ if (copy_to_user(udata, &data, sizeof(data)))
+ ret = -EFAULT;
+ else
+ ret = tee_shm_get_fd(shm);
+
+ /*
+ * When user space closes the file descriptor the shared memory
+ * will be freed, or if tee_shm_get_fd() failed then it is
+ * freed immediately.
+ */
+ tee_shm_put(shm);
+ return ret;
+}
+
+static int
+tee_ioctl_shm_register(struct tee_context *ctx,
+ struct tee_ioctl_shm_register_data __user *udata)
+{
+ long ret;
+ struct tee_ioctl_shm_register_data data;
+ struct tee_shm *shm;
+
+ if (copy_from_user(&data, udata, sizeof(data)))
+ return -EFAULT;
+
+ /* Currently no input flags are supported */
+ if (data.flags)
+ return -EINVAL;
+
+ shm = tee_shm_register_user_buf(ctx, data.addr, data.length);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ data.id = shm->id;
+ data.length = shm->size;
+
+ if (copy_to_user(udata, &data, sizeof(data)))
+ ret = -EFAULT;
+ else
+ ret = tee_shm_get_fd(shm);
+ /*
+ * When user space closes the file descriptor the shared memory
+ * will be freed, or if tee_shm_get_fd() failed then it is
+ * freed immediately.
+ */
+ tee_shm_put(shm);
+ return ret;
+}
+
+static int params_from_user(struct tee_context *ctx, struct tee_param *params,
+ size_t num_params,
+ struct tee_ioctl_param __user *uparams)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_shm *shm;
+ struct tee_ioctl_param ip;
+
+ if (copy_from_user(&ip, uparams + n, sizeof(ip)))
+ return -EFAULT;
+
+ /* All unused attribute bits have to be zero */
+ if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
+ return -EINVAL;
+
+ params[n].attr = ip.attr;
+ switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ params[n].u.value.a = ip.a;
+ params[n].u.value.b = ip.b;
+ params[n].u.value.c = ip.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ /*
+ * If a NULL pointer is passed to a TA in the TEE,
+ * the ip.c IOCTL parameter is set to TEE_MEMREF_NULL,
+ * indicating a NULL memory reference.
+ */
+ if (ip.c != TEE_MEMREF_NULL) {
+ /*
+ * If we fail to get a pointer to a shared
+ * memory object (and increase the ref count)
+ * from an identifier we return an error. All
+ * pointers that have been added to params have
+ * an increased ref count. It's the caller's
+ * responsibility to do tee_shm_put() on all
+ * resolved pointers.
+ */
+ shm = tee_shm_get_from_id(ctx, ip.c);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ /*
+ * Ensure offset + size does not wrap around
+ * and does not exceed the size of
+ * the referred shared memory object.
+ */
+ if ((ip.a + ip.b) < ip.a ||
+ (ip.a + ip.b) > shm->size) {
+ tee_shm_put(shm);
+ return -EINVAL;
+ }
+ } else if (ctx->cap_memref_null) {
+ /* Pass NULL pointer to OP-TEE */
+ shm = NULL;
+ } else {
+ return -EINVAL;
+ }
+
+ params[n].u.memref.shm_offs = ip.a;
+ params[n].u.memref.size = ip.b;
+ params[n].u.memref.shm = shm;
+ break;
+ default:
+ /* Unknown attribute */
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
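+
+/*
+ * The memref bounds check above, extracted for illustration
+ * (example_memref_in_bounds() is hypothetical): a range is accepted
+ * only if offset + size neither wraps around nor exceeds the size of
+ * the shared memory object.
+ */
+static bool __maybe_unused example_memref_in_bounds(u64 offs, u64 size,
+ u64 shm_size)
+{
+ return offs + size >= offs && offs + size <= shm_size;
+}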
+
+static int params_to_user(struct tee_ioctl_param __user *uparams,
+ size_t num_params, struct tee_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_ioctl_param __user *up = uparams + n;
+ struct tee_param *p = params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ if (put_user(p->u.value.a, &up->a) ||
+ put_user(p->u.value.b, &up->b) ||
+ put_user(p->u.value.c, &up->c))
+ return -EFAULT;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (put_user((u64)p->u.memref.size, &up->b))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int tee_ioctl_open_session(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ int rc;
+ size_t n;
+ struct tee_ioctl_buf_data buf;
+ struct tee_ioctl_open_session_arg __user *uarg;
+ struct tee_ioctl_open_session_arg arg;
+ struct tee_ioctl_param __user *uparams = NULL;
+ struct tee_param *params = NULL;
+ bool have_session = false;
+
+ if (!ctx->teedev->desc->ops->open_session)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ return -EINVAL;
+
+ if (arg.num_params) {
+ params = kcalloc(arg.num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ uparams = uarg->params;
+ rc = params_from_user(ctx, params, arg.num_params, uparams);
+ if (rc)
+ goto out;
+ }
+
+ if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
+ arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
+ pr_debug("login method not allowed for user-space client\n");
+ rc = -EPERM;
+ goto out;
+ }
+
+ rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
+ if (rc)
+ goto out;
+ have_session = true;
+
+ if (put_user(arg.session, &uarg->session) ||
+ put_user(arg.ret, &uarg->ret) ||
+ put_user(arg.ret_origin, &uarg->ret_origin)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ rc = params_to_user(uparams, arg.num_params, params);
+out:
+ /*
+ * If we succeeded in opening the session but failed to communicate
+ * it back to user space, close the session again to avoid leakage.
+ */
+ if (rc && have_session && ctx->teedev->desc->ops->close_session)
+ ctx->teedev->desc->ops->close_session(ctx, arg.session);
+
+ if (params) {
+ /* Decrease ref count for all valid shared memory pointers */
+ for (n = 0; n < arg.num_params; n++)
+ if (tee_param_is_memref(params + n) &&
+ params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+ kfree(params);
+ }
+
+ return rc;
+}
+
+static int tee_ioctl_invoke(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ int rc;
+ size_t n;
+ struct tee_ioctl_buf_data buf;
+ struct tee_ioctl_invoke_arg __user *uarg;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_ioctl_param __user *uparams = NULL;
+ struct tee_param *params = NULL;
+
+ if (!ctx->teedev->desc->ops->invoke_func)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ return -EINVAL;
+
+ if (arg.num_params) {
+ params = kcalloc(arg.num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ uparams = uarg->params;
+ rc = params_from_user(ctx, params, arg.num_params, uparams);
+ if (rc)
+ goto out;
+ }
+
+ rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
+ if (rc)
+ goto out;
+
+ if (put_user(arg.ret, &uarg->ret) ||
+ put_user(arg.ret_origin, &uarg->ret_origin)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ rc = params_to_user(uparams, arg.num_params, params);
+out:
+ if (params) {
+ /* Decrease ref count for all valid shared memory pointers */
+ for (n = 0; n < arg.num_params; n++)
+ if (tee_param_is_memref(params + n) &&
+ params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+ kfree(params);
+ }
+ return rc;
+}
+
+static int tee_ioctl_cancel(struct tee_context *ctx,
+ struct tee_ioctl_cancel_arg __user *uarg)
+{
+ struct tee_ioctl_cancel_arg arg;
+
+ if (!ctx->teedev->desc->ops->cancel_req)
+ return -EINVAL;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
+ arg.session);
+}
+
+static int
+tee_ioctl_close_session(struct tee_context *ctx,
+ struct tee_ioctl_close_session_arg __user *uarg)
+{
+ struct tee_ioctl_close_session_arg arg;
+
+ if (!ctx->teedev->desc->ops->close_session)
+ return -EINVAL;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ return ctx->teedev->desc->ops->close_session(ctx, arg.session);
+}
+
+static int params_to_supp(struct tee_context *ctx,
+ struct tee_ioctl_param __user *uparams,
+ size_t num_params, struct tee_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_ioctl_param ip;
+ struct tee_param *p = params + n;
+
+ ip.attr = p->attr;
+ switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ ip.a = p->u.value.a;
+ ip.b = p->u.value.b;
+ ip.c = p->u.value.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ ip.b = p->u.memref.size;
+ if (!p->u.memref.shm) {
+ ip.a = 0;
+ ip.c = (u64)-1; /* invalid shm id */
+ break;
+ }
+ ip.a = p->u.memref.shm_offs;
+ ip.c = p->u.memref.shm->id;
+ break;
+ default:
+ ip.a = 0;
+ ip.b = 0;
+ ip.c = 0;
+ break;
+ }
+
+ if (copy_to_user(uparams + n, &ip, sizeof(ip)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int tee_ioctl_supp_recv(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ int rc;
+ struct tee_ioctl_buf_data buf;
+ struct tee_iocl_supp_recv_arg __user *uarg;
+ struct tee_param *params;
+ u32 num_params;
+ u32 func;
+
+ if (!ctx->teedev->desc->ops->supp_recv)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (get_user(num_params, &uarg->num_params))
+ return -EFAULT;
+
+ if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
+ return -EINVAL;
+
+ params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ rc = params_from_user(ctx, params, num_params, uarg->params);
+ if (rc)
+ goto out;
+
+ rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
+ if (rc)
+ goto out;
+
+ if (put_user(func, &uarg->func) ||
+ put_user(num_params, &uarg->num_params)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = params_to_supp(ctx, uarg->params, num_params, params);
+out:
+ kfree(params);
+ return rc;
+}
+
+static int params_from_supp(struct tee_param *params, size_t num_params,
+ struct tee_ioctl_param __user *uparams)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ struct tee_ioctl_param ip;
+
+ if (copy_from_user(&ip, uparams + n, sizeof(ip)))
+ return -EFAULT;
+
+ /* All unused attribute bits have to be zero */
+ if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
+ return -EINVAL;
+
+ p->attr = ip.attr;
+ switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ /* Only out and in/out values can be updated */
+ p->u.value.a = ip.a;
+ p->u.value.b = ip.b;
+ p->u.value.c = ip.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ /*
+ * Only the size of the memref can be updated.
+ * Since we don't have access to the original
+ * parameters here, only store the supplied size.
+ * The driver will copy the updated size into the
+ * original parameters.
+ */
+ p->u.memref.shm = NULL;
+ p->u.memref.shm_offs = 0;
+ p->u.memref.size = ip.b;
+ break;
+ default:
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ }
+ }
+ return 0;
+}
+
+static int tee_ioctl_supp_send(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ long rc;
+ struct tee_ioctl_buf_data buf;
+ struct tee_iocl_supp_send_arg __user *uarg;
+ struct tee_param *params;
+ u32 num_params;
+ u32 ret;
+
+ /* Not valid for this driver */
+ if (!ctx->teedev->desc->ops->supp_send)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (get_user(ret, &uarg->ret) ||
+ get_user(num_params, &uarg->num_params))
+ return -EFAULT;
+
+ if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
+ return -EINVAL;
+
+ params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ rc = params_from_supp(params, num_params, uarg->params);
+ if (rc)
+ goto out;
+
+ rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
+out:
+ kfree(params);
+ return rc;
+}
+
+static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct tee_context *ctx = filp->private_data;
+ void __user *uarg = (void __user *)arg;
+
+ switch (cmd) {
+ case TEE_IOC_VERSION:
+ return tee_ioctl_version(ctx, uarg);
+ case TEE_IOC_SHM_ALLOC:
+ return tee_ioctl_shm_alloc(ctx, uarg);
+ case TEE_IOC_SHM_REGISTER:
+ return tee_ioctl_shm_register(ctx, uarg);
+ case TEE_IOC_OPEN_SESSION:
+ return tee_ioctl_open_session(ctx, uarg);
+ case TEE_IOC_INVOKE:
+ return tee_ioctl_invoke(ctx, uarg);
+ case TEE_IOC_CANCEL:
+ return tee_ioctl_cancel(ctx, uarg);
+ case TEE_IOC_CLOSE_SESSION:
+ return tee_ioctl_close_session(ctx, uarg);
+ case TEE_IOC_SUPPL_RECV:
+ return tee_ioctl_supp_recv(ctx, uarg);
+ case TEE_IOC_SUPPL_SEND:
+ return tee_ioctl_supp_send(ctx, uarg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct file_operations tee_fops = {
+ .owner = THIS_MODULE,
+ .open = tee_open,
+ .release = tee_release,
+ .unlocked_ioctl = tee_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
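+
+/*
+ * Sketch of the matching user-space side for TEE_IOC_VERSION (user
+ * code, for illustration only; <linux/tee.h> provides the ioctl number
+ * and struct, "/dev/tee0" is an assumed device node):
+ */
+#if 0
+ int fd = open("/dev/tee0", O_RDWR);
+ struct tee_ioctl_version_data vers;
+
+ if (fd >= 0 && !ioctl(fd, TEE_IOC_VERSION, &vers))
+ printf("impl_id %u gen_caps %#llx\n", vers.impl_id,
+ (unsigned long long)vers.gen_caps);
+#endif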
+
+static void tee_release_device(struct device *dev)
+{
+ struct tee_device *teedev = container_of(dev, struct tee_device, dev);
+
+ spin_lock(&driver_lock);
+ clear_bit(teedev->id, dev_mask);
+ spin_unlock(&driver_lock);
+ mutex_destroy(&teedev->mutex);
+ idr_destroy(&teedev->idr);
+ kfree(teedev);
+}
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc: Descriptor for this driver
+ * @dev: Parent device for this device
+ * @pool: Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+ struct device *dev,
+ struct tee_shm_pool *pool,
+ void *driver_data)
+{
+ struct tee_device *teedev;
+ void *ret;
+ int rc, max_id;
+ int offs = 0;
+
+ if (!teedesc || !teedesc->name || !teedesc->ops ||
+ !teedesc->ops->get_version || !teedesc->ops->open ||
+ !teedesc->ops->release || !pool)
+ return ERR_PTR(-EINVAL);
+
+ teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
+ if (!teedev) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ max_id = TEE_NUM_DEVICES / 2;
+
+ if (teedesc->flags & TEE_DESC_PRIVILEGED) {
+ offs = TEE_NUM_DEVICES / 2;
+ max_id = TEE_NUM_DEVICES;
+ }
+
+ spin_lock(&driver_lock);
+ teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
+ if (teedev->id < max_id)
+ set_bit(teedev->id, dev_mask);
+ spin_unlock(&driver_lock);
+
+ if (teedev->id >= max_id) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
+ teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
+ teedev->id - offs);
+
+ teedev->dev.class = tee_class;
+ teedev->dev.release = tee_release_device;
+ teedev->dev.parent = dev;
+
+ teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
+
+ rc = dev_set_name(&teedev->dev, "%s", teedev->name);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto err_devt;
+ }
+
+ cdev_init(&teedev->cdev, &tee_fops);
+ teedev->cdev.owner = teedesc->owner;
+
+ dev_set_drvdata(&teedev->dev, driver_data);
+ device_initialize(&teedev->dev);
+
+ /* 1 as tee_device_unregister() does one final tee_device_put() */
+ teedev->num_users = 1;
+ init_completion(&teedev->c_no_users);
+ mutex_init(&teedev->mutex);
+ idr_init(&teedev->idr);
+
+ teedev->desc = teedesc;
+ teedev->pool = pool;
+
+ return teedev;
+err_devt:
+ unregister_chrdev_region(teedev->dev.devt, 1);
+err:
+ pr_err("could not register %s driver\n",
+ teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
+ if (teedev && teedev->id < TEE_NUM_DEVICES) {
+ spin_lock(&driver_lock);
+ clear_bit(teedev->id, dev_mask);
+ spin_unlock(&driver_lock);
+ }
+ kfree(teedev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tee_device_alloc);
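+
+/*
+ * Sketch of the expected call sequence in a TEE driver
+ * (example_register_teedev() is hypothetical; it mirrors what the
+ * OP-TEE probe code above does):
+ */
+static struct tee_device *__maybe_unused
+example_register_teedev(const struct tee_desc *desc, struct device *parent,
+ struct tee_shm_pool *pool, void *drv_data)
+{
+ struct tee_device *teedev;
+ int rc;
+
+ teedev = tee_device_alloc(desc, parent, pool, drv_data);
+ if (IS_ERR(teedev))
+ return teedev;
+
+ rc = tee_device_register(teedev);
+ if (rc) {
+ /* Valid even when tee_device_register() failed */
+ tee_device_unregister(teedev);
+ return ERR_PTR(rc);
+ }
+ return teedev;
+}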
+
+static ssize_t implementation_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tee_device *teedev = container_of(dev, struct tee_device, dev);
+ struct tee_ioctl_version_data vers;
+
+ teedev->desc->ops->get_version(teedev, &vers);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
+}
+static DEVICE_ATTR_RO(implementation_id);
+
+static struct attribute *tee_dev_attrs[] = {
+ &dev_attr_implementation_id.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(tee_dev);
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev: Device to register
+ *
+ * tee_device_unregister() needs to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev)
+{
+ int rc;
+
+ if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
+ dev_err(&teedev->dev, "attempt to register twice\n");
+ return -EINVAL;
+ }
+
+ teedev->dev.groups = tee_dev_groups;
+
+ rc = cdev_device_add(&teedev->cdev, &teedev->dev);
+ if (rc) {
+ dev_err(&teedev->dev,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ teedev->name, MAJOR(teedev->dev.devt),
+ MINOR(teedev->dev.devt), rc);
+ return rc;
+ }
+
+ teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tee_device_register);
+
+void tee_device_put(struct tee_device *teedev)
+{
+ mutex_lock(&teedev->mutex);
+	/* A put shouldn't happen in this state, i.e., with no users left */
+ if (!WARN_ON(!teedev->desc)) {
+ teedev->num_users--;
+ if (!teedev->num_users) {
+ teedev->desc = NULL;
+ complete(&teedev->c_no_users);
+ }
+ }
+ mutex_unlock(&teedev->mutex);
+}
+
+bool tee_device_get(struct tee_device *teedev)
+{
+ mutex_lock(&teedev->mutex);
+ if (!teedev->desc) {
+ mutex_unlock(&teedev->mutex);
+ return false;
+ }
+ teedev->num_users++;
+ mutex_unlock(&teedev->mutex);
+ return true;
+}
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev: Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev)
+{
+ if (!teedev)
+ return;
+
+ if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
+ cdev_device_del(&teedev->cdev, &teedev->dev);
+
+ tee_device_put(teedev);
+ wait_for_completion(&teedev->c_no_users);
+
+ /*
+ * No need to take a mutex any longer now since teedev->desc was
+ * set to NULL before teedev->c_no_users was completed.
+ */
+
+ teedev->pool = NULL;
+
+ put_device(&teedev->dev);
+}
+EXPORT_SYMBOL_GPL(tee_device_unregister);
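+
+/*
+ * A minimal teardown sketch (illustrative only), matching the probe
+ * sequence above. Note the ordering: the pool passed to tee_device_alloc()
+ * is owned by the driver and may only be freed once the device is gone,
+ * since teedev->pool is used until the last user has dropped its
+ * reference. foo_pool is the hypothetical pool from the probe sketch:
+ *
+ *	tee_device_unregister(teedev);
+ *	tee_shm_pool_free(foo_pool);
+ */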
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @teedev: Device containing the driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev)
+{
+ return dev_get_drvdata(&teedev->dev);
+}
+EXPORT_SYMBOL_GPL(tee_get_drvdata);
+
+struct match_dev_data {
+ struct tee_ioctl_version_data *vers;
+ const void *data;
+ int (*match)(struct tee_ioctl_version_data *, const void *);
+};
+
+static int match_dev(struct device *dev, const void *data)
+{
+ const struct match_dev_data *match_data = data;
+ struct tee_device *teedev = container_of(dev, struct tee_device, dev);
+
+ teedev->desc->ops->get_version(teedev, match_data->vers);
+ return match_data->match(match_data->vers, match_data->data);
+}
+
+struct tee_context *
+tee_client_open_context(struct tee_context *start,
+ int (*match)(struct tee_ioctl_version_data *,
+ const void *),
+ const void *data, struct tee_ioctl_version_data *vers)
+{
+ struct device *dev = NULL;
+ struct device *put_dev = NULL;
+ struct tee_context *ctx = NULL;
+ struct tee_ioctl_version_data v;
+ struct match_dev_data match_data = { vers ? vers : &v, data, match };
+
+ if (start)
+ dev = &start->teedev->dev;
+
+ do {
+ dev = class_find_device(tee_class, dev, &match_data, match_dev);
+ if (!dev) {
+ ctx = ERR_PTR(-ENOENT);
+ break;
+ }
+
+ put_device(put_dev);
+ put_dev = dev;
+
+ ctx = teedev_open(container_of(dev, struct tee_device, dev));
+ } while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);
+
+ put_device(put_dev);
+ /*
+	 * The default behaviour for an in-kernel client is to not wait for
+	 * tee-supplicant if it is not present for any requests in this
+	 * context. The flag can be reconfigured before the call to
+	 * tee_client_open_session() if an in-kernel client requires
+	 * different behaviour.
+ */
+ if (!IS_ERR(ctx))
+ ctx->supp_nowait = true;
+
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(tee_client_open_context);
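+
+/*
+ * A minimal usage sketch (illustrative only): an in-kernel client usually
+ * matches on the implementation id reported by the driver, for instance
+ * picking an OP-TEE device:
+ *
+ *	static int optee_ctx_match(struct tee_ioctl_version_data *ver,
+ *				   const void *data)
+ *	{
+ *		return ver->impl_id == TEE_IMPL_ID_OPTEE;
+ *	}
+ *
+ *	ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+ *	if (IS_ERR(ctx))
+ *		return PTR_ERR(ctx);
+ */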
+
+void tee_client_close_context(struct tee_context *ctx)
+{
+ teedev_close_context(ctx);
+}
+EXPORT_SYMBOL_GPL(tee_client_close_context);
+
+void tee_client_get_version(struct tee_context *ctx,
+ struct tee_ioctl_version_data *vers)
+{
+ ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
+}
+EXPORT_SYMBOL_GPL(tee_client_get_version);
+
+int tee_client_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ if (!ctx->teedev->desc->ops->open_session)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->open_session(ctx, arg, param);
+}
+EXPORT_SYMBOL_GPL(tee_client_open_session);
+
+int tee_client_close_session(struct tee_context *ctx, u32 session)
+{
+ if (!ctx->teedev->desc->ops->close_session)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->close_session(ctx, session);
+}
+EXPORT_SYMBOL_GPL(tee_client_close_session);
+
+int tee_client_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ if (!ctx->teedev->desc->ops->invoke_func)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
+}
+EXPORT_SYMBOL_GPL(tee_client_invoke_func);
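+
+/*
+ * A minimal usage sketch (illustrative only): an in-kernel client opens a
+ * session towards a Trusted Application and then invokes a command in it.
+ * ta_uuid and TA_CMD_FOO are hypothetical; the argument structs come from
+ * uapi/linux/tee.h and zeroed params carry TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ *
+ *	struct tee_ioctl_open_session_arg sess_arg;
+ *	struct tee_ioctl_invoke_arg inv_arg;
+ *	struct tee_param param[4] = { };
+ *
+ *	memset(&sess_arg, 0, sizeof(sess_arg));
+ *	export_uuid(sess_arg.uuid, &ta_uuid);
+ *	sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+ *	sess_arg.num_params = 0;
+ *	rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ *	if (rc < 0 || sess_arg.ret)
+ *		goto out;
+ *
+ *	memset(&inv_arg, 0, sizeof(inv_arg));
+ *	inv_arg.func = TA_CMD_FOO;
+ *	inv_arg.session = sess_arg.session;
+ *	inv_arg.num_params = 4;
+ *	rc = tee_client_invoke_func(ctx, &inv_arg, param);
+ */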
+
+int tee_client_cancel_req(struct tee_context *ctx,
+ struct tee_ioctl_cancel_arg *arg)
+{
+ if (!ctx->teedev->desc->ops->cancel_req)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
+ arg->session);
+}
+
+static int tee_client_device_match(struct device *dev,
+ struct device_driver *drv)
+{
+ const struct tee_client_device_id *id_table;
+ struct tee_client_device *tee_device;
+
+ id_table = to_tee_client_driver(drv)->id_table;
+ tee_device = to_tee_client_device(dev);
+
+ while (!uuid_is_null(&id_table->uuid)) {
+ if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
+ return 1;
+ id_table++;
+ }
+
+ return 0;
+}
+
+static int tee_client_device_uevent(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
+
+ return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
+}
+
+struct bus_type tee_bus_type = {
+ .name = "tee",
+ .match = tee_client_device_match,
+ .uevent = tee_client_device_uevent,
+};
+EXPORT_SYMBOL_GPL(tee_bus_type);
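+
+/*
+ * A minimal usage sketch (illustrative only): a client driver binds to a
+ * Trusted Application enumerated on this bus by matching on its UUID. The
+ * UUID value and the foo_* names are hypothetical:
+ *
+ *	static const struct tee_client_device_id foo_id_table[] = {
+ *		{ UUID_INIT(0x01234567, 0x89ab, 0xcdef, 0x01, 0x23,
+ *			    0x45, 0x67, 0x89, 0xab, 0xcd, 0xef) },
+ *		{}
+ *	};
+ *	MODULE_DEVICE_TABLE(tee, foo_id_table);
+ *
+ *	static struct tee_client_driver foo_driver = {
+ *		.id_table = foo_id_table,
+ *		.driver = {
+ *			.name = "foo",
+ *			.bus = &tee_bus_type,
+ *			.probe = foo_probe,
+ *			.remove = foo_remove,
+ *		},
+ *	};
+ *
+ *	driver_register(&foo_driver.driver);
+ */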
+
+static int __init tee_init(void)
+{
+ int rc;
+
+ tee_class = class_create(THIS_MODULE, "tee");
+ if (IS_ERR(tee_class)) {
+ pr_err("couldn't create class\n");
+ return PTR_ERR(tee_class);
+ }
+
+ rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
+ if (rc) {
+ pr_err("failed to allocate char dev region\n");
+ goto out_unreg_class;
+ }
+
+ rc = bus_register(&tee_bus_type);
+ if (rc) {
+ pr_err("failed to register tee bus\n");
+ goto out_unreg_chrdev;
+ }
+
+ return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
+out_unreg_class:
+ class_destroy(tee_class);
+ tee_class = NULL;
+
+ return rc;
+}
+
+static void __exit tee_exit(void)
+{
+ bus_unregister(&tee_bus_type);
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
+ class_destroy(tee_class);
+ tee_class = NULL;
+}
+
+subsys_initcall(tee_init);
+module_exit(tee_exit);
+
+MODULE_AUTHOR("Linaro");
+MODULE_DESCRIPTION("TEE Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
new file mode 100644
index 000000000..409cadcc1
--- /dev/null
+++ b/drivers/tee/tee_private.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ */
+#ifndef TEE_PRIVATE_H
+#define TEE_PRIVATE_H
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define TEE_DEVICE_FLAG_REGISTERED 0x1
+#define TEE_MAX_DEV_NAME_LEN 32
+
+/**
+ * struct tee_device - TEE Device representation
+ * @name: name of device
+ * @desc: driver descriptor supplied at allocation, see struct tee_desc
+ * @id: unique id of device
+ * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above
+ * @dev: embedded basic device structure
+ * @cdev: embedded cdev
+ * @num_users: number of active users of this device
+ * @c_no_users: completion used when unregistering the device
+ * @mutex: mutex protecting @num_users and @idr
+ * @idr: register of user space shared memory objects allocated or
+ * registered on this device
+ * @pool: shared memory pool
+ */
+struct tee_device {
+ char name[TEE_MAX_DEV_NAME_LEN];
+ const struct tee_desc *desc;
+ int id;
+ unsigned int flags;
+
+ struct device dev;
+ struct cdev cdev;
+
+ size_t num_users;
+ struct completion c_no_users;
+ struct mutex mutex; /* protects num_users and idr */
+
+ struct idr idr;
+ struct tee_shm_pool *pool;
+};
+
+int tee_shm_init(void);
+
+int tee_shm_get_fd(struct tee_shm *shm);
+
+bool tee_device_get(struct tee_device *teedev);
+void tee_device_put(struct tee_device *teedev);
+
+void teedev_ctx_get(struct tee_context *ctx);
+void teedev_ctx_put(struct tee_context *ctx);
+
+struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size);
+struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
+ unsigned long addr, size_t length);
+
+#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
new file mode 100644
index 000000000..27295bda3
--- /dev/null
+++ b/drivers/tee/tee_shm.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
+ */
+#include <linux/anon_inodes.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include "tee_private.h"
+
+static void shm_put_kernel_pages(struct page **pages, size_t page_count)
+{
+ size_t n;
+
+ for (n = 0; n < page_count; n++)
+ put_page(pages[n]);
+}
+
+static int shm_get_kernel_pages(unsigned long start, size_t page_count,
+ struct page **pages)
+{
+ size_t n;
+ int rc;
+
+ if (is_vmalloc_addr((void *)start)) {
+ struct page *page;
+
+ for (n = 0; n < page_count; n++) {
+ page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
+ if (!page)
+ return -ENOMEM;
+
+ get_page(page);
+ pages[n] = page;
+ }
+ rc = page_count;
+ } else {
+ struct kvec *kiov;
+
+ kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
+ if (!kiov)
+ return -ENOMEM;
+
+ for (n = 0; n < page_count; n++) {
+ kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
+ kiov[n].iov_len = PAGE_SIZE;
+ }
+
+ rc = get_kernel_pages(kiov, page_count, 0, pages);
+ kfree(kiov);
+ }
+
+ return rc;
+}
+
+static void release_registered_pages(struct tee_shm *shm)
+{
+ if (shm->pages) {
+ if (shm->flags & TEE_SHM_USER_MAPPED)
+ unpin_user_pages(shm->pages, shm->num_pages);
+ else
+ shm_put_kernel_pages(shm->pages, shm->num_pages);
+
+ kfree(shm->pages);
+ }
+}
+
+static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
+{
+ if (shm->flags & TEE_SHM_POOL) {
+ teedev->pool->ops->free(teedev->pool, shm);
+ } else if (shm->flags & TEE_SHM_DYNAMIC) {
+ int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
+
+ if (rc)
+ dev_err(teedev->dev.parent,
+ "unregister shm %p failed: %d", shm, rc);
+
+ release_registered_pages(shm);
+ }
+
+ teedev_ctx_put(shm->ctx);
+
+ kfree(shm);
+
+ tee_device_put(teedev);
+}
+
+static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
+ size_t align, u32 flags, int id)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm *shm;
+ void *ret;
+ int rc;
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ if (!teedev->pool) {
+ /* teedev has been detached from driver */
+ ret = ERR_PTR(-EINVAL);
+ goto err_dev_put;
+ }
+
+ shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+ if (!shm) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_dev_put;
+ }
+
+ refcount_set(&shm->refcount, 1);
+ shm->flags = flags;
+ shm->id = id;
+
+ /*
+ * We're assigning this as it is needed if the shm is to be
+	 * registered. If this function returns OK then the caller is
+	 * expected to call teedev_ctx_get() or clear shm->ctx in case it's
+	 * not needed any longer.
+ */
+ shm->ctx = ctx;
+
+ rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto err_kfree;
+ }
+
+ teedev_ctx_get(ctx);
+ return shm;
+err_kfree:
+ kfree(shm);
+err_dev_put:
+ tee_device_put(teedev);
+ return ret;
+}
+
+/**
+ * tee_shm_alloc_user_buf() - Allocate shared memory for user space
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ *
+ * Memory allocated as user space shared memory is automatically freed when
+ * the TEE file pointer is closed. The primary usage of this function is
+ * when the TEE driver doesn't support registering ordinary user space
+ * memory.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
+{
+ u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm *shm;
+ void *ret;
+ int id;
+
+ mutex_lock(&teedev->mutex);
+ id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
+ if (IS_ERR(shm)) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, id);
+ mutex_unlock(&teedev->mutex);
+ return shm;
+ }
+
+ mutex_lock(&teedev->mutex);
+ ret = idr_replace(&teedev->idr, shm, id);
+ mutex_unlock(&teedev->mutex);
+ if (IS_ERR(ret)) {
+ tee_shm_free(shm);
+ return ret;
+ }
+
+ return shm;
+}
+
+/**
+ * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ *
+ * The returned memory is registered in secure world and is suitable to be
+ * passed as a memory buffer in a parameter argument to
+ * tee_client_invoke_func(). The memory allocated is later freed with a
+ * call to tee_shm_free().
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
+{
+ u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
+
+ return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
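+
+/*
+ * A minimal usage sketch (illustrative only): allocating a bounce buffer,
+ * filling it and passing it as a memref parameter. data and len are
+ * hypothetical; tee_shm_get_va() is valid here since pool memory is
+ * kernel mapped:
+ *
+ *	shm = tee_shm_alloc_kernel_buf(ctx, len);
+ *	if (IS_ERR(shm))
+ *		return PTR_ERR(shm);
+ *
+ *	memcpy(tee_shm_get_va(shm, 0), data, len);
+ *	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ *	param[0].u.memref.shm = shm;
+ *	param[0].u.memref.size = len;
+ *	...
+ *	tee_shm_free(shm);
+ */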
+
+/**
+ * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
+ * kernel buffer
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ *
+ * This function returns shared memory similar to that of
+ * tee_shm_alloc_kernel_buf(), with the difference that the memory
+ * might not be registered in secure world if the driver supports
+ * passing memory that is not registered in advance.
+ *
+ * This function should normally only be used internally in the TEE
+ * drivers.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
+{
+ u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;
+
+ return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
+
+static struct tee_shm *
+register_shm_helper(struct tee_context *ctx, unsigned long addr,
+ size_t length, u32 flags, int id)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm *shm;
+ unsigned long start;
+ size_t num_pages;
+ void *ret;
+ int rc;
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ if (!teedev->desc->ops->shm_register ||
+ !teedev->desc->ops->shm_unregister) {
+ ret = ERR_PTR(-ENOTSUPP);
+ goto err_dev_put;
+ }
+
+ teedev_ctx_get(ctx);
+
+ shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+ if (!shm) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_ctx_put;
+ }
+
+ refcount_set(&shm->refcount, 1);
+ shm->flags = flags;
+ shm->ctx = ctx;
+ shm->id = id;
+ addr = untagged_addr(addr);
+ start = rounddown(addr, PAGE_SIZE);
+ shm->offset = addr - start;
+ shm->size = length;
+ num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+ shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
+ if (!shm->pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_free_shm;
+ }
+
+ if (flags & TEE_SHM_USER_MAPPED)
+ rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
+ shm->pages);
+ else
+ rc = shm_get_kernel_pages(start, num_pages, shm->pages);
+ if (rc > 0)
+ shm->num_pages = rc;
+ if (rc != num_pages) {
+ if (rc >= 0)
+ rc = -ENOMEM;
+ ret = ERR_PTR(rc);
+ goto err_put_shm_pages;
+ }
+
+ rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
+ shm->num_pages, start);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto err_put_shm_pages;
+ }
+
+ return shm;
+err_put_shm_pages:
+ if (flags & TEE_SHM_USER_MAPPED)
+ unpin_user_pages(shm->pages, shm->num_pages);
+ else
+ shm_put_kernel_pages(shm->pages, shm->num_pages);
+ kfree(shm->pages);
+err_free_shm:
+ kfree(shm);
+err_ctx_put:
+ teedev_ctx_put(ctx);
+err_dev_put:
+ tee_device_put(teedev);
+ return ret;
+}
+
+/**
+ * tee_shm_register_user_buf() - Register a userspace shared memory buffer
+ * @ctx: Context that registers the shared memory
+ * @addr: The userspace address of the shared buffer
+ * @length: Length of the shared buffer
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
+ unsigned long addr, size_t length)
+{
+ u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm *shm;
+ void *ret;
+ int id;
+
+ if (!access_ok((void __user *)addr, length))
+ return ERR_PTR(-EFAULT);
+
+ mutex_lock(&teedev->mutex);
+ id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ shm = register_shm_helper(ctx, addr, length, flags, id);
+ if (IS_ERR(shm)) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, id);
+ mutex_unlock(&teedev->mutex);
+ return shm;
+ }
+
+ mutex_lock(&teedev->mutex);
+ ret = idr_replace(&teedev->idr, shm, id);
+ mutex_unlock(&teedev->mutex);
+ if (IS_ERR(ret)) {
+ tee_shm_free(shm);
+ return ret;
+ }
+
+ return shm;
+}
+
+/**
+ * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
+ * secure world
+ * @ctx: Context that registers the shared memory
+ * @addr: The buffer
+ * @length: Length of the buffer
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
+ void *addr, size_t length)
+{
+ u32 flags = TEE_SHM_DYNAMIC;
+
+ return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
+}
+EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
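+
+/*
+ * A minimal usage sketch (illustrative only): registering an existing
+ * kernel buffer instead of allocating from the pool. buf and len are
+ * hypothetical, and the buffer must stay alive until the shm is freed:
+ *
+ *	buf = kzalloc(len, GFP_KERNEL);
+ *	shm = tee_shm_register_kernel_buf(ctx, buf, len);
+ *	if (IS_ERR(shm)) {
+ *		kfree(buf);
+ *		return PTR_ERR(shm);
+ *	}
+ *	...
+ *	tee_shm_free(shm);
+ *	kfree(buf);
+ */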
+
+static int tee_shm_fop_release(struct inode *inode, struct file *filp)
+{
+ tee_shm_put(filp->private_data);
+ return 0;
+}
+
+static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct tee_shm *shm = filp->private_data;
+ size_t size = vma->vm_end - vma->vm_start;
+
+ /* Refuse sharing shared memory provided by application */
+ if (shm->flags & TEE_SHM_USER_MAPPED)
+ return -EINVAL;
+
+ /* check for overflowing the buffer's size */
+ if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+}
+
+static const struct file_operations tee_shm_fops = {
+ .owner = THIS_MODULE,
+ .release = tee_shm_fop_release,
+ .mmap = tee_shm_fop_mmap,
+};
+
+/**
+ * tee_shm_get_fd() - Increase reference count and return file descriptor
+ * @shm: Shared memory handle
+ * @returns user space file descriptor to shared memory
+ */
+int tee_shm_get_fd(struct tee_shm *shm)
+{
+ int fd;
+
+ if (shm->id < 0)
+ return -EINVAL;
+
+	/* matched by tee_shm_put() in tee_shm_fop_release() */
+ refcount_inc(&shm->refcount);
+ fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
+ if (fd < 0)
+ tee_shm_put(shm);
+ return fd;
+}
+
+/**
+ * tee_shm_free() - Free shared memory
+ * @shm: Handle to shared memory to free
+ */
+void tee_shm_free(struct tee_shm *shm)
+{
+ tee_shm_put(shm);
+}
+EXPORT_SYMBOL_GPL(tee_shm_free);
+
+/**
+ * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @returns virtual address of the shared memory + offs if offs is within
+ * the bounds of this shared memory, else an ERR_PTR
+ */
+void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
+{
+ if (!shm->kaddr)
+ return ERR_PTR(-EINVAL);
+ if (offs >= shm->size)
+ return ERR_PTR(-EINVAL);
+ return (char *)shm->kaddr + offs;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_va);
+
+/**
+ * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @pa: Physical address to return
+ * @returns 0 if offs is within the bounds of this shared memory, else an
+ * error code.
+ */
+int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
+{
+ if (offs >= shm->size)
+ return -EINVAL;
+ if (pa)
+ *pa = shm->paddr + offs;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_pa);
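+
+/*
+ * A minimal usage sketch (illustrative only): translating an offset
+ * inside a shared memory object to addresses. tee_shm_get_va() only
+ * works for objects with a kernel mapping, i.e. pool allocated rather
+ * than registered memory:
+ *
+ *	va = tee_shm_get_va(shm, offs);
+ *	if (IS_ERR(va))
+ *		return PTR_ERR(va);
+ *	rc = tee_shm_get_pa(shm, offs, &pa);
+ *	if (rc)
+ *		return rc;
+ */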
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx: Context owning the shared memory
+ * @id: Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
+{
+ struct tee_device *teedev;
+ struct tee_shm *shm;
+
+ if (!ctx)
+ return ERR_PTR(-EINVAL);
+
+ teedev = ctx->teedev;
+ mutex_lock(&teedev->mutex);
+ shm = idr_find(&teedev->idr, id);
+ /*
+ * If the tee_shm was found in the IDR it must have a refcount
+ * larger than 0 due to the guarantee in tee_shm_put() below. So
+ * it's safe to use refcount_inc().
+ */
+ if (!shm || shm->ctx != ctx)
+ shm = ERR_PTR(-EINVAL);
+ else
+ refcount_inc(&shm->refcount);
+ mutex_unlock(&teedev->mutex);
+ return shm;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm: Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm)
+{
+ struct tee_device *teedev = shm->ctx->teedev;
+ bool do_release = false;
+
+ mutex_lock(&teedev->mutex);
+ if (refcount_dec_and_test(&shm->refcount)) {
+ /*
+ * refcount has reached 0, we must now remove it from the
+ * IDR before releasing the mutex. This will guarantee that
+ * the refcount_inc() in tee_shm_get_from_id() never starts
+ * from 0.
+ */
+ if (shm->id >= 0)
+ idr_remove(&teedev->idr, shm->id);
+ do_release = true;
+ }
+ mutex_unlock(&teedev->mutex);
+
+ if (do_release)
+ tee_shm_release(teedev, shm);
+}
+EXPORT_SYMBOL_GPL(tee_shm_put);
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
new file mode 100644
index 000000000..058bfbac6
--- /dev/null
+++ b/drivers/tee/tee_shm_pool.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, 2017, 2022 Linaro Limited
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "tee_private.h"
+
+static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align)
+{
+ unsigned long va;
+ struct gen_pool *genpool = pool->private_data;
+ size_t a = max_t(size_t, align, BIT(genpool->min_alloc_order));
+ struct genpool_data_align data = { .align = a };
+ size_t s = roundup(size, a);
+
+ va = gen_pool_alloc_algo(genpool, s, gen_pool_first_fit_align, &data);
+ if (!va)
+ return -ENOMEM;
+
+ memset((void *)va, 0, s);
+ shm->kaddr = (void *)va;
+ shm->paddr = gen_pool_virt_to_phys(genpool, va);
+ shm->size = s;
+ /*
+ * This is from a static shared memory pool so no need to register
+ * each chunk, and no need to unregister later either.
+ */
+ shm->flags &= ~TEE_SHM_DYNAMIC;
+ return 0;
+}
+
+static void pool_op_gen_free(struct tee_shm_pool *pool, struct tee_shm *shm)
+{
+ gen_pool_free(pool->private_data, (unsigned long)shm->kaddr,
+ shm->size);
+ shm->kaddr = NULL;
+}
+
+static void pool_op_gen_destroy_pool(struct tee_shm_pool *pool)
+{
+ gen_pool_destroy(pool->private_data);
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ops_generic = {
+ .alloc = pool_op_gen_alloc,
+ .free = pool_op_gen_free,
+ .destroy_pool = pool_op_gen_destroy_pool,
+};
+
+struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
+ phys_addr_t paddr, size_t size,
+ int min_alloc_order)
+{
+ const size_t page_mask = PAGE_SIZE - 1;
+ struct tee_shm_pool *pool;
+ int rc;
+
+ /* Start and end must be page aligned */
+ if (vaddr & page_mask || paddr & page_mask || size & page_mask)
+ return ERR_PTR(-EINVAL);
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->private_data = gen_pool_create(min_alloc_order, -1);
+ if (!pool->private_data) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = gen_pool_add_virt(pool->private_data, vaddr, paddr, size, -1);
+ if (rc) {
+ gen_pool_destroy(pool->private_data);
+ goto err;
+ }
+
+ pool->ops = &pool_ops_generic;
+
+ return pool;
+err:
+ kfree(pool);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
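+
+/*
+ * A minimal usage sketch (illustrative only): a driver with a static
+ * (reserved) shared memory carveout wraps it in a pool and hands the pool
+ * to tee_device_alloc(). vaddr/paddr/size describe the already mapped
+ * carveout; the order-9 minimum alignment (512 bytes) is only an example
+ * value:
+ *
+ *	pool = tee_shm_pool_alloc_res_mem(vaddr, paddr, size, 9);
+ *	if (IS_ERR(pool))
+ *		return PTR_ERR(pool);
+ *	...
+ *	tee_shm_pool_free(pool);
+ */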