author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/tee
parent     Adding upstream version 6.7.12. (diff)
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/tee')
-rw-r--r--  drivers/tee/optee/Kconfig          |   2
-rw-r--r--  drivers/tee/optee/call.c           | 161
-rw-r--r--  drivers/tee/optee/core.c           |  62
-rw-r--r--  drivers/tee/optee/ffa_abi.c        | 107
-rw-r--r--  drivers/tee/optee/optee_ffa.h      |  28
-rw-r--r--  drivers/tee/optee/optee_private.h  |  40
-rw-r--r--  drivers/tee/optee/smc_abi.c        | 112
-rw-r--r--  drivers/tee/tee_core.c             |   8
-rw-r--r--  drivers/tee/tee_shm.c              |  78
9 files changed, 435 insertions(+), 163 deletions(-)
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 70898bbd5..976928641 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -23,4 +23,4 @@ config OPTEE_INSECURE_LOAD_IMAGE
https://trustedfirmware-a.readthedocs.io/en/latest/threat_model/threat_model.html
Additional documentation on kernel security risks is at
- Documentation/staging/tee.rst.
+ Documentation/tee/op-tee.rst.
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index df5fb5410..a91e50be1 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2015-2021, 2023 Linaro Limited
*/
#include <linux/device.h>
#include <linux/err.h>
@@ -39,9 +39,29 @@ struct optee_shm_arg_entry {
DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
};
+void optee_cq_init(struct optee_call_queue *cq, int thread_count)
+{
+ mutex_init(&cq->mutex);
+ INIT_LIST_HEAD(&cq->waiters);
+
+ /*
+ * If cq->total_thread_count is 0 then we're not trying to keep
+ * track of how many free threads we have; instead we're relying on
+ * the secure world to tell us when we're out of threads and have to
+ * wait for another thread to become available.
+ */
+ cq->total_thread_count = thread_count;
+ cq->free_thread_count = thread_count;
+}
+
void optee_cq_wait_init(struct optee_call_queue *cq,
- struct optee_call_waiter *w)
+ struct optee_call_waiter *w, bool sys_thread)
{
+ unsigned int free_thread_threshold;
+ bool need_wait = false;
+
+ memset(w, 0, sizeof(*w));
+
/*
* We're preparing to make a call to secure world. In case we can't
* allocate a thread in secure world we'll end up waiting in
@@ -60,8 +80,38 @@ void optee_cq_wait_init(struct optee_call_queue *cq,
*/
init_completion(&w->c);
list_add_tail(&w->list_node, &cq->waiters);
+ w->sys_thread = sys_thread;
+
+ if (cq->total_thread_count) {
+ if (sys_thread || !cq->sys_thread_req_count)
+ free_thread_threshold = 0;
+ else
+ free_thread_threshold = 1;
+
+ if (cq->free_thread_count > free_thread_threshold)
+ cq->free_thread_count--;
+ else
+ need_wait = true;
+ }
mutex_unlock(&cq->mutex);
+
+ while (need_wait) {
+ optee_cq_wait_for_completion(cq, w);
+ mutex_lock(&cq->mutex);
+
+ if (sys_thread || !cq->sys_thread_req_count)
+ free_thread_threshold = 0;
+ else
+ free_thread_threshold = 1;
+
+ if (cq->free_thread_count > free_thread_threshold) {
+ cq->free_thread_count--;
+ need_wait = false;
+ }
+
+ mutex_unlock(&cq->mutex);
+ }
}
void optee_cq_wait_for_completion(struct optee_call_queue *cq,
@@ -83,6 +133,14 @@ static void optee_cq_complete_one(struct optee_call_queue *cq)
{
struct optee_call_waiter *w;
+ /* Wake a waiting system session if any, prior to a normal session */
+ list_for_each_entry(w, &cq->waiters, list_node) {
+ if (w->sys_thread && !completion_done(&w->c)) {
+ complete(&w->c);
+ return;
+ }
+ }
+
list_for_each_entry(w, &cq->waiters, list_node) {
if (!completion_done(&w->c)) {
complete(&w->c);
@@ -104,6 +162,8 @@ void optee_cq_wait_final(struct optee_call_queue *cq,
/* Get out of the list */
list_del(&w->list_node);
+ cq->free_thread_count++;
+
/* Wake up one eventual waiting task */
optee_cq_complete_one(cq);
@@ -119,6 +179,28 @@ void optee_cq_wait_final(struct optee_call_queue *cq,
mutex_unlock(&cq->mutex);
}
+/* Count registered system sessions to decide whether to reserve a system thread */
+static bool optee_cq_incr_sys_thread_count(struct optee_call_queue *cq)
+{
+ if (cq->total_thread_count <= 1)
+ return false;
+
+ mutex_lock(&cq->mutex);
+ cq->sys_thread_req_count++;
+ mutex_unlock(&cq->mutex);
+
+ return true;
+}
+
+static void optee_cq_decr_sys_thread_count(struct optee_call_queue *cq)
+{
+ mutex_lock(&cq->mutex);
+ cq->sys_thread_req_count--;
+ /* If there's someone waiting, let it resume */
+ optee_cq_complete_one(cq);
+ mutex_unlock(&cq->mutex);
+}
+
/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
u32 session_id)
@@ -328,7 +410,8 @@ int optee_open_session(struct tee_context *ctx,
goto out;
}
- if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ if (optee->ops->do_call_with_arg(ctx, shm, offs,
+ sess->use_sys_thread)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -360,7 +443,29 @@ out:
return rc;
}
-int optee_close_session_helper(struct tee_context *ctx, u32 session)
+int optee_system_session(struct tee_context *ctx, u32 session)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_session *sess;
+ int rc = -EINVAL;
+
+ mutex_lock(&ctxdata->mutex);
+
+ sess = find_session(ctxdata, session);
+ if (sess && (sess->use_sys_thread ||
+ optee_cq_incr_sys_thread_count(&optee->call_queue))) {
+ sess->use_sys_thread = true;
+ rc = 0;
+ }
+
+ mutex_unlock(&ctxdata->mutex);
+
+ return rc;
+}
+
+int optee_close_session_helper(struct tee_context *ctx, u32 session,
+ bool system_thread)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_shm_arg_entry *entry;
@@ -374,10 +479,13 @@ int optee_close_session_helper(struct tee_context *ctx, u32 session)
msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
msg_arg->session = session;
- optee->ops->do_call_with_arg(ctx, shm, offs);
+ optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);
optee_free_msg_arg(ctx, entry, offs);
+ if (system_thread)
+ optee_cq_decr_sys_thread_count(&optee->call_queue);
+
return 0;
}
@@ -385,6 +493,7 @@ int optee_close_session(struct tee_context *ctx, u32 session)
{
struct optee_context_data *ctxdata = ctx->data;
struct optee_session *sess;
+ bool system_thread;
/* Check that the session is valid and remove it from the list */
mutex_lock(&ctxdata->mutex);
@@ -394,9 +503,10 @@ int optee_close_session(struct tee_context *ctx, u32 session)
mutex_unlock(&ctxdata->mutex);
if (!sess)
return -EINVAL;
+ system_thread = sess->use_sys_thread;
kfree(sess);
- return optee_close_session_helper(ctx, session);
+ return optee_close_session_helper(ctx, session, system_thread);
}
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
@@ -408,12 +518,15 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct optee_msg_arg *msg_arg;
struct optee_session *sess;
struct tee_shm *shm;
+ bool system_thread;
u_int offs;
int rc;
/* Check that the session is valid */
mutex_lock(&ctxdata->mutex);
sess = find_session(ctxdata, arg->session);
+ if (sess)
+ system_thread = sess->use_sys_thread;
mutex_unlock(&ctxdata->mutex);
if (!sess)
return -EINVAL;
@@ -432,7 +545,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
if (rc)
goto out;
- if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ if (optee->ops->do_call_with_arg(ctx, shm, offs, system_thread)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -457,12 +570,15 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
struct optee_shm_arg_entry *entry;
struct optee_msg_arg *msg_arg;
struct optee_session *sess;
+ bool system_thread;
struct tee_shm *shm;
u_int offs;
/* Check that the session is valid */
mutex_lock(&ctxdata->mutex);
sess = find_session(ctxdata, session);
+ if (sess)
+ system_thread = sess->use_sys_thread;
mutex_unlock(&ctxdata->mutex);
if (!sess)
return -EINVAL;
@@ -474,7 +590,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
msg_arg->session = session;
msg_arg->cancel_id = cancel_id;
- optee->ops->do_call_with_arg(ctx, shm, offs);
+ optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);
optee_free_msg_arg(ctx, entry, offs);
return 0;
@@ -524,3 +640,32 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
return rc;
}
+
+static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = cmd;
+ optee->ops->do_call_with_arg(ctx, shm, offs, false);
+
+ optee_free_msg_arg(ctx, entry, offs);
+ return 0;
+}
+
+int optee_do_bottom_half(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
+}
+
+int optee_stop_async_notif(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
+}
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 2a258bd3b..3aed554bc 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -15,7 +15,6 @@
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
#include "optee_private.h"
int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
@@ -26,46 +25,46 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
size_t num_pages,
unsigned long start))
{
- unsigned int order = get_order(size);
- struct page *page;
+ size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+ struct page **pages;
+ unsigned int i;
int rc = 0;
/*
* Ignore alignment since this is already going to be page aligned
* and there's no need for any larger alignment.
*/
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!page)
+ shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!shm->kaddr)
return -ENOMEM;
- shm->kaddr = page_address(page);
- shm->paddr = page_to_phys(page);
- shm->size = PAGE_SIZE << order;
+ shm->paddr = virt_to_phys(shm->kaddr);
+ shm->size = nr_pages * PAGE_SIZE;
- if (shm_register) {
- unsigned int nr_pages = 1 << order, i;
- struct page **pages;
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
- pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- rc = -ENOMEM;
- goto err;
- }
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
- for (i = 0; i < nr_pages; i++)
- pages[i] = page + i;
+ shm->pages = pages;
+ shm->num_pages = nr_pages;
+ if (shm_register) {
rc = shm_register(shm->ctx, shm, pages, nr_pages,
(unsigned long)shm->kaddr);
- kfree(pages);
if (rc)
goto err;
}
return 0;
-
err:
- free_pages((unsigned long)shm->kaddr, order);
+ free_pages_exact(shm->kaddr, shm->size);
+ shm->kaddr = NULL;
return rc;
}
@@ -75,8 +74,10 @@ void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
{
if (shm_unregister)
shm_unregister(shm->ctx, shm);
- free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ free_pages_exact(shm->kaddr, shm->size);
shm->kaddr = NULL;
+ kfree(shm->pages);
+ shm->pages = NULL;
}
static void optee_bus_scan(struct work_struct *work)
@@ -110,12 +111,7 @@ int optee_open(struct tee_context *ctx, bool cap_memref_null)
if (!optee->scan_bus_done) {
INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
- optee->scan_bus_wq = create_workqueue("optee_bus_scan");
- if (!optee->scan_bus_wq) {
- kfree(ctxdata);
- return -ECHILD;
- }
- queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
+ schedule_work(&optee->scan_bus_work);
optee->scan_bus_done = true;
}
}
@@ -129,7 +125,8 @@ int optee_open(struct tee_context *ctx, bool cap_memref_null)
static void optee_release_helper(struct tee_context *ctx,
int (*close_session)(struct tee_context *ctx,
- u32 session))
+ u32 session,
+ bool system_thread))
{
struct optee_context_data *ctxdata = ctx->data;
struct optee_session *sess;
@@ -141,7 +138,7 @@ static void optee_release_helper(struct tee_context *ctx,
list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
list_node) {
list_del(&sess->list_node);
- close_session(ctx, sess->session_id);
+ close_session(ctx, sess->session_id, sess->use_sys_thread);
kfree(sess);
}
kfree(ctxdata);
@@ -158,10 +155,7 @@ void optee_release_supp(struct tee_context *ctx)
struct optee *optee = tee_get_drvdata(ctx->teedev);
optee_release_helper(ctx, optee_close_session_helper);
- if (optee->scan_bus_wq) {
- destroy_workqueue(optee->scan_bus_wq);
- optee->scan_bus_wq = NULL;
- }
+
optee_supp_release(&optee->supp);
}
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 0828240f2..ecb5eb079 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2021, Linaro Limited
+ * Copyright (c) 2021, 2023 Linaro Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -528,7 +528,8 @@ static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
static int optee_ffa_yielding_call(struct tee_context *ctx,
struct ffa_send_direct_data *data,
- struct optee_msg_arg *rpc_arg)
+ struct optee_msg_arg *rpc_arg,
+ bool system_thread)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
@@ -541,7 +542,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
int rc;
/* Initialize waiter */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, system_thread);
while (true) {
rc = msg_ops->sync_send_receive(ffa_dev, data);
if (rc)
@@ -604,6 +605,7 @@ done:
* @ctx: calling context
* @shm: shared memory holding the message to pass to secure world
* @offs: offset of the message in @shm
+ * @system_thread: true if caller requests TEE system thread support
*
* Does an FF-A call to OP-TEE in secure world and handles eventual resulting
* Remote Procedure Calls (RPC) from OP-TEE.
@@ -612,7 +614,8 @@ done:
*/
static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
- struct tee_shm *shm, u_int offs)
+ struct tee_shm *shm, u_int offs,
+ bool system_thread)
{
struct ffa_send_direct_data data = {
.data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
@@ -642,7 +645,7 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
if (IS_ERR(rpc_arg))
return PTR_ERR(rpc_arg);
- return optee_ffa_yielding_call(ctx, &data, rpc_arg);
+ return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread);
}
/*
@@ -692,7 +695,8 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
const struct ffa_ops *ops,
u32 *sec_caps,
- unsigned int *rpc_param_count)
+ unsigned int *rpc_param_count,
+ unsigned int *max_notif_value)
{
struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
int rc;
@@ -709,10 +713,39 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
*rpc_param_count = (u8)data.data1;
*sec_caps = data.data2;
+ if (data.data3)
+ *max_notif_value = data.data3;
+ else
+ *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
return true;
}
+static void notif_callback(int notify_id, void *cb_data)
+{
+ struct optee *optee = cb_data;
+
+ if (notify_id == optee->ffa.bottom_half_value)
+ optee_do_bottom_half(optee->ctx);
+ else
+ optee_notif_send(optee, notify_id);
+}
+
+static int enable_async_notif(struct optee *optee)
+{
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_ENABLE_ASYNC_NOTIF,
+ .data1 = optee->ffa.bottom_half_value,
+ };
+ int rc;
+
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ return rc;
+ return data.data0;
+}
+
static void optee_ffa_get_version(struct tee_device *teedev,
struct tee_ioctl_version_data *vers)
{
@@ -775,7 +808,11 @@ static const struct optee_ops optee_ffa_ops = {
static void optee_ffa_remove(struct ffa_device *ffa_dev)
{
struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
+ u32 bottom_half_id = optee->ffa.bottom_half_value;
+ if (bottom_half_id != U32_MAX)
+ ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev,
+ bottom_half_id);
optee_remove_common(optee);
mutex_destroy(&optee->ffa.mutex);
@@ -784,9 +821,51 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev)
kfree(optee);
}
+static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev,
+ struct optee *optee)
+{
+ bool is_per_vcpu = false;
+ u32 notif_id = 0;
+ int rc;
+
+ while (true) {
+ rc = ffa_dev->ops->notifier_ops->notify_request(ffa_dev,
+ is_per_vcpu,
+ notif_callback,
+ optee,
+ notif_id);
+ if (!rc)
+ break;
+ /*
+ * -EACCES means that the notification ID was
+ * already bound, try the next one as long as we
+ * haven't reached the max. Any other error is a
+ * permanent error, so skip asynchronous
+ * notifications in that case.
+ */
+ if (rc != -EACCES)
+ return rc;
+ notif_id++;
+ if (notif_id >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE)
+ return rc;
+ }
+ optee->ffa.bottom_half_value = notif_id;
+
+ rc = enable_async_notif(optee);
+ if (rc < 0) {
+ ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev,
+ notif_id);
+ optee->ffa.bottom_half_value = U32_MAX;
+ }
+
+ return rc;
+}
+
static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
+ const struct ffa_notifier_ops *notif_ops;
const struct ffa_ops *ffa_ops;
+ unsigned int max_notif_value;
unsigned int rpc_param_count;
struct tee_shm_pool *pool;
struct tee_device *teedev;
@@ -797,12 +876,13 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
int rc;
ffa_ops = ffa_dev->ops;
+ notif_ops = ffa_ops->notifier_ops;
if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
return -EINVAL;
if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
- &rpc_param_count))
+ &rpc_param_count, &max_notif_value))
return -EINVAL;
if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
arg_cache_flags |= OPTEE_SHM_ARG_SHARED;
@@ -820,6 +900,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
+ optee->ffa.bottom_half_value = U32_MAX;
optee->rpc_param_count = rpc_param_count;
teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
@@ -850,8 +931,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
if (rc)
goto err_unreg_supp_teedev;
mutex_init(&optee->ffa.mutex);
- mutex_init(&optee->call_queue.mutex);
- INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_cq_init(&optee->call_queue, 0);
optee_supp_init(&optee->supp);
optee_shm_arg_cache_init(optee, arg_cache_flags);
ffa_dev_set_drvdata(ffa_dev, optee);
@@ -864,6 +944,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
if (rc)
goto err_close_ctx;
+ if (sec_caps & OPTEE_FFA_SEC_CAP_ASYNC_NOTIF) {
+ rc = optee_ffa_async_notif_init(ffa_dev, optee);
+ if (rc < 0)
+ pr_err("Failed to initialize async notifications: %d",
+ rc);
+ }
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc)
@@ -874,6 +960,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
err_unregister_devices:
optee_unregister_devices();
+ if (optee->ffa.bottom_half_value != U32_MAX)
+ notif_ops->notify_relinquish(ffa_dev,
+ optee->ffa.bottom_half_value);
optee_notif_uninit(optee);
err_close_ctx:
teedev_close_context(ctx);
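
Note on the probing loop in optee_ffa_async_notif_init() above: it binds the first notification ID the secure world has not already taken, treating -EACCES as "taken, try the next" and anything else as fatal. The same logic as a standalone sketch (names as in this patch; not a separate function upstream):

	static int claim_free_notif_id(struct ffa_device *ffa_dev,
				       struct optee *optee, u32 *notif_id)
	{
		u32 id;
		int rc;

		for (id = 0; id < OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE; id++) {
			rc = ffa_dev->ops->notifier_ops->notify_request(ffa_dev,
					false /* per-VM */, notif_callback,
					optee, id);
			if (!rc) {
				*notif_id = id;	/* bound successfully */
				return 0;
			}
			if (rc != -EACCES)	/* permanent error: give up */
				return rc;
		}
		return -EACCES;	/* every ID below the limit is taken */
	}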
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
index 97266243d..5db779dc0 100644
--- a/drivers/tee/optee/optee_ffa.h
+++ b/drivers/tee/optee/optee_ffa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (c) 2019-2021, Linaro Limited
+ * Copyright (c) 2019-2021, 2023 Linaro Limited
*/
/*
@@ -73,7 +73,7 @@
*
* Call register usage:
* w3: Service ID, OPTEE_FFA_EXCHANGE_CAPABILITIES
- * w4-w7: Note used (MBZ)
+ * w4-w7: Not used (MBZ)
*
* Return register usage:
* w3: Error code, 0 on success
@@ -82,14 +82,16 @@
* OPTEE_FFA_YIELDING_CALL_WITH_ARG.
* Bit[31:8]: Reserved (MBZ)
* w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
* unused bits MBZ.
- * w6-w7: Not used (MBZ)
+ * w6: The maximum secure world notification number
+ * w7: Not used (MBZ)
*/
/*
* Secure world supports giving an offset into the argument shared memory
* object, see also OPTEE_FFA_YIELDING_CALL_WITH_ARG
*/
#define OPTEE_FFA_SEC_CAP_ARG_OFFSET BIT(0)
+/* OP-TEE supports asynchronous notification via FF-A */
+#define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF BIT(1)
#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
@@ -109,6 +111,24 @@
#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3)
/*
+ * Inform OP-TEE that the normal world is able to receive asynchronous
+ * notifications.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_ENABLE_ASYNC_NOTIF
+ * w4: Notification value to request bottom half processing, should be
+ * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE.
+ * w5-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_ENABLE_ASYNC_NOTIF OPTEE_FFA_BLOCKING_CALL(5)
+
+#define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64
+
+/*
* Call with struct optee_msg_arg as argument in the supplied shared memory
* with a zero internal offset and normal cached memory attributes.
* Register usage:
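
For reference, the w3-w7 register documentation above maps onto struct ffa_send_direct_data, whose data0-data4 fields carry w3-w7 of the direct message; enable_async_notif() in ffa_abi.c fills it accordingly (restated from the diff above, not new behavior):

	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_ENABLE_ASYNC_NOTIF,	/* w3: service ID */
		.data1 = optee->ffa.bottom_half_value,	/* w4: notif value */
		/* .data2-.data4 (w5-w7): not used, MBZ */
	};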
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 6bb5cae09..7a5243c78 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2015-2021, 2023 Linaro Limited
*/
#ifndef OPTEE_PRIVATE_H
@@ -40,15 +40,33 @@ typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
struct arm_smccc_res *);
+/*
* struct optee_call_waiter - waiter for a free TEE thread to enter OP-TEE
+ * @list_node Reference in waiters list
+ * @c Waiting completion reference
+ * @sys_thread True if waiter belongs to a system thread
+ */
struct optee_call_waiter {
struct list_head list_node;
struct completion c;
+ bool sys_thread;
};
+/*
+ * struct optee_call_queue - OP-TEE call queue management
+ * @mutex Serializes access to this struct
+ * @waiters List of threads waiting to enter OP-TEE
+ * @total_thread_count Overall number of thread contexts in OP-TEE, or 0
+ * @free_thread_count Number of thread contexts currently free in OP-TEE
+ * @sys_thread_req_count Number of registered system thread sessions
+ */
struct optee_call_queue {
/* Serializes access to this struct */
struct mutex mutex;
struct list_head waiters;
+ int total_thread_count;
+ int free_thread_count;
+ int sys_thread_req_count;
};
struct optee_notif {
@@ -129,12 +147,14 @@ struct optee_smc {
* struct optee_ffa_data - FFA communication struct
* @ffa_dev FFA device, contains the destination id, the id of
* OP-TEE in secure world
- * @ffa_ops FFA operations
+ * @bottom_half_value Notification ID used for bottom half signalling or
+ * U32_MAX if unused
* @mutex Serializes access to @global_ids
* @global_ids FF-A shared memory global handle translation
*/
struct optee_ffa {
struct ffa_device *ffa_dev;
+ u32 bottom_half_value;
/* Serializes access to @global_ids */
struct mutex mutex;
struct rhashtable global_ids;
@@ -154,7 +174,8 @@ struct optee;
*/
struct optee_ops {
int (*do_call_with_arg)(struct tee_context *ctx,
- struct tee_shm *shm_arg, u_int offs);
+ struct tee_shm *shm_arg, u_int offs,
+ bool system_thread);
int (*to_msg_param)(struct optee *optee,
struct optee_msg_param *msg_params,
size_t num_params, const struct tee_param *params);
@@ -178,7 +199,6 @@ struct optee_ops {
* @pool: shared memory pool
* @rpc_param_count: If > 0 number of RPC parameters to make room for
* @scan_bus_done flag if device registration was already done.
- * @scan_bus_wq workqueue to scan optee bus and register optee drivers
* @scan_bus_work workq to scan optee bus and register optee drivers
*/
struct optee {
@@ -197,13 +217,13 @@ struct optee {
struct tee_shm_pool *pool;
unsigned int rpc_param_count;
bool scan_bus_done;
- struct workqueue_struct *scan_bus_wq;
struct work_struct scan_bus_work;
};
struct optee_session {
struct list_head list_node;
u32 session_id;
+ bool use_sys_thread;
};
struct optee_context_data {
@@ -250,7 +270,9 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
int optee_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param);
-int optee_close_session_helper(struct tee_context *ctx, u32 session);
+int optee_system_session(struct tee_context *ctx, u32 session);
+int optee_close_session_helper(struct tee_context *ctx, u32 session,
+ bool system_thread);
int optee_close_session(struct tee_context *ctx, u32 session);
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
@@ -298,8 +320,9 @@ static inline void optee_to_msg_param_value(struct optee_msg_param *mp,
mp->u.value.c = p->u.value.c;
}
+void optee_cq_init(struct optee_call_queue *cq, int thread_count);
void optee_cq_wait_init(struct optee_call_queue *cq,
- struct optee_call_waiter *w);
+ struct optee_call_waiter *w, bool sys_thread);
void optee_cq_wait_for_completion(struct optee_call_queue *cq,
struct optee_call_waiter *w);
void optee_cq_wait_final(struct optee_call_queue *cq,
@@ -323,6 +346,9 @@ void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
struct optee_msg_arg *arg);
+int optee_do_bottom_half(struct tee_context *ctx);
+int optee_stop_async_notif(struct tee_context *ctx);
+
/*
* Small helpers
*/
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index d5b28fd35..a37f87087 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2015-2021, 2023 Linaro Limited
* Copyright (c) 2016, EPAM Systems
*/
@@ -283,7 +283,7 @@ static void optee_enable_shm_cache(struct optee *optee)
struct optee_call_waiter w;
/* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, false);
while (true) {
struct arm_smccc_res res;
@@ -308,7 +308,7 @@ static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
struct optee_call_waiter w;
/* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, false);
while (true) {
union {
struct arm_smccc_res smccc;
@@ -507,7 +507,7 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
(tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
- if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0, false) ||
msg_arg->ret != TEEC_SUCCESS)
rc = -EINVAL;
@@ -550,7 +550,7 @@ static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
- if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0, false) ||
msg_arg->ret != TEEC_SUCCESS)
rc = -EINVAL;
out:
@@ -678,10 +678,11 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
struct optee_msg_arg *arg,
struct optee_call_ctx *call_ctx)
{
- phys_addr_t pa;
struct tee_shm *shm;
size_t sz;
size_t n;
+ struct page **pages;
+ size_t page_count;
arg->ret_origin = TEEC_ORIGIN_COMMS;
@@ -716,32 +717,23 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
return;
}
- if (tee_shm_get_pa(shm, 0, &pa)) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- goto bad;
- }
-
- sz = tee_shm_get_size(shm);
-
- if (tee_shm_is_dynamic(shm)) {
- struct page **pages;
+ /*
+ * If there are pages it's dynamically allocated shared memory (not
+ * from the reserved shared memory pool) and needs to be
+ * registered.
+ */
+ pages = tee_shm_get_pages(shm, &page_count);
+ if (pages) {
u64 *pages_list;
- size_t page_num;
-
- pages = tee_shm_get_pages(shm, &page_num);
- if (!pages || !page_num) {
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
- goto bad;
- }
- pages_list = optee_allocate_pages_list(page_num);
+ pages_list = optee_allocate_pages_list(page_count);
if (!pages_list) {
arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
goto bad;
}
call_ctx->pages_list = pages_list;
- call_ctx->num_entries = page_num;
+ call_ctx->num_entries = page_count;
arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
OPTEE_MSG_ATTR_NONCONTIG;
@@ -752,17 +744,22 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
(tee_shm_get_page_offset(shm) &
(OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
- arg->params[0].u.tmem.size = tee_shm_get_size(shm);
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
- optee_fill_pages_list(pages_list, pages, page_num,
+ optee_fill_pages_list(pages_list, pages, page_count,
tee_shm_get_page_offset(shm));
} else {
+ phys_addr_t pa;
+
+ if (tee_shm_get_pa(shm, 0, &pa)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto bad;
+ }
+
arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
arg->params[0].u.tmem.buf_ptr = pa;
- arg->params[0].u.tmem.size = sz;
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
}
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
arg->ret = TEEC_SUCCESS;
return;
@@ -806,6 +803,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
/**
* optee_handle_rpc() - handle RPC from secure world
* @ctx: context doing the RPC
+ * @rpc_arg: pointer to RPC arguments if any, or NULL if none
* @param: value of registers for the RPC
* @call_ctx: call context. Preserved during one OP-TEE invocation
*
@@ -878,6 +876,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
* @ctx: calling context
* @shm: shared memory holding the message to pass to secure world
* @offs: offset of the message in @shm
+ * @system_thread: true if caller requests TEE system thread support
*
* Does an SMC to OP-TEE in secure world and handles eventual resulting
* Remote Procedure Calls (RPC) from OP-TEE.
@@ -885,7 +884,8 @@ static void optee_handle_rpc(struct tee_context *ctx,
* Returns return code from secure world, 0 is OK
*/
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
- struct tee_shm *shm, u_int offs)
+ struct tee_shm *shm, u_int offs,
+ bool system_thread)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_call_waiter w;
@@ -926,7 +926,7 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
reg_pair_from_64(&param.a1, &param.a2, parg);
}
/* Initialize waiter */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, system_thread);
while (true) {
struct arm_smccc_res res;
@@ -965,34 +965,6 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
return rc;
}
-static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
-{
- struct optee_shm_arg_entry *entry;
- struct optee_msg_arg *msg_arg;
- struct tee_shm *shm;
- u_int offs;
-
- msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
- if (IS_ERR(msg_arg))
- return PTR_ERR(msg_arg);
-
- msg_arg->cmd = cmd;
- optee_smc_do_call_with_arg(ctx, shm, offs);
-
- optee_free_msg_arg(ctx, entry, offs);
- return 0;
-}
-
-static int optee_smc_do_bottom_half(struct tee_context *ctx)
-{
- return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
-}
-
-static int optee_smc_stop_async_notif(struct tee_context *ctx)
-{
- return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
-}
-
/*
* 5. Asynchronous notification
*/
@@ -1048,7 +1020,7 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
struct optee *optee = dev_id;
- optee_smc_do_bottom_half(optee->ctx);
+ optee_do_bottom_half(optee->ctx);
return IRQ_HANDLED;
}
@@ -1086,7 +1058,7 @@ static void notif_pcpu_irq_work_fn(struct work_struct *work)
notif_pcpu_work);
struct optee *optee = container_of(optee_smc, struct optee, smc);
- optee_smc_do_bottom_half(optee->ctx);
+ optee_do_bottom_half(optee->ctx);
}
static int init_pcpu_irq(struct optee *optee, u_int irq)
@@ -1158,7 +1130,7 @@ static void uninit_pcpu_irq(struct optee *optee)
static void optee_smc_notif_uninit_irq(struct optee *optee)
{
if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
- optee_smc_stop_async_notif(optee->ctx);
+ optee_stop_async_notif(optee->ctx);
if (optee->smc.notif_irq) {
if (irq_is_percpu_devid(optee->smc.notif_irq))
uninit_pcpu_irq(optee);
@@ -1210,6 +1182,7 @@ static const struct tee_driver_ops optee_clnt_ops = {
.release = optee_release,
.open_session = optee_open_session,
.close_session = optee_close_session,
+ .system_session = optee_system_session,
.invoke_func = optee_invoke_func,
.cancel_req = optee_cancel_req,
.shm_register = optee_shm_register,
@@ -1357,6 +1330,16 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
return true;
}
+static unsigned int optee_msg_get_thread_count(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_GET_THREAD_COUNT, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
+ return 0;
+ return res.a1;
+}
+
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
@@ -1609,6 +1592,7 @@ static int optee_probe(struct platform_device *pdev)
struct optee *optee = NULL;
void *memremaped_shm = NULL;
unsigned int rpc_param_count;
+ unsigned int thread_count;
struct tee_device *teedev;
struct tee_context *ctx;
u32 max_notif_value;
@@ -1636,6 +1620,7 @@ static int optee_probe(struct platform_device *pdev)
return -EINVAL;
}
+ thread_count = optee_msg_get_thread_count(invoke_fn);
if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
&max_notif_value,
&rpc_param_count)) {
@@ -1725,8 +1710,7 @@ static int optee_probe(struct platform_device *pdev)
if (rc)
goto err_unreg_supp_teedev;
- mutex_init(&optee->call_queue.mutex);
- INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_cq_init(&optee->call_queue, thread_count);
optee_supp_init(&optee->supp);
optee->smc.memremaped_shm = memremaped_shm;
optee->pool = pool;
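
Note how the two ABIs now initialize the call queue, which is the point of letting optee_cq_init() take a thread count (restated from the probe paths above):

	/* SMC ABI: ask OP-TEE for its thread pool size; a failed query
	 * yields 0, which simply disables driver-side accounting.
	 */
	thread_count = optee_msg_get_thread_count(invoke_fn);
	optee_cq_init(&optee->call_queue, thread_count);

	/* FF-A ABI: no equivalent query exists, so pass 0 and keep
	 * relying on the secure world returning "busy" to make callers
	 * wait for a completion.
	 */
	optee_cq_init(&optee->call_queue, 0);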
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 5ddfd5d9a..792d6fae4 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -1173,6 +1173,14 @@ int tee_client_close_session(struct tee_context *ctx, u32 session)
}
EXPORT_SYMBOL_GPL(tee_client_close_session);
+int tee_client_system_session(struct tee_context *ctx, u32 session)
+{
+ if (!ctx->teedev->desc->ops->system_session)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->system_session(ctx, session);
+}
+EXPORT_SYMBOL_GPL(tee_client_system_session);
+
int tee_client_invoke_func(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param)
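
A hypothetical in-kernel client would use the new export along these lines (a sketch: session setup is abbreviated, and treating failure as non-fatal is this example's policy, not something the patch mandates):

	struct tee_ioctl_open_session_arg sess_arg = { };
	int rc;

	/* fill in sess_arg.uuid etc., then open the session as usual */
	rc = tee_client_open_session(ctx, &sess_arg, NULL);
	if (rc || sess_arg.ret)
		return rc ? rc : -EINVAL;

	/* ask the driver to reserve a TEE thread for this session */
	if (tee_client_system_session(ctx, sess_arg.session))
		pr_info("no system thread reserved, using normal threads\n");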
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 673cf0359..731d9028b 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -22,23 +22,12 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count)
put_page(pages[n]);
}
-static int shm_get_kernel_pages(unsigned long start, size_t page_count,
- struct page **pages)
+static void shm_get_kernel_pages(struct page **pages, size_t page_count)
{
- struct page *page;
size_t n;
- if (WARN_ON_ONCE(is_vmalloc_addr((void *)start) ||
- is_kmap_addr((void *)start)))
- return -EINVAL;
-
- page = virt_to_page((void *)start);
- for (n = 0; n < page_count; n++) {
- pages[n] = page + n;
+ for (n = 0; n < page_count; n++)
get_page(pages[n]);
- }
-
- return page_count;
}
static void release_registered_pages(struct tee_shm *shm)
@@ -214,13 +203,14 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
static struct tee_shm *
-register_shm_helper(struct tee_context *ctx, unsigned long addr,
- size_t length, u32 flags, int id)
+register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
+ int id)
{
struct tee_device *teedev = ctx->teedev;
struct tee_shm *shm;
- unsigned long start;
- size_t num_pages;
+ unsigned long start, addr;
+ size_t num_pages, off;
+ ssize_t len;
void *ret;
int rc;
@@ -245,31 +235,38 @@ register_shm_helper(struct tee_context *ctx, unsigned long addr,
shm->flags = flags;
shm->ctx = ctx;
shm->id = id;
- addr = untagged_addr(addr);
+ addr = untagged_addr((unsigned long)iter_iov_addr(iter));
start = rounddown(addr, PAGE_SIZE);
- shm->offset = addr - start;
- shm->size = length;
- num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+ num_pages = iov_iter_npages(iter, INT_MAX);
+ if (!num_pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_ctx_put;
+ }
+
shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
if (!shm->pages) {
ret = ERR_PTR(-ENOMEM);
goto err_free_shm;
}
- if (flags & TEE_SHM_USER_MAPPED)
- rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
- shm->pages);
- else
- rc = shm_get_kernel_pages(start, num_pages, shm->pages);
- if (rc > 0)
- shm->num_pages = rc;
- if (rc != num_pages) {
- if (rc >= 0)
- rc = -ENOMEM;
- ret = ERR_PTR(rc);
- goto err_put_shm_pages;
+ len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
+ &off);
+ if (unlikely(len <= 0)) {
+ ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
+ goto err_free_shm_pages;
}
+ /*
+ * iov_iter_extract_kvec_pages() does not take a reference on the
+ * pages, so take one on them here.
+ */
+ if (iov_iter_is_kvec(iter))
+ shm_get_kernel_pages(shm->pages, num_pages);
+
+ shm->offset = off;
+ shm->size = len;
+ shm->num_pages = num_pages;
+
rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
shm->num_pages, start);
if (rc) {
@@ -279,10 +276,11 @@ register_shm_helper(struct tee_context *ctx, unsigned long addr,
return shm;
err_put_shm_pages:
- if (flags & TEE_SHM_USER_MAPPED)
+ if (!iov_iter_is_kvec(iter))
unpin_user_pages(shm->pages, shm->num_pages);
else
shm_put_kernel_pages(shm->pages, shm->num_pages);
+err_free_shm_pages:
kfree(shm->pages);
err_free_shm:
kfree(shm);
@@ -307,6 +305,7 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
struct tee_device *teedev = ctx->teedev;
struct tee_shm *shm;
+ struct iov_iter iter;
void *ret;
int id;
@@ -319,7 +318,8 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
if (id < 0)
return ERR_PTR(id);
- shm = register_shm_helper(ctx, addr, length, flags, id);
+ iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
+ shm = register_shm_helper(ctx, &iter, flags, id);
if (IS_ERR(shm)) {
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, id);
@@ -352,8 +352,14 @@ struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
void *addr, size_t length)
{
u32 flags = TEE_SHM_DYNAMIC;
+ struct kvec kvec;
+ struct iov_iter iter;
+
+ kvec.iov_base = addr;
+ kvec.iov_len = length;
+ iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);
- return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
+ return register_shm_helper(ctx, &iter, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
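
The iov_iter rework leaves the exported API unchanged, so existing callers keep working. A minimal usage sketch for the kernel-buffer path (hypothetical caller, assuming ctx is a valid struct tee_context):

	void *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	struct tee_shm *shm;

	if (!buf)
		return -ENOMEM;

	/* now routed through a kvec iov_iter; the helper takes its own
	 * page references and computes the page offset itself
	 */
	shm = tee_shm_register_kernel_buf(ctx, buf, PAGE_SIZE);
	if (IS_ERR(shm)) {
		kfree(buf);
		return PTR_ERR(shm);
	}
	/* ... pass shm to the TEE ... */
	tee_shm_free(shm);
	kfree(buf);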