1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2015-2021, Linaro Limited
4 * Copyright (c) 2016, EPAM Systems
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/arm-smccc.h>
10 #include <linux/errno.h>
12 #include <linux/sched.h>
14 #include <linux/module.h>
16 #include <linux/of_platform.h>
17 #include <linux/platform_device.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/tee_drv.h>
21 #include <linux/types.h>
22 #include <linux/workqueue.h>
23 #include "optee_private.h"
24 #include "optee_smc.h"
25 #include "optee_rpc_cmd.h"
26 #define CREATE_TRACE_POINTS
27 #include "optee_trace.h"
 * This file implements the SMC ABI used when communicating with the secure
 * world OP-TEE OS via raw SMCs.
32 * This file is divided into the following sections:
33 * 1. Convert between struct tee_param and struct optee_msg_param
34 * 2. Low level support functions to register shared memory in secure world
35 * 3. Dynamic shared memory pool based on alloc_pages()
36 * 4. Do a normal scheduled call into secure world
37 * 5. Driver initialization.
40 #define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
43 * 1. Convert between struct tee_param and struct optee_msg_param
45 * optee_from_msg_param() and optee_to_msg_param() are the main
/*
 * Convert an OPTEE_MSG "temporary memory" (tmem) parameter into the
 * subsystem internal struct tee_param memref representation.
 * NOTE(review): some lines (declarations, braces, returns) are not
 * visible in this view of the file.
 */
static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
				  const struct optee_msg_param *mp)
	/* TMEM attribute values map 1:1 onto the IOCTL memref attributes */
	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->u.memref.size = mp->u.tmem.size;
	/* shm_ref carries a struct tee_shm pointer as a 64-bit cookie */
	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
	p->u.memref.shm_offs = 0;
	p->u.memref.shm = NULL;
	rc = tee_shm_get_pa(shm, 0, &pa);
	/* Offset of the buffer within the shm object */
	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
	p->u.memref.shm = shm;
	/* Check that the memref is covered by the shm object */
	if (p->u.memref.size) {
		size_t o = p->u.memref.shm_offs +
		/* Probing the last byte validates that the range fits */
		rc = tee_shm_get_pa(shm, o, NULL);
/*
 * Convert an OPTEE_MSG "registered memory" (rmem) parameter into the
 * subsystem internal struct tee_param memref representation.
 */
static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
				   const struct optee_msg_param *mp)
	/* RMEM attribute values map 1:1 onto the IOCTL memref attributes */
	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	p->u.memref.size = mp->u.rmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
	p->u.memref.shm_offs = mp->u.rmem.offs;
	p->u.memref.shm = shm;
	/* presumably the branch taken when no shm object is attached — confirm */
	p->u.memref.shm_offs = 0;
	p->u.memref.shm = NULL;
/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @optee:	main service struct
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 *
 * Returns 0 on success or <0 on failure
 */
static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
				const struct optee_msg_param *msg_params)
	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		/* Mask off everything but the parameter type */
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			optee_from_msg_param_value(p, attr, mp);
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			rc = from_msg_param_tmp_mem(p, attr, mp);
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			from_msg_param_reg_mem(p, attr, mp);
/*
 * Convert a struct tee_param memref into an OPTEE_MSG "temporary memory"
 * (tmem) parameter: resolve the physical address of the buffer.
 */
static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
	/* Pass the shm pointer back as a cookie so it can be looked up later */
	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;
	/* NULL memref: no backing shm, send a zero buffer pointer */
	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	mp->u.tmem.buf_ptr = pa;
	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
		    OPTEE_MSG_ATTR_CACHE_SHIFT;
/*
 * Convert a struct tee_param memref into an OPTEE_MSG "registered memory"
 * (rmem) parameter; the shm object is referenced by cookie, no physical
 * address is needed.
 */
static int to_msg_param_reg_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.rmem.size = p->u.memref.size;
	mp->u.rmem.offs = p->u.memref.shm_offs;
/**
 * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
 * @optee:	main service struct
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 *
 * Returns 0 on success or <0 on failure
 */
static int optee_to_msg_param(struct optee *optee,
			      struct optee_msg_param *msg_params,
			      size_t num_params, const struct tee_param *params)
	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			optee_to_msg_param_value(mp, p);
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/* Registered shm is passed as rmem, otherwise as tmem */
			if (tee_shm_is_registered(p->u.memref.shm))
				rc = to_msg_param_reg_mem(mp, p);
				rc = to_msg_param_tmp_mem(mp, p);
243 * 2. Low level support functions to register shared memory in secure world
245 * Functions to enable/disable shared memory caching in secure world, that
246 * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
249 * Functions to register and unregister shared memory both for normal
250 * clients and for tee-supplicant.
/**
 * optee_enable_shm_cache() - Enables caching of some shared memory allocation
 *			      in OP-TEE
 * @optee:	main service struct
 */
static void optee_enable_shm_cache(struct optee *optee)
	struct optee_call_waiter w;
	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
		struct arm_smccc_res res;
		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res);
		/* Success: stop retrying; otherwise wait for a free thread */
		if (res.a0 == OPTEE_SMC_RETURN_OK)
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	optee_cq_wait_final(&optee->call_queue, &w);
/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocation in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
	struct optee_call_waiter w;
	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
		struct arm_smccc_res smccc;
		struct optee_smc_disable_shm_cache_result result;
		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	optee_cq_wait_final(&optee->call_queue, &w);
320 * optee_disable_shm_cache() - Disables caching of mapped shared memory
321 * allocations in OP-TEE
322 * @optee: main service struct
324 static void optee_disable_shm_cache(struct optee *optee)
326 return __optee_disable_shm_cache(optee, true);
330 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
331 * allocations in OP-TEE which are not
333 * @optee: main service struct
335 static void optee_disable_unmapped_shm_cache(struct optee *optee)
337 return __optee_disable_shm_cache(optee, false);
/* Number of u64 page entries per pagelist page, minus the link entry */
#define PAGELIST_ENTRIES_PER_PAGE \
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page. Returns the byte size needed to describe @num_entries
 * pages, rounded to whole pagelist pages.
 */
static size_t get_pages_list_size(size_t num_entries)
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
/* Allocate a physically contiguous, zero-slack buffer for a pagelist */
static u64 *optee_allocate_pages_list(size_t num_entries)
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
/* Free a pagelist previously allocated with optee_allocate_pages_list() */
static void optee_free_pages_list(void *list, size_t num_entries)
	free_pages_exact(list, get_pages_list_size(num_entries));
/**
 * optee_fill_pages_list() - write list of user pages to given shared
 *			     buffer
 * @dst:	page-aligned buffer where list of pages will be stored
 * @pages:	array of pages that represents shared buffer
 * @num_pages:	number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 * links to the next pages of buffer
 */
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for the layout of a pagelist page.
	 */
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
	/*
	 * Currently OP-TEE uses 4k page size and it does not look
	 * like this will change in the future. On the other hand, there are
	 * no known ARM architectures with page size < 4k.
	 * Thus the next build assert looks redundant. But the following
	 * code heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
	pages_data = (void *)dst;
	/*
	 * If linux page is bigger than 4k, and user buffer offset is
	 * larger than 4k/8k/12k/etc this will skip first 4k pages,
	 * because they bear no value data for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
		pages_data->pages_list[n++] = optee_page;
		/* Current pagelist page is full: link to the next one */
		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		/* Crossed a Linux page boundary: advance to the next page */
		if (!(optee_page & ~PAGE_MASK)) {
			optee_page = page_to_phys(*pages);
/*
 * Register a shared memory object with secure world using the
 * OPTEE_MSG_CMD_REGISTER_SHM command and a non-contiguous pagelist.
 */
static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			      struct page **pages, size_t num_pages,
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	rc = optee_check_mem_type(start, num_pages);
	pages_list = optee_allocate_pages_list(num_pages);
	/* One-parameter message argument carried in its own shm object */
	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));
	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we
	 * store buffer offset from 4k page, as described in OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
	tee_shm_free(shm_arg);
	optee_free_pages_list(pages_list, num_pages);
/*
 * Tell secure world to drop its registration of @shm using the
 * OPTEE_MSG_CMD_UNREGISTER_SHM command.
 */
static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
		return PTR_ERR(shm_arg);
	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
	/* The shm is identified by the same cookie used at registration */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
	tee_shm_free(shm_arg);
/* Supplicant variant: only validate the memory, never register it */
static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead information about it will be passed in RPC code.
	 */
	return optee_check_mem_type(start, num_pages);
/* Counterpart of optee_shm_register_supp(): nothing was registered */
static int optee_shm_unregister_supp(struct tee_context *ctx,
522 * 3. Dynamic shared memory pool based on alloc_pages()
524 * Implements an OP-TEE specific shared memory pool which is used
525 * when dynamic shared memory is supported by secure world.
527 * The main function is optee_shm_pool_alloc_pages().
/* Pool allocator hook: allocate pages and, for non-private shm, register */
static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm, size_t size)
	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
	if (shm->flags & TEE_SHM_PRIV)
		return optee_pool_op_alloc_helper(poolm, shm, size, NULL);
	return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
/* Pool free hook: unregister non-private shm, then release the pages */
static void pool_op_free(struct tee_shm_pool_mgr *poolm,
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_shm_unregister(shm->ctx, shm);
	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
/* Pool destroy hook; body not visible in this view */
static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
/* Operations for the page-based dynamic shared memory pool */
static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from kernel's own memory.
 */
static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	mgr->ops = &pool_ops;
583 * 4. Do a normal scheduled call into secure world
585 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes
 * delivery of non-secure interrupts to, for instance, allow rescheduling of
/*
 * RPC handler: free shared memory previously allocated on behalf of
 * secure world. The shm cookie is carried in params[0].u.value.b and
 * the shm type (supplicant vs kernel) in params[0].u.value.a.
 */
static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
	arg->ret_origin = TEEC_ORIGIN_COMMS;
	/* Exactly one VALUE_INPUT parameter is expected */
	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		optee_rpc_cmd_free_suppl(ctx, shm);
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
	arg->ret = TEEC_SUCCESS;
/*
 * RPC handler: allocate shared memory on behalf of secure world.
 * params[0].u.value.a selects the shm type (supplicant vs kernel) and
 * params[0].u.value.b carries the requested size. On success the result
 * is returned in params[0] as a tmem parameter.
 */
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
	arg->ret_origin = TEEC_ORIGIN_COMMS;
	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
	/* All parameters after the first must be unused */
	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
	sz = tee_shm_get_size(shm);
	/* Registered (dynamic) shm is described with a pagelist */
	if (tee_shm_is_registered(shm)) {
		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		pages_list = optee_allocate_pages_list(page_num);
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		/* Keep the pagelist alive until the call is finalized */
		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least bits of u.tmem.buf_ptr we store buffer offset
		 * from 4k page, as described in OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
		/* Contiguous shm: pass the physical address directly */
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	arg->ret = TEEC_SUCCESS;
/* Release the pagelist cached in @call_ctx, if any, and reset the fields */
static void free_pages_list(struct optee_call_ctx *call_ctx)
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
/* Called once per OP-TEE invocation to drop per-call RPC resources */
static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
	free_pages_list(call_ctx);
/*
 * Dispatch an RPC command carried in the shm-backed message argument.
 * SHM alloc/free are handled here; everything else is forwarded to the
 * common optee_rpc_cmd() handler.
 */
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct optee_call_ctx *call_ctx)
	struct optee_msg_arg *arg;
	arg = tee_shm_get_va(shm, 0);
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
	case OPTEE_RPC_CMD_SHM_ALLOC:
		/* Drop any pagelist from a previous allocation first */
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		optee_rpc_cmd(ctx, optee, arg);
756 * optee_handle_rpc() - handle RPC from secure world
757 * @ctx: context doing the RPC
758 * @param: value of registers for the RPC
759 * @call_ctx: call context. Preserved during one OP-TEE invocation
761 * Result of RPC is written back into @param.
763 static void optee_handle_rpc(struct tee_context *ctx,
764 struct optee_rpc_param *param,
765 struct optee_call_ctx *call_ctx)
767 struct tee_device *teedev = ctx->teedev;
768 struct optee *optee = tee_get_drvdata(teedev);
772 switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
773 case OPTEE_SMC_RPC_FUNC_ALLOC:
774 shm = tee_shm_alloc(ctx, param->a1,
775 TEE_SHM_MAPPED | TEE_SHM_PRIV);
776 if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
777 reg_pair_from_64(¶m->a1, ¶m->a2, pa);
778 reg_pair_from_64(¶m->a4, ¶m->a5,
787 case OPTEE_SMC_RPC_FUNC_FREE:
788 shm = reg_pair_to_ptr(param->a1, param->a2);
791 case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
793 * A foreign interrupt was raised while secure world was
794 * executing, since they are handled in Linux a dummy RPC is
795 * performed to let Linux take the interrupt through the normal
799 case OPTEE_SMC_RPC_FUNC_CMD:
800 shm = reg_pair_to_ptr(param->a1, param->a2);
801 handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
804 pr_warn("Unknown RPC func 0x%x\n",
805 (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
809 param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
813 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
814 * @ctx: calling context
815 * @arg: shared memory holding the message to pass to secure world
817 * Does and SMC to OP-TEE in secure world and handles eventual resulting
818 * Remote Procedure Calls (RPC) from OP-TEE.
820 * Returns return code from secure world, 0 is OK
822 static int optee_smc_do_call_with_arg(struct tee_context *ctx,
825 struct optee *optee = tee_get_drvdata(ctx->teedev);
826 struct optee_call_waiter w;
827 struct optee_rpc_param param = { };
828 struct optee_call_ctx call_ctx = { };
832 rc = tee_shm_get_pa(arg, 0, &parg);
836 param.a0 = OPTEE_SMC_CALL_WITH_ARG;
837 reg_pair_from_64(¶m.a1, ¶m.a2, parg);
838 /* Initialize waiter */
839 optee_cq_wait_init(&optee->call_queue, &w);
841 struct arm_smccc_res res;
843 trace_optee_invoke_fn_begin(¶m);
844 optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
845 param.a4, param.a5, param.a6, param.a7,
847 trace_optee_invoke_fn_end(¶m, &res);
849 if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
851 * Out of threads in secure world, wait for a thread
854 optee_cq_wait_for_completion(&optee->call_queue, &w);
855 } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
861 optee_handle_rpc(ctx, ¶m, &call_ctx);
868 optee_rpc_finalize_call(&call_ctx);
870 * We're done with our thread in secure world, if there's any
871 * thread waiters wake up one.
873 optee_cq_wait_final(&optee->call_queue, &w);
879 * 5. Driver initialization
 * During driver initialization the secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration. This involves, for instance, support for dynamic shared
 * memory instead of a static memory carveout.
/* Report driver/OS capabilities to user space via TEE_IOC_VERSION */
static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	struct optee *optee = tee_get_drvdata(teedev);
	/* Advertise optional capabilities reported by secure world */
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
/* open() hook: delegate to the common open, passing MEMREF_NULL support */
static int optee_smc_open(struct tee_context *ctx)
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	u32 sec_caps = optee->smc.sec_caps;
	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
/* Operations exposed on the client (non-privileged) TEE device */
static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
/* Descriptor for the client TEE device (/dev/tee*) */
static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
/* Operations exposed on the supplicant (privileged) TEE device */
static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
/* Descriptor for the supplicant TEE device (/dev/teepriv*) */
static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
/* ABI-specific internal operations used by the common OP-TEE code */
static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
/* Probe: true when the firmware answering SMCs identifies as OP-TEE */
static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
	struct arm_smccc_res res;
	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
	/* The UID is returned spread across a0..a3 */
	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
965 static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
968 struct arm_smccc_res smccc;
969 struct optee_smc_call_get_os_revision_result result;
976 invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
979 if (res.result.build_id)
980 pr_info("revision %lu.%lu (%08lx)", res.result.major,
981 res.result.minor, res.result.build_id);
983 pr_info("revision %lu.%lu", res.result.major, res.result.minor);
/* Probe: true when secure world's message ABI revision is compatible */
static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	/* Major must match exactly; minor may be newer than required */
	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
/*
 * Exchange capabilities with secure world; on success the secure-world
 * capability bits are stored in *sec_caps.
 */
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	/*
	 * TODO This isn't enough to tell if it's UP system (from kernel
	 * point of view) or not, is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
	if (res.result.status != OPTEE_SMC_RETURN_OK)
	*sec_caps = res.result.capabilities;
/*
 * Build the dynamic shared memory pool: two page-based pool managers
 * (private and dma-buf) combined into one tee_shm_pool.
 */
static struct tee_shm_pool *optee_config_dyn_shm(void)
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	rc = optee_shm_pool_alloc_pages();
	rc = optee_shm_pool_alloc_pages();
		tee_shm_pool_mgr_destroy(priv_mgr);
	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
		/* Combining failed: tear down both managers */
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
/*
 * Configure the static (reserved, carved-out) shared memory pool:
 * query secure world for the shm region, memremap() it and split it
 * into a small private part and a dma-buf part.
 */
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	unsigned long vaddr;
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	/* Shrink the region to whole pages */
	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	/* Need room for the private pages plus at least as much again */
	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	va = memremap(paddr, size, MEMREMAP_WB);
		pr_err("shared memory ioremap failed\n");
		return ERR_PTR(-EINVAL);
	vaddr = (unsigned long)va;
	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
					    3 /* 8 bytes aligned */);
	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
		goto err_free_priv_mgr;
	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
		goto err_free_dmabuf_mgr;
	*memremaped_shm = va;
err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
	tee_shm_pool_mgr_destroy(priv_mgr);
/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
/* HVC flavour of the conduit wrapper, see optee_smccc_smc() */
static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
/*
 * Select the SMC/HVC conduit from the device's "method" property.
 * Returns the matching wrapper or an ERR_PTR on a missing/invalid
 * property.
 */
static optee_invoke_fn *get_invoke_func(struct device *dev)
	pr_info("probing for conduit method.\n");
	if (device_property_read_string(dev, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;
	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
/* optee_remove - Device Removal Routine
 * @pdev: platform device information struct
 *
 * optee_remove is called by platform subsystem to alert the driver
 * that it should release the device
 */
static int optee_smc_remove(struct platform_device *pdev)
	struct optee *optee = platform_get_drvdata(pdev);
	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);
	optee_remove_common(optee);
	/* Only set when the static shm pool was configured */
	if (optee->smc.memremaped_shm)
		memunmap(optee->smc.memremaped_shm);
/* optee_shutdown - Device Shutdown Routine
 * @pdev: platform device information struct
 *
 * platform_shutdown is called by the platform subsystem to alert
 * the driver that a shutdown, reboot, or kexec is happening and
 * device must be disabled.
 */
static void optee_shutdown(struct platform_device *pdev)
	optee_disable_shm_cache(platform_get_drvdata(pdev));
/*
 * Probe: detect the conduit, verify that the secure world is a
 * compatible OP-TEE, configure shared memory (dynamic preferred,
 * static carveout as fallback), allocate and register the client and
 * supplicant TEE devices, and enable the shm cache.
 */
static int optee_probe(struct platform_device *pdev)
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);
	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
	optee_msg_get_os_revision(invoke_fn);
	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
		pr_warn("capabilities mismatch\n");
	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_config_dyn_shm();
	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
		return PTR_ERR(pool);
	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;
	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
	optee->teedev = teedev;
	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
	optee->supp_teedev = teedev;
	rc = tee_device_register(optee->teedev);
	rc = tee_device_register(optee->supp_teedev);
	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_wait_queue_init(&optee->wait_queue);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly cleanup the
	 * shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);
	optee_enable_shm_cache(optee);
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");
	platform_set_drvdata(pdev, optee);
	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
		optee_smc_remove(pdev);
	pr_info("initialized driver\n");
	/*
	 * tee_device_unregister() is safe to call even if the
	 * devices hasn't been registered with
	 * tee_device_register() yet.
	 */
	tee_device_unregister(optee->supp_teedev);
	tee_device_unregister(optee->teedev);
	tee_shm_pool_free(pool);
		memunmap(memremaped_shm);
1338 static const struct of_device_id optee_dt_match[] = {
1339 { .compatible = "linaro,optee-tz" },
1342 MODULE_DEVICE_TABLE(of, optee_dt_match);
/* Platform driver for the SMC-based OP-TEE ABI */
static struct platform_driver optee_driver = {
	.probe  = optee_probe,
	.remove = optee_smc_remove,
	.shutdown = optee_shutdown,
		.of_match_table = optee_dt_match,
/* Register the SMC ABI platform driver; called from common init code */
int optee_smc_abi_register(void)
	return platform_driver_register(&optee_driver);
/* Unregister the SMC ABI platform driver; called from common exit code */
void optee_smc_abi_unregister(void)
	platform_driver_unregister(&optee_driver);