// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
#include "tee_private.h"
16 MODULE_IMPORT_NS(DMA_BUF);
18 static void release_registered_pages(struct tee_shm *shm)
21 if (shm->flags & TEE_SHM_USER_MAPPED) {
22 unpin_user_pages(shm->pages, shm->num_pages);
26 for (n = 0; n < shm->num_pages; n++)
27 put_page(shm->pages[n]);
34 static void tee_shm_release(struct tee_shm *shm)
36 struct tee_device *teedev = shm->ctx->teedev;
38 if (shm->flags & TEE_SHM_DMA_BUF) {
39 mutex_lock(&teedev->mutex);
40 idr_remove(&teedev->idr, shm->id);
41 mutex_unlock(&teedev->mutex);
44 if (shm->flags & TEE_SHM_POOL) {
45 struct tee_shm_pool_mgr *poolm;
47 if (shm->flags & TEE_SHM_DMA_BUF)
48 poolm = teedev->pool->dma_buf_mgr;
50 poolm = teedev->pool->private_mgr;
52 poolm->ops->free(poolm, shm);
53 } else if (shm->flags & TEE_SHM_REGISTER) {
54 int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
57 dev_err(teedev->dev.parent,
58 "unregister shm %p failed: %d", shm, rc);
60 release_registered_pages(shm);
63 teedev_ctx_put(shm->ctx);
67 tee_device_put(teedev);
70 static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
71 *attach, enum dma_data_direction dir)
76 static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
77 struct sg_table *table,
78 enum dma_data_direction dir)
82 static void tee_shm_op_release(struct dma_buf *dmabuf)
84 struct tee_shm *shm = dmabuf->priv;
89 static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
91 struct tee_shm *shm = dmabuf->priv;
92 size_t size = vma->vm_end - vma->vm_start;
94 /* Refuse sharing shared memory provided by application */
95 if (shm->flags & TEE_SHM_USER_MAPPED)
98 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
99 size, vma->vm_page_prot);
102 static const struct dma_buf_ops tee_shm_dma_buf_ops = {
103 .map_dma_buf = tee_shm_op_map_dma_buf,
104 .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
105 .release = tee_shm_op_release,
106 .mmap = tee_shm_op_mmap,
109 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
111 struct tee_device *teedev = ctx->teedev;
112 struct tee_shm_pool_mgr *poolm = NULL;
117 if (!(flags & TEE_SHM_MAPPED)) {
118 dev_err(teedev->dev.parent,
119 "only mapped allocations supported\n");
120 return ERR_PTR(-EINVAL);
123 if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
124 dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
125 return ERR_PTR(-EINVAL);
128 if (!tee_device_get(teedev))
129 return ERR_PTR(-EINVAL);
132 /* teedev has been detached from driver */
133 ret = ERR_PTR(-EINVAL);
137 shm = kzalloc(sizeof(*shm), GFP_KERNEL);
139 ret = ERR_PTR(-ENOMEM);
143 shm->flags = flags | TEE_SHM_POOL;
145 if (flags & TEE_SHM_DMA_BUF)
146 poolm = teedev->pool->dma_buf_mgr;
148 poolm = teedev->pool->private_mgr;
150 rc = poolm->ops->alloc(poolm, shm, size);
157 if (flags & TEE_SHM_DMA_BUF) {
158 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
160 mutex_lock(&teedev->mutex);
161 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
162 mutex_unlock(&teedev->mutex);
164 ret = ERR_PTR(shm->id);
168 exp_info.ops = &tee_shm_dma_buf_ops;
169 exp_info.size = shm->size;
170 exp_info.flags = O_RDWR;
173 shm->dmabuf = dma_buf_export(&exp_info);
174 if (IS_ERR(shm->dmabuf)) {
175 ret = ERR_CAST(shm->dmabuf);
184 if (flags & TEE_SHM_DMA_BUF) {
185 mutex_lock(&teedev->mutex);
186 idr_remove(&teedev->idr, shm->id);
187 mutex_unlock(&teedev->mutex);
190 poolm->ops->free(poolm, shm);
194 tee_device_put(teedev);
197 EXPORT_SYMBOL_GPL(tee_shm_alloc);
200 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
201 * @ctx: Context that allocates the shared memory
202 * @size: Requested size of shared memory
204 * The returned memory registered in secure world and is suitable to be
205 * passed as a memory buffer in parameter argument to
206 * tee_client_invoke_func(). The memory allocated is later freed with a
207 * call to tee_shm_free().
209 * @returns a pointer to 'struct tee_shm'
211 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
213 return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
215 EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
217 struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
218 size_t length, u32 flags)
220 struct tee_device *teedev = ctx->teedev;
221 const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
222 const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
229 if (flags != req_user_flags && flags != req_kernel_flags)
230 return ERR_PTR(-ENOTSUPP);
232 if (!tee_device_get(teedev))
233 return ERR_PTR(-EINVAL);
235 if (!teedev->desc->ops->shm_register ||
236 !teedev->desc->ops->shm_unregister) {
237 tee_device_put(teedev);
238 return ERR_PTR(-ENOTSUPP);
243 shm = kzalloc(sizeof(*shm), GFP_KERNEL);
245 ret = ERR_PTR(-ENOMEM);
249 shm->flags = flags | TEE_SHM_REGISTER;
252 addr = untagged_addr(addr);
253 start = rounddown(addr, PAGE_SIZE);
254 shm->offset = addr - start;
256 num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
257 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
259 ret = ERR_PTR(-ENOMEM);
263 if (flags & TEE_SHM_USER_MAPPED) {
264 rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
270 kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
272 ret = ERR_PTR(-ENOMEM);
276 for (i = 0; i < num_pages; i++) {
277 kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
278 kiov[i].iov_len = PAGE_SIZE;
281 rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
286 if (rc != num_pages) {
293 mutex_lock(&teedev->mutex);
294 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
295 mutex_unlock(&teedev->mutex);
298 ret = ERR_PTR(shm->id);
302 rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
303 shm->num_pages, start);
309 if (flags & TEE_SHM_DMA_BUF) {
310 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
312 exp_info.ops = &tee_shm_dma_buf_ops;
313 exp_info.size = shm->size;
314 exp_info.flags = O_RDWR;
317 shm->dmabuf = dma_buf_export(&exp_info);
318 if (IS_ERR(shm->dmabuf)) {
319 ret = ERR_CAST(shm->dmabuf);
320 teedev->desc->ops->shm_unregister(ctx, shm);
329 mutex_lock(&teedev->mutex);
330 idr_remove(&teedev->idr, shm->id);
331 mutex_unlock(&teedev->mutex);
333 release_registered_pages(shm);
337 tee_device_put(teedev);
340 EXPORT_SYMBOL_GPL(tee_shm_register);
343 * tee_shm_get_fd() - Increase reference count and return file descriptor
344 * @shm: Shared memory handle
345 * @returns user space file descriptor to shared memory
347 int tee_shm_get_fd(struct tee_shm *shm)
351 if (!(shm->flags & TEE_SHM_DMA_BUF))
354 get_dma_buf(shm->dmabuf);
355 fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
357 dma_buf_put(shm->dmabuf);
362 * tee_shm_free() - Free shared memory
363 * @shm: Handle to shared memory to free
365 void tee_shm_free(struct tee_shm *shm)
368 * dma_buf_put() decreases the dmabuf reference counter and will
369 * call tee_shm_release() when the last reference is gone.
371 * In the case of driver private memory we call tee_shm_release
372 * directly instead as it doesn't have a reference counter.
374 if (shm->flags & TEE_SHM_DMA_BUF)
375 dma_buf_put(shm->dmabuf);
377 tee_shm_release(shm);
379 EXPORT_SYMBOL_GPL(tee_shm_free);
382 * tee_shm_va2pa() - Get physical address of a virtual address
383 * @shm: Shared memory handle
384 * @va: Virtual address to tranlsate
385 * @pa: Returned physical address
386 * @returns 0 on success and < 0 on failure
388 int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
390 if (!(shm->flags & TEE_SHM_MAPPED))
392 /* Check that we're in the range of the shm */
393 if ((char *)va < (char *)shm->kaddr)
395 if ((char *)va >= ((char *)shm->kaddr + shm->size))
398 return tee_shm_get_pa(
399 shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
401 EXPORT_SYMBOL_GPL(tee_shm_va2pa);
404 * tee_shm_pa2va() - Get virtual address of a physical address
405 * @shm: Shared memory handle
406 * @pa: Physical address to tranlsate
407 * @va: Returned virtual address
408 * @returns 0 on success and < 0 on failure
410 int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
412 if (!(shm->flags & TEE_SHM_MAPPED))
414 /* Check that we're in the range of the shm */
417 if (pa >= (shm->paddr + shm->size))
421 void *v = tee_shm_get_va(shm, pa - shm->paddr);
429 EXPORT_SYMBOL_GPL(tee_shm_pa2va);
432 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
433 * @shm: Shared memory handle
434 * @offs: Offset from start of this shared memory
435 * @returns virtual address of the shared memory + offs if offs is within
436 * the bounds of this shared memory, else an ERR_PTR
438 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
440 if (!(shm->flags & TEE_SHM_MAPPED))
441 return ERR_PTR(-EINVAL);
442 if (offs >= shm->size)
443 return ERR_PTR(-EINVAL);
444 return (char *)shm->kaddr + offs;
446 EXPORT_SYMBOL_GPL(tee_shm_get_va);
449 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
450 * @shm: Shared memory handle
451 * @offs: Offset from start of this shared memory
452 * @pa: Physical address to return
453 * @returns 0 if offs is within the bounds of this shared memory, else an
456 int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
458 if (offs >= shm->size)
461 *pa = shm->paddr + offs;
464 EXPORT_SYMBOL_GPL(tee_shm_get_pa);
467 * tee_shm_get_from_id() - Find shared memory object and increase reference
469 * @ctx: Context owning the shared memory
470 * @id: Id of shared memory object
471 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
473 struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
475 struct tee_device *teedev;
479 return ERR_PTR(-EINVAL);
481 teedev = ctx->teedev;
482 mutex_lock(&teedev->mutex);
483 shm = idr_find(&teedev->idr, id);
484 if (!shm || shm->ctx != ctx)
485 shm = ERR_PTR(-EINVAL);
486 else if (shm->flags & TEE_SHM_DMA_BUF)
487 get_dma_buf(shm->dmabuf);
488 mutex_unlock(&teedev->mutex);
491 EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
494 * tee_shm_put() - Decrease reference count on a shared memory handle
495 * @shm: Shared memory handle
497 void tee_shm_put(struct tee_shm *shm)
499 if (shm->flags & TEE_SHM_DMA_BUF)
500 dma_buf_put(shm->dmabuf);
502 EXPORT_SYMBOL_GPL(tee_shm_put);