platform/kernel/linux-starfive.git: drivers/tee/tee_shm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
#include "tee_private.h"

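/*
 * Undo the page references taken by tee_shm_register(): user-mapped
 * buffers were pinned with pin_user_pages_fast() and are released with
 * unpin_user_pages(), while kernel buffers hold one reference per page
 * that is dropped with put_page(). The page array itself is then freed.
 */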
static void release_registered_pages(struct tee_shm *shm)
{
        if (shm->pages) {
                if (shm->flags & TEE_SHM_USER_MAPPED) {
                        unpin_user_pages(shm->pages, shm->num_pages);
                } else {
                        size_t n;

                        for (n = 0; n < shm->num_pages; n++)
                                put_page(shm->pages[n]);
                }

                kfree(shm->pages);
        }
}

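/*
 * Final release of a shared memory object, called from tee_shm_put() when
 * the refcount drops to zero. Pool-allocated memory is handed back to its
 * pool manager, registered memory is unregistered from the secure world
 * and its pages released, and the context and device references taken at
 * allocation/registration time are dropped.
 */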
static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
        if (shm->flags & TEE_SHM_POOL) {
                struct tee_shm_pool_mgr *poolm;

                if (shm->flags & TEE_SHM_DMA_BUF)
                        poolm = teedev->pool->dma_buf_mgr;
                else
                        poolm = teedev->pool->private_mgr;

                poolm->ops->free(poolm, shm);
        } else if (shm->flags & TEE_SHM_REGISTER) {
                int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

                if (rc)
                        dev_err(teedev->dev.parent,
                                "unregister shm %p failed: %d", shm, rc);

                release_registered_pages(shm);
        }

        teedev_ctx_put(shm->ctx);

        kfree(shm);

        tee_device_put(teedev);
}

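/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:        Context that allocates the shared memory
 * @size:       Requested size of shared memory
 * @flags:      TEE_SHM_* flags describing the allocation
 *
 * Memory is allocated from the driver's shared memory pool. TEE_SHM_MAPPED
 * must always be set. If TEE_SHM_DMA_BUF is set the memory comes from the
 * dma-buf pool manager and is given an id so it can be shared with user
 * space, otherwise it comes from the private pool manager.
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */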
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm_pool_mgr *poolm = NULL;
        struct tee_shm *shm;
        void *ret;
        int rc;

        if (!(flags & TEE_SHM_MAPPED)) {
                dev_err(teedev->dev.parent,
                        "only mapped allocations supported\n");
                return ERR_PTR(-EINVAL);
        }

        if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
                dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
                return ERR_PTR(-EINVAL);
        }

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->pool) {
                /* teedev has been detached from driver */
                ret = ERR_PTR(-EINVAL);
                goto err_dev_put;
        }

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err_dev_put;
        }

        refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_POOL;
        shm->ctx = ctx;
        if (flags & TEE_SHM_DMA_BUF)
                poolm = teedev->pool->dma_buf_mgr;
        else
                poolm = teedev->pool->private_mgr;

        rc = poolm->ops->alloc(poolm, shm, size);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err_kfree;
        }

        if (flags & TEE_SHM_DMA_BUF) {
                mutex_lock(&teedev->mutex);
                shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
                mutex_unlock(&teedev->mutex);
                if (shm->id < 0) {
                        ret = ERR_PTR(shm->id);
                        goto err_pool_free;
                }
        }

        teedev_ctx_get(ctx);

        return shm;
err_pool_free:
        poolm->ops->free(poolm, shm);
err_kfree:
        kfree(shm);
err_dev_put:
        tee_device_put(teedev);
        return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:        Context that allocates the shared memory
 * @size:       Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
        return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);

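/*
 * Usage sketch (hypothetical caller, not part of this file), assuming a
 * valid struct tee_context *ctx obtained elsewhere:
 *
 *      struct tee_shm *shm = tee_shm_alloc_kernel_buf(ctx, 4096);
 *
 *      if (IS_ERR(shm))
 *              return PTR_ERR(shm);
 *      // ... pass shm as a memref parameter to tee_client_invoke_func() ...
 *      tee_shm_free(shm);
 */

/**
 * tee_shm_register() - Register an existing buffer as shared memory
 * @ctx:        Context that registers the shared memory
 * @addr:       Start address of the buffer, a user space or kernel address
 *              depending on @flags
 * @length:     Length of the buffer in bytes
 * @flags:      TEE_SHM_DMA_BUF together with either TEE_SHM_USER_MAPPED or
 *              TEE_SHM_KERNEL_MAPPED
 *
 * The pages backing the buffer are pinned (user buffers) or referenced
 * (kernel buffers) and the range is registered with the secure world via
 * the driver's shm_register() hook. The registration is undone when the
 * shared memory object is freed.
 *
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */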
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                                 size_t length, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
        const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
        struct tee_shm *shm;
        void *ret;
        int rc;
        int num_pages;
        unsigned long start;

        if (flags != req_user_flags && flags != req_kernel_flags)
                return ERR_PTR(-ENOTSUPP);

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->desc->ops->shm_register ||
            !teedev->desc->ops->shm_unregister) {
                tee_device_put(teedev);
                return ERR_PTR(-ENOTSUPP);
        }

        teedev_ctx_get(ctx);

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_REGISTER;
        shm->ctx = ctx;
        shm->id = -1;
        addr = untagged_addr(addr);
        start = rounddown(addr, PAGE_SIZE);
        shm->offset = addr - start;
        shm->size = length;
        num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
        shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
        if (!shm->pages) {
                ret = ERR_PTR(-ENOMEM);
                goto err;
        }

        if (flags & TEE_SHM_USER_MAPPED) {
                rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
                                         shm->pages);
        } else {
                struct kvec *kiov;
                int i;

                kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
                if (!kiov) {
                        ret = ERR_PTR(-ENOMEM);
                        goto err;
                }

                for (i = 0; i < num_pages; i++) {
                        kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
                        kiov[i].iov_len = PAGE_SIZE;
                }

                rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
                kfree(kiov);
        }
        if (rc > 0)
                shm->num_pages = rc;
        if (rc != num_pages) {
                if (rc >= 0)
                        rc = -ENOMEM;
                ret = ERR_PTR(rc);
                goto err;
        }

        mutex_lock(&teedev->mutex);
        shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);

        if (shm->id < 0) {
                ret = ERR_PTR(shm->id);
                goto err;
        }

        rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
                                             shm->num_pages, start);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err;
        }

        return shm;
err:
        if (shm) {
                if (shm->id >= 0) {
                        mutex_lock(&teedev->mutex);
                        idr_remove(&teedev->idr, shm->id);
                        mutex_unlock(&teedev->mutex);
                }
                release_registered_pages(shm);
        }
        kfree(shm);
        teedev_ctx_put(ctx);
        tee_device_put(teedev);
        return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);

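/*
 * Usage sketch (hypothetical, not part of this file): registering an
 * existing kernel buffer, assuming a valid struct tee_context *ctx:
 *
 *      void *buf = kmalloc(len, GFP_KERNEL);
 *      struct tee_shm *shm;
 *
 *      shm = tee_shm_register(ctx, (unsigned long)buf, len,
 *                             TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
 *      if (IS_ERR(shm)) {
 *              kfree(buf);
 *              return PTR_ERR(shm);
 *      }
 *      // ... use the buffer with the TEE ...
 *      tee_shm_free(shm);      // unregisters and drops the page references
 *      kfree(buf);
 */
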
static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
        tee_shm_put(filp->private_data);
        return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct tee_shm *shm = filp->private_data;
        size_t size = vma->vm_end - vma->vm_start;

        /* Refuse sharing shared memory provided by application */
        if (shm->flags & TEE_SHM_USER_MAPPED)
                return -EINVAL;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
                return -EINVAL;

        return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
        .owner = THIS_MODULE,
        .release = tee_shm_fop_release,
        .mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:        Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
        int fd;

        if (!(shm->flags & TEE_SHM_DMA_BUF))
                return -EINVAL;

        /* matched by tee_shm_put() in tee_shm_fop_release() */
        refcount_inc(&shm->refcount);
        fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
        if (fd < 0)
                tee_shm_put(shm);
        return fd;
}

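/*
 * The descriptor returned by tee_shm_get_fd() is backed by tee_shm_fops
 * above: user space can mmap() it (tee_shm_fop_mmap()), and the reference
 * taken in tee_shm_get_fd() is dropped when the descriptor is closed
 * (tee_shm_fop_release()).
 */
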
/**
 * tee_shm_free() - Free shared memory
 * @shm:        Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
        tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:        Shared memory handle
 * @va:         Virtual address to translate
 * @pa:         Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if ((char *)va < (char *)shm->kaddr)
                return -EINVAL;
        if ((char *)va >= ((char *)shm->kaddr + shm->size))
                return -EINVAL;

        return tee_shm_get_pa(
                        shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:        Shared memory handle
 * @pa:         Physical address to translate
 * @va:         Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return -EINVAL;
        /* Check that we're in the range of the shm */
        if (pa < shm->paddr)
                return -EINVAL;
        if (pa >= (shm->paddr + shm->size))
                return -EINVAL;

        if (va) {
                void *v = tee_shm_get_va(shm, pa - shm->paddr);

                if (IS_ERR(v))
                        return PTR_ERR(v);
                *va = v;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *      the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
        if (!(shm->flags & TEE_SHM_MAPPED))
                return ERR_PTR(-EINVAL);
        if (offs >= shm->size)
                return ERR_PTR(-EINVAL);
        return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:        Shared memory handle
 * @offs:       Offset from start of this shared memory
 * @pa:         Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *      error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
        if (offs >= shm->size)
                return -EINVAL;
        if (pa)
                *pa = shm->paddr + offs;
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

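/*
 * Usage sketch (hypothetical): copy data into a mapped shared buffer and
 * look up the physical address of the same offset, e.g. when building a
 * message for the secure world:
 *
 *      void *va = tee_shm_get_va(shm, offs);
 *      phys_addr_t pa;
 *
 *      if (IS_ERR(va))
 *              return PTR_ERR(va);
 *      memcpy(va, data, data_len);
 *      if (tee_shm_get_pa(shm, offs, &pa))
 *              return -EINVAL;
 *      // pa and va now refer to the same bytes of the shared memory
 */
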
/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:        Context owning the shared memory
 * @id:         Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
        struct tee_device *teedev;
        struct tee_shm *shm;

        if (!ctx)
                return ERR_PTR(-EINVAL);

        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
        /*
         * If the tee_shm was found in the IDR it must have a refcount
         * larger than 0 due to the guarantee in tee_shm_put() below. So
         * it's safe to use refcount_inc().
         */
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
        else
                refcount_inc(&shm->refcount);
        mutex_unlock(&teedev->mutex);
        return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

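/*
 * A reference obtained with tee_shm_get_from_id() must be balanced by a
 * matching tee_shm_put(). Removing the id from the IDR under the same
 * mutex in tee_shm_put() below is what guarantees that the refcount_inc()
 * above never sees a refcount of zero.
 */
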
/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:        Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
        struct tee_device *teedev = shm->ctx->teedev;
        bool do_release = false;

        mutex_lock(&teedev->mutex);
        if (refcount_dec_and_test(&shm->refcount)) {
                /*
                 * refcount has reached 0, we must now remove it from the
                 * IDR before releasing the mutex. This will guarantee that
                 * the refcount_inc() in tee_shm_get_from_id() never starts
                 * from 0.
                 */
                if (shm->flags & TEE_SHM_DMA_BUF)
                        idr_remove(&teedev->idr, shm->id);
                do_release = true;
        }
        mutex_unlock(&teedev->mutex);

        if (do_release)
                tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);