drivers/accel/ivpu/ivpu_mmu_context.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
                                IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

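/*
 * Allocate the top-level page directory (PGD) for a context. The lower
 * page table levels are allocated lazily, on first use, by the
 * ivpu_mmu_ensure_*() helpers below.
 */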
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        dma_addr_t pgd_dma;

        pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
                                                  GFP_KERNEL);
        if (!pgtable->pgd_dma_ptr)
                return -ENOMEM;

        pgtable->pgd_dma = pgd_dma;

        return 0;
}

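/*
 * Free a single page table. The stored DMA address may still carry
 * descriptor flag bits (e.g. VALID), so they are masked off before the
 * address is handed back to the DMA allocator.
 */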
static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
        if (cpu_addr)
                dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
                                  dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
}

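/*
 * Walk the four-level page table tree and free every allocated table,
 * together with the kernel-side shadow arrays that track the CPU virtual
 * address of each table.
 */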
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        int pgd_idx, pud_idx, pmd_idx;
        dma_addr_t pud_dma, pmd_dma, pte_dma;
        u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

        for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
                pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
                pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

                if (!pud_dma_ptr)
                        continue;

                for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
                        pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
                        pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

                        if (!pmd_dma_ptr)
                                continue;

                        for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
                                pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
                                pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

                                ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
                        }

                        kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
                        ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
                }

                kfree(pgtable->pmd_ptrs[pgd_idx]);
                kfree(pgtable->pte_ptrs[pgd_idx]);
                ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
        }

        ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

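/*
 * Return the PUD for the given PGD slot, allocating it on first use.
 * The shadow arrays for the PMD and PTE levels underneath are allocated
 * together with the PUD so that the free path never sees a partially
 * set up slot.
 */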
static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
        u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
        dma_addr_t pud_dma;

        if (pud_dma_ptr)
                return pud_dma_ptr;

        pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
        if (!pud_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
        pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pmd_ptrs[pgd_idx])
                goto err_free_pud_dma_ptr;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
        pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx])
                goto err_free_pmd_ptrs;

        pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
        pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

        return pud_dma_ptr;

err_free_pmd_ptrs:
        kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
        ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
        return NULL;
}

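/*
 * Return the PMD for the given PUD slot, allocating it and its PTE
 * shadow array on first use.
 */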
static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
                    int pud_idx)
{
        u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
        dma_addr_t pmd_dma;

        if (pmd_dma_ptr)
                return pmd_dma_ptr;

        pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
        if (!pmd_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
        pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
                goto err_free_pmd_dma_ptr;

        pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
        pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

        return pmd_dma_ptr;

err_free_pmd_dma_ptr:
        ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
        return NULL;
}

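/* Return the PTE table for the given PMD slot, allocating it on first use. */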
static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                    int pgd_idx, int pud_idx, int pmd_idx)
{
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
                return pte_dma_ptr;

        pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
        if (!pte_dma_ptr)
                return NULL;

        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

        return pte_dma_ptr;
}

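/*
 * Map a single 4K page: split the VPU address into its four table
 * indexes, allocate any missing intermediate tables, then write the
 * leaf PTE with the DMA address and protection bits.
 */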
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                          u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
        u64 *pte;
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Allocate PUD - second level page table if needed */
        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
                return -ENOMEM;

        /* Allocate PMD - third level page table if needed */
        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;

        /* Allocate PTE - fourth level page table if needed */
        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;

        /* Update PTE */
        pte[pte_idx] = dma_addr | prot;

        return 0;
}

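/*
 * Map 64K of contiguous memory as sixteen 4K PTEs with the contiguous
 * hint (CONT) bit set, allowing the TLB to cache the whole range as a
 * single entry. Both addresses must be 64K aligned.
 */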
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              dma_addr_t dma_addr, u64 prot)
{
        size_t size = IVPU_MMU_CONT_PAGES_SIZE;

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

        prot |= IVPU_MMU_ENTRY_FLAG_CONT;

        while (size) {
                int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

                if (ret)
                        return ret;

                size -= IVPU_MMU_PAGE_SIZE;
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                dma_addr += IVPU_MMU_PAGE_SIZE;
        }

        return 0;
}

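/* Unmap a single 4K page by overwriting its PTE with the invalid entry. */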
static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Update PTE with dummy physical address and clear flags */
        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

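/*
 * Flush the CPU cache lines backing the page table entries that cover
 * [vpu_addr, vpu_addr + size) so the device observes the updates: each
 * touched PTE table is flushed first, followed by the PMD, PUD and PGD
 * levels above it.
 */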
static void
ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
        u64 end_addr = vpu_addr + size;

        /* Align to PMD entry (2 MB) */
        vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);

        while (vpu_addr < end_addr) {
                int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
                u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;

                while (vpu_addr < end_addr && vpu_addr < pud_end) {
                        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
                        u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;

                        while (vpu_addr < end_addr && vpu_addr < pmd_end) {
                                int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);

                                clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
                                                    IVPU_MMU_PGTABLE_SIZE);
                                vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
                        }
                        clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
                                            IVPU_MMU_PGTABLE_SIZE);
                }
                clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
        }
        clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
}

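/*
 * Map an arbitrary range, using 64K contiguous mappings whenever they
 * are enabled and the alignment and remaining size allow it, and
 * falling back to individual 4K pages otherwise.
 */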
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
        int map_size;
        int ret;

        while (size) {
                if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
                    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
                        ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_CONT_PAGES_SIZE;
                } else {
                        ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_PAGE_SIZE;
                }

                if (ret)
                        return ret;

                vpu_addr += map_size;
                dma_addr += map_size;
                size -= map_size;
        }

        return 0;
}

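/* Unmap a range one 4K page at a time. */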
static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        while (size) {
                ivpu_mmu_context_unmap_page(ctx, vpu_addr);
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size -= IVPU_MMU_PAGE_SIZE;
        }
}

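/*
 * Map a DMA-mapped scatter-gather table at vpu_addr. Each segment is
 * mapped under ctx->lock and the affected page table cache lines are
 * flushed; the context's TLB is invalidated once, after all segments
 * are in place.
 */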
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                         u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
        struct scatterlist *sg;
        int ret;
        u64 prot;
        u64 i;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                return -EINVAL;
        /*
         * VPU is only 32 bit, but DMA engine is 38 bit
         * Ranges < 2 GB are reserved for VPU internal registers
         * Limit range to 8 GB
         */
        if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
                return -EINVAL;

        prot = IVPU_MMU_ENTRY_MAPPED;
        if (llc_coherent)
                prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
                        mutex_unlock(&ctx->lock);
                        return ret;
                }
                ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
        return ret;
}

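/*
 * Unmap a previously mapped scatter-gather table and invalidate the
 * context's TLB. Mirrors ivpu_mmu_context_map_sgt().
 */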
void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int ret;
        u64 i;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

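/*
 * Reserve an address range for a buffer in the context's address space.
 * Ranges big enough for 64K contiguous mappings are first tried with
 * 64K alignment so that the mapping code can use the contiguous hint;
 * otherwise (or on failure) plain 4K alignment is used.
 */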
int
ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
                                    const struct ivpu_addr_range *range,
                                    u64 size, struct drm_mm_node *node)
{
        lockdep_assert_held(&ctx->lock);

        if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
                if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
                                                 range->start, range->end, DRM_MM_INSERT_BEST))
                        return 0;
        }

        return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
                                           range->start, range->end, DRM_MM_INSERT_BEST);
}

void
ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
        lockdep_assert_held(&ctx->lock);

        drm_mm_remove_node(node);
}

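/*
 * Initialize a context: allocate its PGD and set up the address space
 * allocator. The global context (ID 0) manages the global and SHAVE
 * ranges; user contexts manage the user and DMA ranges.
 */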
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
        u64 start, end;
        int ret;

        mutex_init(&ctx->lock);
        INIT_LIST_HEAD(&ctx->bo_list);

        ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
        if (ret)
                return ret;

        if (!context_id) {
                start = vdev->hw->ranges.global.start;
                end = vdev->hw->ranges.shave.end;
        } else {
                start = vdev->hw->ranges.user.start;
                end = vdev->hw->ranges.dma.end;
        }

        drm_mm_init(&ctx->mm, start, end - start);
        ctx->id = context_id;

        return 0;
}

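/* Tear down a context: free all of its page tables and the address space allocator. */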
static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
                return;

        mutex_destroy(&ctx->lock);
        ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
        drm_mm_takedown(&ctx->mm);

        ctx->pgtable.pgd_dma_ptr = NULL;
        ctx->pgtable.pgd_dma = 0;
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
        ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

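/*
 * Called on an MMU fault: flag the file context bound to the faulting
 * SSID. Done under the xarray lock so the lookup cannot race with
 * context removal.
 */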
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
        struct ivpu_file_priv *file_priv;

        xa_lock(&vdev->context_xa);

        file_priv = xa_load(&vdev->context_xa, ssid);
        if (file_priv)
                file_priv->has_mmu_faults = true;

        xa_unlock(&vdev->context_xa);
}

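/*
 * Create a user context and register its page table with the MMU for
 * the given SSID; on failure the partially initialized context is torn
 * down again.
 */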
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
        int ret;

        drm_WARN_ON(&vdev->drm, !ctx_id);

        ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
                return ret;
        }

        ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to set page table: %d\n", ret);
                goto err_context_fini;
        }

        return 0;

err_context_fini:
        ivpu_mmu_context_fini(vdev, ctx);
        return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        drm_WARN_ON(&vdev->drm, !ctx->id);

        ivpu_mmu_clear_pgtable(vdev, ctx->id);
        ivpu_mmu_context_fini(vdev, ctx);
}