drivers/accel/ivpu/ivpu_mmu_context.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

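/*
 * The VPU uses a four-level page table. Each table holds
 * IVPU_MMU_PGTABLE_ENTRIES 64-bit descriptors, and a 48-bit VPU address
 * decodes into four 9-bit table indices (PGD/PUD/PMD/PTE) followed by a
 * 12-bit offset within a 4 KB page, as encoded by the masks below.
 */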
#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
                                IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

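/*
 * Allocate the root (PGD) table from coherent DMA memory. Lower-level
 * tables are allocated lazily, as mappings are created.
 */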
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        dma_addr_t pgd_dma;

        pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
                                                  GFP_KERNEL);
        if (!pgtable->pgd_dma_ptr)
                return -ENOMEM;

        pgtable->pgd_dma = pgd_dma;

        return 0;
}

static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
        if (cpu_addr)
                dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
                                  dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
}

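/*
 * Walk the whole hierarchy and release every table that was allocated,
 * including the kzalloc'ed arrays that track the CPU pointers of each
 * lower level.
 */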
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        int pgd_idx, pud_idx, pmd_idx;
        dma_addr_t pud_dma, pmd_dma, pte_dma;
        u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

        for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
                pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
                pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

                if (!pud_dma_ptr)
                        continue;

                for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
                        pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
                        pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

                        if (!pmd_dma_ptr)
                                continue;

                        for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
                                pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
                                pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

                                ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
                        }

                        kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
                        ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
                }

                kfree(pgtable->pmd_ptrs[pgd_idx]);
                kfree(pgtable->pte_ptrs[pgd_idx]);
                ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
        }

        ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

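/*
 * The ivpu_mmu_ensure_*() helpers below allocate a missing table level on
 * first use and publish it by writing a valid descriptor into the parent
 * table. ensure_pud() additionally allocates the pmd_ptrs/pte_ptrs
 * bookkeeping arrays that let later walks find the CPU addresses of the
 * lower-level tables.
 */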
static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
        u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
        dma_addr_t pud_dma;

        if (pud_dma_ptr)
                return pud_dma_ptr;

        pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
        if (!pud_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
        pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pmd_ptrs[pgd_idx])
                goto err_free_pud_dma_ptr;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
        pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx])
                goto err_free_pmd_ptrs;

        pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
        pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

        return pud_dma_ptr;

err_free_pmd_ptrs:
        kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
        ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
                    int pud_idx)
{
        u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
        dma_addr_t pmd_dma;

        if (pmd_dma_ptr)
                return pmd_dma_ptr;

        pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
        if (!pmd_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
        pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
                goto err_free_pmd_dma_ptr;

        pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
        pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

        return pmd_dma_ptr;

err_free_pmd_dma_ptr:
        ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                    int pgd_idx, int pud_idx, int pmd_idx)
{
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
                return pte_dma_ptr;

        pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
        if (!pte_dma_ptr)
                return NULL;

        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

        return pte_dma_ptr;
}

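/*
 * Map a single 4 KB page: split the VPU address into its four table
 * indices, make sure every intermediate level exists, then write the leaf
 * descriptor combining the DMA address with the protection flags.
 */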
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                          u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
        u64 *pte;
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Allocate PUD - second level page table if needed */
        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
                return -ENOMEM;

        /* Allocate PMD - third level page table if needed */
        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;

        /* Allocate PTE - fourth level page table if needed */
        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;

        /* Update PTE */
        pte[pte_idx] = dma_addr | prot;

        return 0;
}

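/*
 * Map 16 contiguous, 64 KB aligned pages with the contiguous-descriptor
 * hint set, which may allow the MMU to cache the whole 64 KB range with a
 * single TLB entry.
 */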
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              dma_addr_t dma_addr, u64 prot)
{
        size_t size = IVPU_MMU_CONT_PAGES_SIZE;

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

        prot |= IVPU_MMU_ENTRY_FLAG_CONT;

        while (size) {
                int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

                if (ret)
                        return ret;

                size -= IVPU_MMU_PAGE_SIZE;
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                dma_addr += IVPU_MMU_PAGE_SIZE;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Update PTE with dummy physical address and clear flags */
        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

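/*
 * Flush CPU caches for every page table covering the given range so the
 * device observes the updated descriptors. The range is first widened to
 * the 2 MB granularity of a whole PTE table.
 */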
static void
ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
        u64 end_addr = vpu_addr + size;

        /* Align to PMD entry (2 MB) */
        vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);

        while (vpu_addr < end_addr) {
                int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
                u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;

                while (vpu_addr < end_addr && vpu_addr < pud_end) {
                        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
                        u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;

                        while (vpu_addr < end_addr && vpu_addr < pmd_end) {
                                int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);

                                clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
                                                    IVPU_MMU_PGTABLE_SIZE);
                                vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
                        }
                        clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
                                            IVPU_MMU_PGTABLE_SIZE);
                }
                clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
        }
        clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
}

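/*
 * Map an arbitrary page-aligned range: use 64 KB contiguous mappings
 * whenever both addresses are suitably aligned, enough bytes remain and
 * cont pages are not disabled via ivpu_disable_mmu_cont_pages; otherwise
 * fall back to individual 4 KB pages.
 */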
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
        int map_size;
        int ret;

        while (size) {
                if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
                    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
                        ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_CONT_PAGES_SIZE;
                } else {
                        ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_PAGE_SIZE;
                }

                if (ret)
                        return ret;

                vpu_addr += map_size;
                dma_addr += map_size;
                size -= map_size;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        while (size) {
                ivpu_mmu_context_unmap_page(ctx, vpu_addr);
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size -= IVPU_MMU_PAGE_SIZE;
        }
}

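/*
 * Map a scatter-gather table into the context's VPU address space. Each
 * DMA segment is mapped at the running vpu_addr from its page-aligned
 * start (sg->offset is folded back into address and length), the affected
 * page tables are flushed, and finally the TLB is invalidated for this
 * context so the device cannot use stale translations.
 */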
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                         u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
        struct scatterlist *sg;
        int ret;
        u64 prot;
        u64 i;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                return -EINVAL;

        if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
                return -EINVAL;

        prot = IVPU_MMU_ENTRY_MAPPED;
        if (llc_coherent)
                prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
                        mutex_unlock(&ctx->lock);
                        return ret;
                }
                ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
        return ret;
}

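/*
 * Reverse of ivpu_mmu_context_map_sgt(): point every PTE in the range at
 * the dummy address, flush the tables and invalidate the context's TLB.
 */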
void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int ret;
        u64 i;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

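/*
 * Reserve address space for a buffer. Prefer 64 KB alignment so the
 * mapping can later use contiguous (64 KB) entries; fall back to plain
 * 4 KB page alignment if no such range is available.
 */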
int
ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
                                    const struct ivpu_addr_range *range,
                                    u64 size, struct drm_mm_node *node)
{
        lockdep_assert_held(&ctx->lock);

        if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
                if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
                                                 range->start, range->end, DRM_MM_INSERT_BEST))
                        return 0;
        }

        return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
                                           range->start, range->end, DRM_MM_INSERT_BEST);
}

void
ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
        lockdep_assert_held(&ctx->lock);

        drm_mm_remove_node(node);
}

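/*
 * Context 0 is the global context and manages the device-global address
 * ranges; any other ID denotes a per-client (user) context limited to the
 * user and dma ranges.
 */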
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
        u64 start, end;
        int ret;

        mutex_init(&ctx->lock);
        INIT_LIST_HEAD(&ctx->bo_list);

        ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
        if (ret)
                return ret;

        if (!context_id) {
                start = vdev->hw->ranges.global.start;
                end = vdev->hw->ranges.shave.end;
        } else {
                start = vdev->hw->ranges.user.start;
                end = vdev->hw->ranges.dma.end;
        }

        drm_mm_init(&ctx->mm, start, end - start);
        ctx->id = context_id;

        return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
                return;

        mutex_destroy(&ctx->lock);
        ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
        drm_mm_takedown(&ctx->mm);

        ctx->pgtable.pgd_dma_ptr = NULL;
        ctx->pgtable.pgd_dma = 0;
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

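/*
 * Flag the client that owns the given SSID as having triggered MMU
 * faults; xa_lock() keeps the lookup safe against concurrent context
 * teardown.
 */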
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
        struct ivpu_file_priv *file_priv;

        xa_lock(&vdev->context_xa);

        file_priv = xa_load(&vdev->context_xa, ssid);
        if (file_priv)
                file_priv->has_mmu_faults = true;

        xa_unlock(&vdev->context_xa);
}

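/*
 * Create a user context: set up its page tables and address space, then
 * program the PGD into the MMU for this context ID (used as the SSID) so
 * the device can translate through it.
 */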
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
        int ret;

        drm_WARN_ON(&vdev->drm, !ctx_id);

        ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
                return ret;
        }

        ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to set page table: %d\n", ret);
                goto err_context_fini;
        }

        return 0;

err_context_fini:
        ivpu_mmu_context_fini(vdev, ctx);
        return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        drm_WARN_ON(&vdev->drm, !ctx->id);

        ivpu_mmu_clear_pgtable(vdev, ctx->id);
        ivpu_mmu_context_fini(vdev, ctx);
}