// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
				IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

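/*
 * The VPU MMU uses a four-level page table: bits 47:39 of a VPU address index
 * the PGD, bits 38:30 the PUD, bits 29:21 the PMD and bits 20:12 the PTE; the
 * low 12 bits are the offset within a 4 KB page. Each table holds
 * IVPU_MMU_PGTABLE_ENTRIES 64-bit descriptors whose low bits (plus the
 * contiguous-hint bit 52) carry the entry flags defined above.
 */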
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	dma_addr_t pgd_dma;

	pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
						  GFP_KERNEL);
	if (!pgtable->pgd_dma_ptr)
		return -ENOMEM;

	pgtable->pgd_dma = pgd_dma;

	return 0;
}

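/*
 * Table addresses stored in descriptors have entry flags OR-ed into the low
 * bits, so the flags must be masked off to recover the DMA address that was
 * originally allocated before it can be freed.
 */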
static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
	if (cpu_addr)
		dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
				  dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
}

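/*
 * Walk every populated level of the hierarchy, freeing the DMA-allocated
 * tables themselves and the kernel-side shadow arrays (pud_ptrs, pmd_ptrs,
 * pte_ptrs) that track their CPU addresses.
 */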
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	int pgd_idx, pud_idx, pmd_idx;
	dma_addr_t pud_dma, pmd_dma, pte_dma;
	u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

	for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
		pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
		pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

		if (!pud_dma_ptr)
			continue;

		for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
			pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
			pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

			if (!pmd_dma_ptr)
				continue;

			for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
				pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
				pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

				ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
			}

			kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
			ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
		}

		kfree(pgtable->pmd_ptrs[pgd_idx]);
		kfree(pgtable->pte_ptrs[pgd_idx]);
		ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
	}

	ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

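/*
 * Lazily allocate the PUD referenced by the given PGD slot, together with the
 * shadow arrays that will hold CPU pointers to its PMDs and PTE tables.
 * Returns the existing table if one is already installed, or NULL on
 * allocation failure.
 */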
static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
	u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
	dma_addr_t pud_dma;

	if (pud_dma_ptr)
		return pud_dma_ptr;

	pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
	if (!pud_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
	pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pmd_ptrs[pgd_idx])
		goto err_free_pud_dma_ptr;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
	pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx])
		goto err_free_pmd_ptrs;

	pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
	pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

	return pud_dma_ptr;

err_free_pmd_ptrs:
	kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
	ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
		    int pud_idx)
{
	u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
	dma_addr_t pmd_dma;

	if (pmd_dma_ptr)
		return pmd_dma_ptr;

	pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
	if (!pmd_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
	pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
		goto err_free_pmd_dma_ptr;

	pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
	pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

	return pmd_dma_ptr;

err_free_pmd_dma_ptr:
	ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
		    int pgd_idx, int pud_idx, int pmd_idx)
{
	u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
	dma_addr_t pte_dma;

	if (pte_dma_ptr)
		return pte_dma_ptr;

	pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
	if (!pte_dma_ptr)
		return NULL;

	pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
	pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

	return pte_dma_ptr;
}

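/*
 * Map a single 4 KB page: make sure every intermediate table on the path
 * exists (allocating on demand), then write the leaf descriptor. The caller
 * is responsible for flushing the page tables and invalidating the TLB.
 */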
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			  u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
	u64 *pte;
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Allocate PUD - second level page table if needed */
	if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
		return -ENOMEM;

	/* Allocate PMD - third level page table if needed */
	if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
		return -ENOMEM;

	/* Allocate PTE - fourth level page table if needed */
	pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
	if (!pte)
		return -ENOMEM;

	/* Update PTE */
	pte[pte_idx] = dma_addr | prot;

	return 0;
}

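/*
 * Map 16 physically and virtually contiguous, 64 KB aligned 4 KB pages with
 * the contiguous-hint flag set in each descriptor, which can let the MMU
 * cache the whole 64 KB range with a single TLB entry.
 */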
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      dma_addr_t dma_addr, u64 prot)
{
	size_t size = IVPU_MMU_CONT_PAGES_SIZE;

	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

	prot |= IVPU_MMU_ENTRY_FLAG_CONT;

	while (size) {
		int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

		if (ret)
			return ret;

		size -= IVPU_MMU_PAGE_SIZE;
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		dma_addr += IVPU_MMU_PAGE_SIZE;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Update PTE with dummy physical address and clear flags */
	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

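/*
 * Flush the CPU cache lines backing every page table touched by the given
 * range so the device observes the updated descriptors. The range is widened
 * to PMD granularity and each affected PTE table is flushed whole, as are the
 * intermediate tables on the way up to the PGD.
 */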
static void
ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
	u64 end_addr = vpu_addr + size;

	/* Align to PMD entry (2 MB) */
	vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);

	while (vpu_addr < end_addr) {
		int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
		u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;

		while (vpu_addr < end_addr && vpu_addr < pud_end) {
			int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
			u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;

			while (vpu_addr < end_addr && vpu_addr < pmd_end) {
				int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);

				clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
						    IVPU_MMU_PGTABLE_SIZE);
				vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
			}
			clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
					    IVPU_MMU_PGTABLE_SIZE);
		}
		clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
	}
	clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
}

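/*
 * Map an arbitrary-sized, page-aligned range, using 64 KB contiguous
 * mappings whenever both addresses are 64 KB aligned, at least 64 KB remains
 * and ivpu_disable_mmu_cont_pages is not set; otherwise fall back to
 * individual 4 KB pages.
 */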
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
	int map_size;
	int ret;

	while (size) {
		if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
		    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
			ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_CONT_PAGES_SIZE;
		} else {
			ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_PAGE_SIZE;
		}

		if (ret)
			return ret;

		vpu_addr += map_size;
		dma_addr += map_size;
		size -= map_size;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	while (size) {
		ivpu_mmu_context_unmap_page(ctx, vpu_addr);
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size -= IVPU_MMU_PAGE_SIZE;
	}
}

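/*
 * Map a DMA-mapped scatter-gather table into the context address space.
 * Each segment is mapped starting at vpu_addr, the affected page tables are
 * flushed under the context lock, and the device TLB is invalidated once at
 * the end so the MMU picks up the new translations.
 */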
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
	struct scatterlist *sg;
	int ret;
	u64 prot;
	u64 i;

	if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
		return -EINVAL;

	if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
		return -EINVAL;

	prot = IVPU_MMU_ENTRY_MAPPED;
	if (llc_coherent)
		prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
		if (ret) {
			ivpu_err(vdev, "Failed to map context pages\n");
			mutex_unlock(&ctx->lock);
			return ret;
		}
		ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
		vpu_addr += size;
	}

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
	return ret;
}

void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret;
	u64 i;

	if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
		ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
		ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
		vpu_addr += size;
	}

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

int
ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
				    const struct ivpu_addr_range *range,
				    u64 size, struct drm_mm_node *node)
{
	lockdep_assert_held(&ctx->lock);

	if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
		if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
						 range->start, range->end, DRM_MM_INSERT_BEST))
			return 0;
	}

	return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
					   range->start, range->end, DRM_MM_INSERT_BEST);
}

void
ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
	lockdep_assert_held(&ctx->lock);

	drm_mm_remove_node(node);
}

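/*
 * Initialize a context and its address-space allocator. The global context
 * (id 0) manages the global..shave range used for buffers shared with the
 * firmware; user contexts manage the user..dma range.
 */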
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
	u64 start, end;
	int ret;

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->bo_list);

	ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
	if (ret)
		return ret;

	if (!context_id) {
		start = vdev->hw->ranges.global.start;
		end = vdev->hw->ranges.shave.end;
	} else {
		start = vdev->hw->ranges.user.start;
		end = vdev->hw->ranges.dma.end;
	}

	drm_mm_init(&ctx->mm, start, end - start);
	ctx->id = context_id;

	return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
		return;

	mutex_destroy(&ctx->lock);
	ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
	drm_mm_takedown(&ctx->mm);

	ctx->pgtable.pgd_dma_ptr = NULL;
	ctx->pgtable.pgd_dma = 0;
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
	return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
	return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

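/*
 * Flag the file_priv owning the faulting SSID so further submissions from it
 * can be rejected; called from the MMU fault handling path. The xarray lock
 * keeps the lookup safe against concurrent context teardown.
 */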
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
	struct ivpu_file_priv *file_priv;

	xa_lock(&vdev->context_xa);

	file_priv = xa_load(&vdev->context_xa, ssid);
	if (file_priv)
		file_priv->has_mmu_faults = true;

	xa_unlock(&vdev->context_xa);
}

int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
	int ret;

	drm_WARN_ON(&vdev->drm, !ctx_id);

	ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to set page table: %d\n", ret);
		goto err_context_fini;
	}

	return 0;

err_context_fini:
	ivpu_mmu_context_fini(vdev, ctx);
	return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	drm_WARN_ON(&vdev->drm, !ctx->id);

	ivpu_mmu_clear_pgtable(vdev, ctx->id);
	ivpu_mmu_context_fini(vdev, ctx);
}