// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
        int ret;
        u32 val;

        /* Wait for the MMU status to indicate there is no active command, in
         * case one is pending. */
        ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
                val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

        if (ret)
                dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

        return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
        int status;

        /* write AS_COMMAND when MMU is ready to accept another command */
        status = wait_ready(pfdev, as_nr);
        if (!status)
                mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

        return status;
}

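/*
 * Lock a VA region in the AS so its mappings can be updated safely.
 * Going by the computation below, the low 6 bits of AS_LOCKADDR encode
 * log2 of the region size minus one (11 == 4KB) and the remaining bits
 * the page-aligned base address.
 */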
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
                        u64 iova, size_t size)
{
        u8 region_width;
        u64 region = iova & PAGE_MASK;
        /*
         * fls(num_pages) returns a value in the range 1..32, so
         * 10 + fls(num_pages) gives a region width in the range 11..42.
         */

        size = round_up(size, PAGE_SIZE);

        region_width = 10 + fls(size >> PAGE_SHIFT);
        if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
                /* not a power of two, so go up to the next power of two */
                region_width += 1;
        }
        region |= region_width;

        /* Lock the region that needs to be updated */
        mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
        mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
        write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

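/*
 * Issue an AS command with pfdev->as_lock already held. A negative
 * as_nr means no address space is bound, so there is nothing to do.
 * Every command except UNLOCK first locks the region it operates on.
 */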
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
                                      u64 iova, size_t size, u32 op)
{
        if (as_nr < 0)
                return 0;

        if (op != AS_COMMAND_UNLOCK)
                lock_region(pfdev, as_nr, iova, size);

        /* Run the MMU operation */
        write_cmd(pfdev, as_nr, op);

        /* Wait for the flush to complete */
        return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
                               struct panfrost_mmu *mmu,
                               u64 iova, size_t size, u32 op)
{
        int ret;

        spin_lock(&pfdev->as_lock);
        ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
        spin_unlock(&pfdev->as_lock);
        return ret;
}

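/*
 * Program the page-table base (TRANSTAB) and memory attributes
 * (MEMATTR) for an AS and latch them with an UPDATE command;
 * panfrost_mmu_disable() clears the same registers to detach an AS.
 */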
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
        int as_nr = mmu->as;
        struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
        u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
        u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

        mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

        mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
        mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

        /* Need to revisit mem attrs.
         * NC is the default, Mali driver is inner WT.
         */
        mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
        mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

        write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
        mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

        mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
        mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

        mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
        mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

        write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

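/*
 * Bind an MMU context to a hardware address space, reusing the current
 * assignment if there is one. If all address spaces are taken, the
 * least-recently-used context with no active jobs is evicted and its
 * AS reused. The matching release is panfrost_mmu_as_put().
 */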
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
        int as;

        spin_lock(&pfdev->as_lock);

        as = mmu->as;
        if (as >= 0) {
                int en = atomic_inc_return(&mmu->as_count);
                WARN_ON(en >= NUM_JOB_SLOTS);

                list_move(&mmu->list, &pfdev->as_lru_list);
                goto out;
        }

        /* Check for a free AS */
        as = ffz(pfdev->as_alloc_mask);
        if (!(BIT(as) & pfdev->features.as_present)) {
                struct panfrost_mmu *lru_mmu;

                list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
                        if (!atomic_read(&lru_mmu->as_count))
                                break;
                }
                WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

                list_del_init(&lru_mmu->list);
                as = lru_mmu->as;

                WARN_ON(as < 0);
                lru_mmu->as = -1;
        }

        /* Assign the free or reclaimed AS to the FD */
        mmu->as = as;
        set_bit(as, &pfdev->as_alloc_mask);
        atomic_set(&mmu->as_count, 1);
        list_add(&mmu->list, &pfdev->as_lru_list);

        dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

        panfrost_mmu_enable(pfdev, mmu);

out:
        spin_unlock(&pfdev->as_lock);
        return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
        atomic_dec(&mmu->as_count);
        WARN_ON(atomic_read(&mmu->as_count) < 0);
}

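/*
 * After a GPU reset all AS assignments are stale: forget them and
 * re-arm the MMU interrupts.
 */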
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
        struct panfrost_mmu *mmu, *mmu_tmp;

        spin_lock(&pfdev->as_lock);

        pfdev->as_alloc_mask = 0;

        list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
                mmu->as = -1;
                atomic_set(&mmu->as_count, 0);
                list_del_init(&mmu->list);
        }

        spin_unlock(&pfdev->as_lock);

        mmu_write(pfdev, MMU_INT_CLEAR, ~0);
        mmu_write(pfdev, MMU_INT_MASK, ~0);
}

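/*
 * Pick the page size for the next chunk: use a 2MB block mapping when
 * the address is 2MB-aligned and at least 2MB remain, else 4KB.
 */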
static size_t get_pgsize(u64 addr, size_t size)
{
        if (addr & (SZ_2M - 1) || size < SZ_2M)
                return SZ_4K;

        return SZ_2M;
}

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
                                     struct panfrost_mmu *mmu,
                                     u64 iova, size_t size)
{
        if (mmu->as < 0)
                return;

        pm_runtime_get_noresume(pfdev->dev);

        /* Flush the PTs only if we're already awake */
        if (pm_runtime_active(pfdev->dev))
                mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

        pm_runtime_put_sync_autosuspend(pfdev->dev);
}

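/*
 * Map every DMA segment of an sg_table at the given GPU VA in the
 * largest chunks get_pgsize() allows, then flush the whole range.
 */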
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
                      u64 iova, int prot, struct sg_table *sgt)
{
        unsigned int count;
        struct scatterlist *sgl;
        struct io_pgtable_ops *ops = mmu->pgtbl_ops;
        u64 start_iova = iova;

        for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
                unsigned long paddr = sg_dma_address(sgl);
                size_t len = sg_dma_len(sgl);

                dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

                while (len) {
                        size_t pgsize = get_pgsize(iova | paddr, len);

                        ops->map(ops, iova, paddr, pgsize, prot);
                        iova += pgsize;
                        paddr += pgsize;
                        len -= pgsize;
                }
        }

        panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

        return 0;
}

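/* Map a BO's pages at the GPU VA reserved by its mapping's drm_mm node. */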
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
        struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
        struct sg_table *sgt;
        int prot = IOMMU_READ | IOMMU_WRITE;

        if (WARN_ON(mapping->active))
                return 0;

        if (bo->noexec)
                prot |= IOMMU_NOEXEC;

        sgt = drm_gem_shmem_get_pages_sgt(obj);
        if (WARN_ON(IS_ERR(sgt)))
                return PTR_ERR(sgt);

        mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
                   prot, sgt);
        mapping->active = true;

        return 0;
}

void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
        struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
        struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
        u64 iova = mapping->mmnode.start << PAGE_SHIFT;
        size_t len = mapping->mmnode.size << PAGE_SHIFT;
        size_t unmapped_len = 0;

        if (WARN_ON(!mapping->active))
                return;

        dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
                mapping->mmu->as, iova, len);

        while (unmapped_len < len) {
                size_t unmapped_page;
                size_t pgsize = get_pgsize(iova, len - unmapped_len);

                if (ops->iova_to_phys(ops, iova)) {
                        unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
                        WARN_ON(unmapped_page != pgsize);
                }
                iova += pgsize;
                unmapped_len += pgsize;
        }

        panfrost_mmu_flush_range(pfdev, mapping->mmu,
                                 mapping->mmnode.start << PAGE_SHIFT, len);
        mapping->active = false;
}

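/*
 * The io-pgtable TLB callbacks can be no-ops: panfrost flushes
 * explicitly through AS FLUSH_PT/FLUSH_MEM commands (see
 * panfrost_mmu_flush_range()) instead of from the page-table code.
 */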
static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
        //struct panfrost_device *pfdev = cookie;
        // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
                               void *cookie)
{
        mmu_tlb_sync_context(cookie);
}

static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
                               void *cookie)
{
        mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
        .tlb_flush_all  = mmu_tlb_inv_context_s1,
        .tlb_flush_walk = mmu_tlb_flush_walk,
        .tlb_flush_leaf = mmu_tlb_flush_leaf,
};

int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
{
        struct panfrost_mmu *mmu = &priv->mmu;
        struct panfrost_device *pfdev = priv->pfdev;

        INIT_LIST_HEAD(&mmu->list);
        mmu->as = -1;

        mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap  = SZ_4K | SZ_2M,
                .ias            = FIELD_GET(0xff, pfdev->features.mmu_features),
                .oas            = FIELD_GET(0xff00, pfdev->features.mmu_features),
                .tlb            = &mmu_tlb_ops,
                .iommu_dev      = pfdev->dev,
        };

        mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
                                              priv);
        if (!mmu->pgtbl_ops)
                return -EINVAL;

        return 0;
}

void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
{
        struct panfrost_device *pfdev = priv->pfdev;
        struct panfrost_mmu *mmu = &priv->mmu;

        spin_lock(&pfdev->as_lock);
        if (mmu->as >= 0) {
                pm_runtime_get_noresume(pfdev->dev);
                if (pm_runtime_active(pfdev->dev))
                        panfrost_mmu_disable(pfdev, mmu->as);
                pm_runtime_put_autosuspend(pfdev->dev);

                clear_bit(mmu->as, &pfdev->as_alloc_mask);
                clear_bit(mmu->as, &pfdev->as_in_use_mask);
                list_del(&mmu->list);
        }
        spin_unlock(&pfdev->as_lock);

        free_io_pgtable_ops(mmu->pgtbl_ops);
}

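/*
 * Translate a faulting GPU VA back to the panfrost_gem_mapping covering
 * it: find the MMU context bound to the AS, then walk that file's
 * drm_mm. On success a reference is taken on the mapping; the caller
 * drops it with panfrost_gem_mapping_put().
 */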
static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
        struct panfrost_gem_mapping *mapping = NULL;
        struct panfrost_file_priv *priv;
        struct drm_mm_node *node;
        u64 offset = addr >> PAGE_SHIFT;
        struct panfrost_mmu *mmu;

        spin_lock(&pfdev->as_lock);
        list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
                if (as == mmu->as)
                        goto found_mmu;
        }
        goto out;

found_mmu:
        priv = container_of(mmu, struct panfrost_file_priv, mmu);

        spin_lock(&priv->mm_lock);

        drm_mm_for_each_node(node, &priv->mm) {
                if (offset >= node->start &&
                    offset < (node->start + node->size)) {
                        mapping = drm_mm_node_to_panfrost_mapping(node);

                        kref_get(&mapping->refcount);
                        break;
                }
        }

        spin_unlock(&priv->mm_lock);
out:
        spin_unlock(&pfdev->as_lock);
        return mapping;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

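/*
 * Grow a heap BO on a translation fault: populate the faulting 2MB
 * chunk with shmem pages, build and DMA-map an sg_table for it, and
 * map it into the faulting AS.
 */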
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                                       u64 addr)
{
        int ret, i;
        struct panfrost_gem_mapping *bomapping;
        struct panfrost_gem_object *bo;
        struct address_space *mapping;
        pgoff_t page_offset;
        struct sg_table *sgt;
        struct page **pages;

        bomapping = addr_to_mapping(pfdev, as, addr);
        if (!bomapping)
                return -ENOENT;

        bo = bomapping->obj;
        if (!bo->is_heap) {
                dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
                         bomapping->mmnode.start << PAGE_SHIFT);
                ret = -EINVAL;
                goto err_bo;
        }
        WARN_ON(bomapping->mmu->as != as);

        /* Assume 2MB alignment and size multiple */
        addr &= ~((u64)SZ_2M - 1);
        page_offset = addr >> PAGE_SHIFT;
        page_offset -= bomapping->mmnode.start;

        mutex_lock(&bo->base.pages_lock);

        if (!bo->base.pages) {
                bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
                                     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
                if (!bo->sgts) {
                        mutex_unlock(&bo->base.pages_lock);
                        ret = -ENOMEM;
                        goto err_bo;
                }

                pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
                                       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
                if (!pages) {
                        kfree(bo->sgts);
                        bo->sgts = NULL;
                        mutex_unlock(&bo->base.pages_lock);
                        ret = -ENOMEM;
                        goto err_bo;
                }
                bo->base.pages = pages;
                bo->base.pages_use_count = 1;
        } else
                pages = bo->base.pages;

        mapping = bo->base.base.filp->f_mapping;
        mapping_set_unevictable(mapping);

        for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
                pages[i] = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(pages[i])) {
                        mutex_unlock(&bo->base.pages_lock);
                        ret = PTR_ERR(pages[i]);
                        goto err_pages;
                }
        }

        mutex_unlock(&bo->base.pages_lock);

        sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
        ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
                                        NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
        if (ret)
                goto err_pages;

        if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
                ret = -EINVAL;
                goto err_map;
        }

        mmu_map_sg(pfdev, bomapping->mmu, addr,
                   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

        bomapping->active = true;

        dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

        panfrost_gem_mapping_put(bomapping);

        return 0;

err_map:
        sg_free_table(sgt);
err_pages:
        drm_gem_shmem_put_pages(&bo->base);
err_bo:
        drm_gem_object_put_unlocked(&bo->base.base);
        return ret;
}

static const char *access_type_name(struct panfrost_device *pfdev,
                u32 fault_status)
{
        switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
        case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
                if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
                        return "ATOMIC";
                else
                        return "UNKNOWN";
        case AS_FAULTSTATUS_ACCESS_TYPE_READ:
                return "READ";
        case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
                return "WRITE";
        case AS_FAULTSTATUS_ACCESS_TYPE_EX:
                return "EXECUTE";
        default:
                WARN_ON(1);
                return NULL;
        }
}

static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
        struct panfrost_device *pfdev = data;

        if (!mmu_read(pfdev, MMU_INT_STAT))
                return IRQ_NONE;

        mmu_write(pfdev, MMU_INT_MASK, 0);
        return IRQ_WAKE_THREAD;
}

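/*
 * Threaded half of the MMU interrupt. In the raw status, bit i is a
 * fault on AS i and bit (i + 16) is treated here as the matching bus
 * fault. Translation faults on heap BOs are handled by mapping more
 * pages; everything else is logged as a terminal fault.
 */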
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
        struct panfrost_device *pfdev = data;
        u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
        int i, ret;

        for (i = 0; status; i++) {
                u32 mask = BIT(i) | BIT(i + 16);
                u64 addr;
                u32 fault_status;
                u32 exception_type;
                u32 access_type;
                u32 source_id;

                if (!(status & mask))
                        continue;

                fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
                addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
                addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

                /* decode the fault status */
                exception_type = fault_status & 0xFF;
                access_type = (fault_status >> 8) & 0x3;
                source_id = (fault_status >> 16);

                /* Page fault only */
                if ((status & mask) == BIT(i)) {
                        WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);

                        ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
                        if (!ret) {
                                mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
                                status &= ~mask;
                                continue;
                        }
                }

                /* terminal fault, print info about the fault */
                dev_err(pfdev->dev,
                        "Unhandled Page fault in AS%d at VA 0x%016llX\n"
                        "Reason: %s\n"
                        "raw fault status: 0x%X\n"
                        "decoded fault status: %s\n"
                        "exception type 0x%X: %s\n"
                        "access type 0x%X: %s\n"
                        "source id 0x%X\n",
                        i, addr,
                        "TODO",
                        fault_status,
                        (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
                        exception_type, panfrost_exception_name(pfdev, exception_type),
                        access_type, access_type_name(pfdev, fault_status),
                        source_id);

                mmu_write(pfdev, MMU_INT_CLEAR, mask);

                status &= ~mask;
        }

        mmu_write(pfdev, MMU_INT_MASK, ~0);
        return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
        int err, irq;

        irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
        if (irq <= 0)
                return -ENODEV;

        err = devm_request_threaded_irq(pfdev->dev, irq,
                                        panfrost_mmu_irq_handler,
                                        panfrost_mmu_irq_handler_thread,
                                        IRQF_SHARED, KBUILD_MODNAME "-mmu",
                                        pfdev);

        if (err) {
                dev_err(pfdev->dev, "failed to request mmu irq");
                return err;
        }

        return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
        mmu_write(pfdev, MMU_INT_MASK, 0);
}