// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition, allocations from the DMA coherent API are pooled as well
 * because they are rather slow compared to alloc_pages+map.
 */
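
/* A minimal usage sketch (illustrative only, not part of this file): a
 * driver embeds a struct ttm_pool, initializes it once, and then populates
 * and frees ttm_tt objects through it; error handling is omitted:
 *
 *	struct ttm_pool pool;
 *	struct ttm_operation_ctx ctx = { };
 *
 *	ttm_pool_init(&pool, dev, true, false);
 *	r = ttm_pool_alloc(&pool, tt, &ctx);
 *	...
 *	ttm_pool_free(&pool, tt);
 *	ttm_pool_fini(&pool);
 */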

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"

#define TTM_MAX_ORDER (PMD_SHIFT - PAGE_SHIFT)
#define __TTM_DIM_ORDER (TTM_MAX_ORDER + 1)
/* Some architectures have a weird PMD_SHIFT */
#define TTM_DIM_ORDER (__TTM_DIM_ORDER <= MAX_ORDER ? __TTM_DIM_ORDER : MAX_ORDER)
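
/* For example, on x86-64 with 4 KiB pages PMD_SHIFT is 21, so TTM_MAX_ORDER
 * is 9 and a maximum-order pool entry covers one 2 MiB huge page.
 */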

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation order
 *	stored in the lower bits
 */
struct ttm_pool_dma {
        dma_addr_t addr;
        unsigned long vaddr;
};
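
/* Because the mapping is page aligned, the bits of @vaddr below PAGE_SHIFT
 * are zero and are used to stash the allocation order: it is stored as
 * "(unsigned long)vaddr | order" and recovered as "vaddr & ~PAGE_MASK"
 * (see ttm_pool_page_order()).
 */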

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[TTM_DIM_ORDER];
static struct ttm_pool_type global_uncached[TTM_DIM_ORDER];

static struct ttm_pool_type global_dma32_write_combined[TTM_DIM_ORDER];
static struct ttm_pool_type global_dma32_uncached[TTM_DIM_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
                                        unsigned int order)
{
        unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
        struct ttm_pool_dma *dma;
        struct page *p;
        void *vaddr;

        /* Don't set the __GFP_COMP flag for higher order allocations.
         * Mapping pages directly into a userspace process and calling
         * put_page() on a TTM allocated page is illegal.
         */
        if (order)
                gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
                        __GFP_KSWAPD_RECLAIM;

        if (!pool->use_dma_alloc) {
                p = alloc_pages(gfp_flags, order);
                if (p)
                        p->private = order;
                return p;
        }

        dma = kmalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return NULL;

        if (order)
                attr |= DMA_ATTR_NO_WARN;

        vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
                                &dma->addr, gfp_flags, attr);
        if (!vaddr)
                goto error_free;

        /* TODO: This is an illegal abuse of the DMA API, but we need to rework
         * TTM page fault handling and extend the DMA API to clean this up.
         */
        if (is_vmalloc_addr(vaddr))
                p = vmalloc_to_page(vaddr);
        else
                p = virt_to_page(vaddr);

        dma->vaddr = (unsigned long)vaddr | order;
        p->private = (unsigned long)dma;
        return p;

error_free:
        kfree(dma);
        return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
                               unsigned int order, struct page *p)
{
        unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
        struct ttm_pool_dma *dma;
        void *vaddr;

#ifdef CONFIG_X86
        /* We don't care that set_pages_wb is inefficient here. This is only
         * used when we have to shrink and CPU overhead is irrelevant then.
         */
        if (caching != ttm_cached && !PageHighMem(p))
                set_pages_wb(p, 1 << order);
#endif

        if (!pool || !pool->use_dma_alloc) {
                __free_pages(p, order);
                return;
        }

        if (order)
                attr |= DMA_ATTR_NO_WARN;

        dma = (void *)p->private;
        vaddr = (void *)(dma->vaddr & PAGE_MASK);
        dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
                       attr);
        kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
                                  enum ttm_caching caching)
{
#ifdef CONFIG_X86
        unsigned int num_pages = last - first;

        if (!num_pages)
                return 0;

        switch (caching) {
        case ttm_cached:
                break;
        case ttm_write_combined:
                return set_pages_array_wc(first, num_pages);
        case ttm_uncached:
                return set_pages_array_uc(first, num_pages);
        }
#endif
        return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
                        struct page *p, dma_addr_t **dma_addr)
{
        dma_addr_t addr;
        unsigned int i;

        if (pool->use_dma_alloc) {
                struct ttm_pool_dma *dma = (void *)p->private;

                addr = dma->addr;
        } else {
                size_t size = (1ULL << order) * PAGE_SIZE;

                addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(pool->dev, addr))
                        return -EFAULT;
        }

        for (i = 1 << order; i ; --i) {
                *(*dma_addr)++ = addr;
                addr += PAGE_SIZE;
        }

        return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
                           unsigned int num_pages)
{
        /* Unmapped while freeing the page */
        if (pool->use_dma_alloc)
                return;

        dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
                       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
        unsigned int i, num_pages = 1 << pt->order;

        for (i = 0; i < num_pages; ++i) {
                if (PageHighMem(p))
                        clear_highpage(p + i);
                else
                        clear_page(page_address(p + i));
        }

        spin_lock(&pt->lock);
        list_add(&p->lru, &pt->pages);
        spin_unlock(&pt->lock);
        atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
        struct page *p;

        spin_lock(&pt->lock);
        p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
        if (p) {
                atomic_long_sub(1 << pt->order, &allocated_pages);
                list_del(&p->lru);
        }
        spin_unlock(&pt->lock);

        return p;
}
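
/* Note that give and take work LIFO: ttm_pool_type_give() adds at the list
 * head and ttm_pool_type_take() removes from it, so the most recently
 * returned (and most likely still cache-hot) pages are reused first.
 */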

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
                               enum ttm_caching caching, unsigned int order)
{
        pt->pool = pool;
        pt->caching = caching;
        pt->order = order;
        spin_lock_init(&pt->lock);
        INIT_LIST_HEAD(&pt->pages);

        spin_lock(&shrinker_lock);
        list_add_tail(&pt->shrinker_list, &shrinker_list);
        spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
        struct page *p;

        spin_lock(&shrinker_lock);
        list_del(&pt->shrinker_list);
        spin_unlock(&shrinker_lock);

        while ((p = ttm_pool_type_take(pt)))
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                                                  enum ttm_caching caching,
                                                  unsigned int order)
{
        if (pool->use_dma_alloc)
                return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
        switch (caching) {
        case ttm_write_combined:
                if (pool->use_dma32)
                        return &global_dma32_write_combined[order];

                return &global_write_combined[order];
        case ttm_uncached:
                if (pool->use_dma32)
                        return &global_dma32_uncached[order];

                return &global_uncached[order];
        default:
                break;
        }
#endif

        return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
        struct ttm_pool_type *pt;
        unsigned int num_pages;
        struct page *p;

        spin_lock(&shrinker_lock);
        pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
        list_move_tail(&pt->shrinker_list, &shrinker_list);
        spin_unlock(&shrinker_lock);

        p = ttm_pool_type_take(pt);
        if (p) {
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
                num_pages = 1 << pt->order;
        } else {
                num_pages = 0;
        }

        return num_pages;
}

/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
        if (pool->use_dma_alloc) {
                struct ttm_pool_dma *dma = (void *)p->private;

                return dma->vaddr & ~PAGE_MASK;
        }

        return p->private;
}

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
                                   struct page *p, dma_addr_t **dma_addr,
                                   unsigned long *num_pages,
                                   struct page ***pages)
{
        unsigned int i;
        int r;

        if (*dma_addr) {
                r = ttm_pool_map(pool, order, p, dma_addr);
                if (r)
                        return r;
        }

        *num_pages -= 1 << order;
        for (i = 1 << order; i; --i, ++(*pages), ++p)
                **pages = p;

        return 0;
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if the allocation hits an error without
 * being able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
                                enum ttm_caching caching,
                                pgoff_t start_page, pgoff_t end_page)
{
        struct page **pages = tt->pages;
        unsigned int order;
        pgoff_t i, nr;

        for (i = start_page; i < end_page; i += nr, pages += nr) {
                struct ttm_pool_type *pt = NULL;

                order = ttm_pool_page_order(pool, *pages);
                nr = (1UL << order);
                if (tt->dma_address)
                        ttm_pool_unmap(pool, tt->dma_address[i], nr);

                pt = ttm_pool_select_type(pool, caching, order);
                if (pt)
                        ttm_pool_type_give(pt, *pages);
                else
                        ttm_pool_free_page(pool, caching, order, *pages);
        }
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
                   struct ttm_operation_ctx *ctx)
{
        pgoff_t num_pages = tt->num_pages;
        dma_addr_t *dma_addr = tt->dma_address;
        struct page **caching = tt->pages;
        struct page **pages = tt->pages;
        enum ttm_caching page_caching;
        gfp_t gfp_flags = GFP_USER;
        pgoff_t caching_divide;
        unsigned int order;
        struct page *p;
        int r;

        WARN_ON(!num_pages || ttm_tt_is_populated(tt));
        WARN_ON(dma_addr && !pool->dev);

        if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        if (ctx->gfp_retry_mayfail)
                gfp_flags |= __GFP_RETRY_MAYFAIL;

        if (pool->use_dma32)
                gfp_flags |= GFP_DMA32;
        else
                gfp_flags |= GFP_HIGHUSER;

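        /* Start with the highest order that still fits the request and fall
         * back to the next smaller order whenever an allocation fails, until
         * either all pages are allocated or even order 0 fails.
         */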
        for (order = min_t(unsigned int, TTM_MAX_ORDER, __fls(num_pages));
             num_pages;
             order = min_t(unsigned int, order, __fls(num_pages))) {
                struct ttm_pool_type *pt;

                page_caching = tt->caching;
                pt = ttm_pool_select_type(pool, tt->caching, order);
                p = pt ? ttm_pool_type_take(pt) : NULL;
                if (p) {
                        r = ttm_pool_apply_caching(caching, pages,
                                                   tt->caching);
                        if (r)
                                goto error_free_page;

                        caching = pages;
                        do {
                                r = ttm_pool_page_allocated(pool, order, p,
                                                            &dma_addr,
                                                            &num_pages,
                                                            &pages);
                                if (r)
                                        goto error_free_page;

                                caching = pages;
                                if (num_pages < (1 << order))
                                        break;

                                p = ttm_pool_type_take(pt);
                        } while (p);
                }

                page_caching = ttm_cached;
                while (num_pages >= (1 << order) &&
                       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {

                        if (PageHighMem(p)) {
                                r = ttm_pool_apply_caching(caching, pages,
                                                           tt->caching);
                                if (r)
                                        goto error_free_page;
                                caching = pages;
                        }
                        r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
                                                    &num_pages, &pages);
                        if (r)
                                goto error_free_page;
                        if (PageHighMem(p))
                                caching = pages;
                }

                if (!p) {
                        if (order) {
                                --order;
                                continue;
                        }
                        r = -ENOMEM;
                        goto error_free_all;
                }
        }

        r = ttm_pool_apply_caching(caching, pages, tt->caching);
        if (r)
                goto error_free_all;

        return 0;

error_free_page:
        ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
        num_pages = tt->num_pages - num_pages;
        caching_divide = caching - tt->pages;
        ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
        ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

        return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
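
/* On failure, ttm_pool_alloc() frees in two ranges: pages below the last
 * caching transition already carry tt->caching while pages above it are
 * still ttm_cached; caching_divide in error_free_all marks that boundary.
 */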

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
        ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

        while (atomic_long_read(&allocated_pages) > page_pool_size)
                ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
                   bool use_dma_alloc, bool use_dma32)
{
        unsigned int i, j;

        WARN_ON(!dev && use_dma_alloc);

        pool->dev = dev;
        pool->use_dma_alloc = use_dma_alloc;
        pool->use_dma32 = use_dma32;

        if (use_dma_alloc) {
                for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
                        for (j = 0; j < TTM_DIM_ORDER; ++j)
                                ttm_pool_type_init(&pool->caching[i].orders[j],
                                                   pool, i, j);
        }
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
        unsigned int i, j;

        if (pool->use_dma_alloc) {
                for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
                        for (j = 0; j < TTM_DIM_ORDER; ++j)
                                ttm_pool_type_fini(&pool->caching[i].orders[j]);
        }

        /* We removed the pool types from the LRU, but we need to also make sure
         * that no shrinker is concurrently freeing pages from the pool.
         */
        synchronize_shrinkers();
}

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
                                            struct shrink_control *sc)
{
        unsigned long num_freed = 0;

        do
                num_freed += ttm_pool_shrink();
        while (!num_freed && atomic_long_read(&allocated_pages));

        return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
                                             struct shrink_control *sc)
{
        unsigned long num_pages = atomic_long_read(&allocated_pages);

        return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
        unsigned int count = 0;
        struct page *p;

        spin_lock(&pt->lock);
        /* Only used for debugfs, the overhead doesn't matter */
        list_for_each_entry(p, &pt->pages, lru)
                ++count;
        spin_unlock(&pt->lock);

        return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
        unsigned int i;

        seq_puts(m, "\t ");
        for (i = 0; i < TTM_DIM_ORDER; ++i)
                seq_printf(m, " ---%2u---", i);
        seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
                                    struct seq_file *m)
{
        unsigned int i;

        for (i = 0; i < TTM_DIM_ORDER; ++i)
                seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
        seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
        seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
                   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
        ttm_pool_debugfs_header(m);

        spin_lock(&shrinker_lock);
        seq_puts(m, "wc\t:");
        ttm_pool_debugfs_orders(global_write_combined, m);
        seq_puts(m, "uc\t:");
        ttm_pool_debugfs_orders(global_uncached, m);
        seq_puts(m, "wc 32\t:");
        ttm_pool_debugfs_orders(global_dma32_write_combined, m);
        seq_puts(m, "uc 32\t:");
        ttm_pool_debugfs_orders(global_dma32_uncached, m);
        spin_unlock(&shrinker_lock);

        ttm_pool_debugfs_footer(m);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
        unsigned int i;

        if (!pool->use_dma_alloc) {
                seq_puts(m, "unused\n");
                return 0;
        }

        ttm_pool_debugfs_header(m);

        spin_lock(&shrinker_lock);
        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
                seq_puts(m, "DMA ");
                switch (i) {
                case ttm_cached:
                        seq_puts(m, "\t:");
                        break;
                case ttm_write_combined:
                        seq_puts(m, "wc\t:");
                        break;
                case ttm_uncached:
                        seq_puts(m, "uc\t:");
                        break;
                }
                ttm_pool_debugfs_orders(pool->caching[i].orders, m);
        }
        spin_unlock(&shrinker_lock);

        ttm_pool_debugfs_footer(m);
        return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
        struct shrink_control sc = { .gfp_mask = GFP_NOFS };

        fs_reclaim_acquire(GFP_KERNEL);
        seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
                   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
        fs_reclaim_release(GFP_KERNEL);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
        unsigned int i;

        BUILD_BUG_ON(TTM_DIM_ORDER > MAX_ORDER);
        BUILD_BUG_ON(TTM_DIM_ORDER < 1);

        if (!page_pool_size)
                page_pool_size = num_pages;

        spin_lock_init(&shrinker_lock);
        INIT_LIST_HEAD(&shrinker_list);

        for (i = 0; i < TTM_DIM_ORDER; ++i) {
                ttm_pool_type_init(&global_write_combined[i], NULL,
                                   ttm_write_combined, i);
                ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

                ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
                                   ttm_write_combined, i);
                ttm_pool_type_init(&global_dma32_uncached[i], NULL,
                                   ttm_uncached, i);
        }

#ifdef CONFIG_DEBUG_FS
        debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
                            &ttm_pool_debugfs_globals_fops);
        debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
                            &ttm_pool_debugfs_shrink_fops);
#endif

        mm_shrinker.count_objects = ttm_pool_shrinker_count;
        mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
        mm_shrinker.seeks = 1;
        return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
        unsigned int i;

        for (i = 0; i < TTM_DIM_ORDER; ++i) {
                ttm_pool_type_fini(&global_write_combined[i]);
                ttm_pool_type_fini(&global_uncached[i]);

                ttm_pool_type_fini(&global_dma32_write_combined[i]);
                ttm_pool_type_fini(&global_dma32_uncached[i]);
        }

        unregister_shrinker(&mm_shrinker);
        WARN_ON(!list_empty(&shrinker_list));
}