drivers/gpu/drm/i915/gem/i915_gem_shmem.c (platform/kernel/linux-starfive.git)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move folios to appropriate lru and release the batch, decrementing the
 * ref count of those folios.
 */
static void check_release_folio_batch(struct folio_batch *fbatch)
{
        check_move_unevictable_folios(fbatch);
        __folio_batch_release(fbatch);
        cond_resched();
}

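/*
 * Release the shmem folios referenced by the sg_table: mark them dirty
 * and/or accessed as requested, clear the unevictable flag on the mapping,
 * and move the folios back to the appropriate LRU before freeing the table.
 */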
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
                         bool dirty, bool backup)
{
        struct sgt_iter sgt_iter;
        struct folio_batch fbatch;
        struct folio *last = NULL;
        struct page *page;

        mapping_clear_unevictable(mapping);

        folio_batch_init(&fbatch);
        for_each_sgt_page(page, sgt_iter, st) {
                struct folio *folio = page_folio(page);

                if (folio == last)
                        continue;
                last = folio;
                if (dirty)
                        folio_mark_dirty(folio);
                if (backup)
                        folio_mark_accessed(folio);

                if (!folio_batch_add(&fbatch, folio))
                        check_release_folio_batch(&fbatch);
        }
        if (fbatch.nr)
                check_release_folio_batch(&fbatch);

        sg_free_table(st);
}

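/*
 * Build an sg_table from folios in the shmem mapping. Allocation is first
 * attempted without reclaim; on failure the i915 shrinker is invoked, and as
 * a last resort the mapping's full gfp mask (plus __GFP_RETRY_MAYFAIL) is
 * used so that we report ENOMEM rather than trigger the OOM killer.
 */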
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
                         size_t size, struct intel_memory_region *mr,
                         struct address_space *mapping,
                         unsigned int max_segment)
{
        unsigned int page_count; /* restricted by sg_alloc_table */
        unsigned long i;
        struct scatterlist *sg;
        unsigned long next_pfn = 0;     /* suppress gcc warning */
        gfp_t noreclaim;
        int ret;

        if (overflows_type(size / PAGE_SIZE, page_count))
                return -E2BIG;

        page_count = size / PAGE_SIZE;
        /*
         * If there's no chance of allocating enough pages for the whole
         * object, bail early.
         */
        if (size > resource_size(&mr->region))
                return -ENOMEM;

        if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
                return -ENOMEM;

        /*
         * Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         *
         * Fail silently without starting the shrinker
         */
        mapping_set_unevictable(mapping);
        noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
        noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

        sg = st->sgl;
        st->nents = 0;
        for (i = 0; i < page_count; i++) {
                struct folio *folio;
                const unsigned int shrink[] = {
                        I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
                        0,
                }, *s = shrink;
                gfp_t gfp = noreclaim;

                do {
                        cond_resched();
                        folio = shmem_read_folio_gfp(mapping, i, gfp);
                        if (!IS_ERR(folio))
                                break;

                        if (!*s) {
                                ret = PTR_ERR(folio);
                                goto err_sg;
                        }

                        i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

                        /*
                         * We've tried hard to allocate the memory by reaping
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         *
                         * However, since graphics tend to be disposable,
                         * defer the oom here by reporting the ENOMEM back
                         * to userspace.
                         */
                        if (!*s) {
                                /* reclaim and warn, but no oom */
                                gfp = mapping_gfp_mask(mapping);

                                /*
                                 * Our bo are always dirty and so we require
                                 * kswapd to reclaim our pages (direct reclaim
                                 * does not effectively begin pageout of our
                                 * buffers on its own). However, direct reclaim
                                 * only waits for kswapd when under allocation
                                 * congestion. So as a result __GFP_RECLAIM is
                                 * unreliable and fails to actually reclaim our
                                 * dirty pages -- unless you try over and over
                                 * again with !__GFP_NORETRY. However, we still
                                 * want to fail this allocation rather than
                                 * trigger the out-of-memory killer and for
                                 * this we want __GFP_RETRY_MAYFAIL.
                                 */
                                gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
                        }
                } while (1);

                if (!i ||
                    sg->length >= max_segment ||
                    folio_pfn(folio) != next_pfn) {
                        if (i)
                                sg = sg_next(sg);

                        st->nents++;
                        sg_set_folio(sg, folio, folio_size(folio), 0);
                } else {
                        /* XXX: could overflow? */
                        sg->length += folio_size(folio);
                }
                next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
                i += folio_nr_pages(folio) - 1;

                /* Check that the i965g/gm workaround works. */
                GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
        }
        if (sg) /* loop terminated early; short sg table */
                sg_mark_end(sg);

        /* Trim unused sg entries to avoid wasting memory. */
        i915_sg_trim(st);

        return 0;
err_sg:
        sg_mark_end(sg);
        if (sg != st->sgl) {
                shmem_sg_free_table(st, mapping, false, false);
        } else {
                mapping_clear_unevictable(mapping);
                sg_free_table(st);
        }

        /*
         * shmemfs first checks if there is enough memory to allocate the page
         * and reports ENOSPC should there be insufficient, along with the usual
         * ENOMEM for a genuine allocation failure.
         *
         * We use ENOSPC in our driver to mean that we have run out of aperture
         * space and so want to translate the error from shmemfs back to our
         * usual understanding of ENOMEM.
         */
        if (ret == -ENOSPC)
                ret = -ENOMEM;

        return ret;
}

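/*
 * Acquire the backing store for a shmem object: build the sg_table, map it
 * for DMA (retrying with PAGE_SIZE segments if remapping fails), apply any
 * bit-17 swizzle fixups and then publish the pages to the object.
 */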
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_memory_region *mem = obj->mm.region;
        struct address_space *mapping = obj->base.filp->f_mapping;
        unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
        struct sg_table *st;
        struct sgt_iter sgt_iter;
        struct page *page;
        int ret;

        /*
         * Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
        GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
        st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
        if (!st)
                return -ENOMEM;

        ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
                                   max_segment);
        if (ret)
                goto err_st;

        ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret) {
                /*
                 * DMA remapping failed? One possible cause is that it could
                 * not reserve enough large entries; asking for PAGE_SIZE
                 * chunks instead may be helpful.
                 */
                if (max_segment > PAGE_SIZE) {
                        for_each_sgt_page(page, sgt_iter, st)
                                put_page(page);
                        sg_free_table(st);
                        kfree(st);

                        max_segment = PAGE_SIZE;
                        goto rebuild_st;
                } else {
                        dev_warn(i915->drm.dev,
                                 "Failed to DMA remap %zu pages\n",
                                 obj->base.size >> PAGE_SHIFT);
                        goto err_pages;
                }
        }

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);

        if (i915_gem_object_can_bypass_llc(obj))
                obj->cache_dirty = true;

        __i915_gem_object_set_pages(obj, st);

        return 0;

err_pages:
        shmem_sg_free_table(st, mapping, false, false);
        /*
         * shmemfs first checks if there is enough memory to allocate the page
         * and reports ENOSPC should there be insufficient, along with the usual
         * ENOMEM for a genuine allocation failure.
         *
         * We use ENOSPC in our driver to mean that we have run out of aperture
         * space and so want to translate the error from shmemfs back to our
         * usual understanding of ENOMEM.
         */
err_st:
        if (ret == -ENOSPC)
                ret = -ENOMEM;

        kfree(st);

        return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
        /*
         * Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
        obj->mm.madv = __I915_MADV_PURGED;
        obj->mm.pages = ERR_PTR(-EFAULT);

        return 0;
}

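/*
 * Start writeback for up to @size bytes of dirty, unmapped pagecache in
 * @mapping so that the pages can be reclaimed, without waiting for the
 * writeback to complete.
 */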
void __shmem_writeback(size_t size, struct address_space *mapping)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = SWAP_CLUSTER_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1,
        };
        unsigned long i;

        /*
         * Leave mmappings intact (GTT will have been revoked on unbinding,
         * leaving only CPU mmappings around) and add those pages to the LRU
         * instead of invoking writeback so they are aged and paged out
         * as normal.
         */

        /* Begin writeback on each dirty page */
        for (i = 0; i < size >> PAGE_SHIFT; i++) {
                struct page *page;

                page = find_lock_page(mapping, i);
                if (!page)
                        continue;

                if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
                        int ret;

                        SetPageReclaim(page);
                        ret = mapping->a_ops->writepage(page, &wbc);
                        if (!PageWriteback(page))
                                ClearPageReclaim(page);
                        if (!ret)
                                goto put;
                }
                unlock_page(page);
put:
                put_page(page);
        }
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
        __shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

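/*
 * Shrinker callback: purge the backing store if the object was marked
 * DONTNEED, otherwise optionally start writeback so its dirty pages can be
 * reclaimed.
 */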
static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
                return i915_gem_object_truncate(obj);
        case __I915_MADV_PURGED:
                return 0;
        }

        if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
                shmem_writeback(obj);

        return 0;
}

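/*
 * Common teardown before shmem pages are returned to the system: drop the
 * dirty flag for DONTNEED objects, clflush if the CPU might otherwise read
 * stale data, and move the object back to the CPU write domain.
 */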
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                struct sg_table *pages,
                                bool needs_clflush)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;

        if (needs_clflush &&
            (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
                drm_clflush_sg(pages);

        __start_cpu_write(obj);
        /*
         * On non-LLC igfx platforms, force the flush-on-acquire if this is ever
         * swapped-in. Our async flush path is not trustworthy enough yet (and
         * happens in the wrong order), and with some tricks it's conceivable
         * for userspace to change the cache-level to I915_CACHE_NONE after the
         * pages are swapped-in, and since execbuf binds the object before doing
         * the async flush, we have a race window.
         */
        if (!HAS_LLC(i915) && !IS_DGFX(i915))
                obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
        __i915_gem_object_release_shmem(obj, pages, true);

        i915_gem_gtt_finish_pages(obj, pages);

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj, pages);

        shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
                            obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
        kfree(pages);
        obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
        if (likely(i915_gem_object_has_struct_page(obj)))
                i915_gem_object_put_pages_shmem(obj, pages);
        else
                i915_gem_object_put_pages_phys(obj, pages);
}

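/*
 * pwrite fast path used before the backing store is pinned: copy user data
 * straight into the shmem pagecache via write_begin()/write_end() instead of
 * instantiating and mapping every page.
 */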
static int
shmem_pwrite(struct drm_i915_gem_object *obj,
             const struct drm_i915_gem_pwrite *arg)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        const struct address_space_operations *aops = mapping->a_ops;
        char __user *user_data = u64_to_user_ptr(arg->data_ptr);
        u64 remain, offset;
        unsigned int pg;

        /* Caller already validated user args */
        GEM_BUG_ON(!access_ok(user_data, arg->size));

        if (!i915_gem_object_has_struct_page(obj))
                return i915_gem_object_pwrite_phys(obj, arg);

        /*
         * Before we instantiate/pin the backing store for our use, we
         * can prepopulate the shmemfs filp efficiently using a write into
         * the pagecache. We avoid the penalty of instantiating all the
         * pages, important if the user is just writing to a few and never
         * uses the object on the GPU, and using a direct write into shmemfs
         * allows it to avoid the cost of retrieving a page (either swapin
         * or clearing-before-use) before it is overwritten.
         */
        if (i915_gem_object_has_pages(obj))
                return -ENODEV;

        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;

        /*
         * Before the pages are instantiated the object is treated as being
         * in the CPU domain. The pages will be clflushed as required before
         * use, and we can freely write into the pages directly. If userspace
         * races pwrite with any other operation, corruption will ensue -
         * that is userspace's prerogative!
         */

        remain = arg->size;
        offset = arg->offset;
        pg = offset_in_page(offset);

        do {
                unsigned int len, unwritten;
                struct page *page;
                void *data, *vaddr;
                int err;
                char __maybe_unused c;

                len = PAGE_SIZE - pg;
                if (len > remain)
                        len = remain;

                /* Prefault the user page to reduce potential recursion */
                err = __get_user(c, user_data);
                if (err)
                        return err;

                err = __get_user(c, user_data + len - 1);
                if (err)
                        return err;

                err = aops->write_begin(obj->base.filp, mapping, offset, len,
                                        &page, &data);
                if (err < 0)
                        return err;

                vaddr = kmap_atomic(page);
                unwritten = __copy_from_user_inatomic(vaddr + pg,
                                                      user_data,
                                                      len);
                kunmap_atomic(vaddr);

                err = aops->write_end(obj->base.filp, mapping, offset, len,
                                      len - unwritten, page, data);
                if (err < 0)
                        return err;

                /* We don't handle -EFAULT, leave it to the caller to check */
                if (unwritten)
                        return -ENODEV;

                remain -= len;
                user_data += len;
                offset += len;
                pg = 0;
        } while (remain);

        return 0;
}

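/*
 * No shmem fast path for pread: objects without struct pages are handed to
 * the phys backend, everything else returns -ENODEV so the caller falls back
 * to its generic paths.
 */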
static int
shmem_pread(struct drm_i915_gem_object *obj,
            const struct drm_i915_gem_pread *arg)
{
        if (!i915_gem_object_has_struct_page(obj))
                return i915_gem_object_pread_phys(obj, arg);

        return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
        if (i915_gem_object_has_struct_page(obj))
                i915_gem_object_release_memory_region(obj);

        fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
        .name = "i915_gem_object_shmem",
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,

        .get_pages = shmem_get_pages,
        .put_pages = shmem_put_pages,
        .truncate = shmem_truncate,
        .shrink = shmem_shrink,

        .pwrite = shmem_pwrite,
        .pread = shmem_pread,

        .release = shmem_release,
};

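/*
 * Create the shmem file backing a GEM object, preferring the driver's
 * private gemfs mount when it is available.
 */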
static int __create_shmem(struct drm_i915_private *i915,
                          struct drm_gem_object *obj,
                          resource_size_t size)
{
        unsigned long flags = VM_NORESERVE;
        struct file *filp;

        drm_gem_private_object_init(&i915->drm, obj, size);

        /* XXX: The __shmem_file_setup() function returns -EINVAL if size is
         * greater than MAX_LFS_FILESIZE.
         * To match the other code paths that return -E2BIG when the size is
         * too large, return -E2BIG here when the size exceeds what can be
         * handled.
         * If BITS_PER_LONG is 32, size > MAX_LFS_FILESIZE is always false,
         * so we only need to check when BITS_PER_LONG is 64.
         * If BITS_PER_LONG is 32, the E2BIG check is done when
         * i915_gem_object_size_2big() is called before the init_object()
         * callback.
         */
        if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
                return -E2BIG;

        if (i915->mm.gemfs)
                filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
                                                 flags);
        else
                filp = shmem_file_setup("i915", size, flags);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;
        return 0;
}

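/*
 * Initialise a shmem-backed object: create the backing file, constrain the
 * gfp mask for 965G/GM (no allocations above 4GiB), and pick the default
 * cache level based on LLC availability.
 */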
static int shmem_object_init(struct intel_memory_region *mem,
                             struct drm_i915_gem_object *obj,
                             resource_size_t offset,
                             resource_size_t size,
                             resource_size_t page_size,
                             unsigned int flags)
{
        static struct lock_class_key lock_class;
        struct drm_i915_private *i915 = mem->i915;
        struct address_space *mapping;
        unsigned int cache_level;
        gfp_t mask;
        int ret;

        ret = __create_shmem(i915, &obj->base, size);
        if (ret)
                return ret;

        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_I965GM(i915) || IS_I965G(i915)) {
                /* 965gm cannot relocate objects above 4GiB. */
                mask &= ~__GFP_HIGHMEM;
                mask |= __GFP_DMA32;
        }

        mapping = obj->base.filp->f_mapping;
        mapping_set_gfp_mask(mapping, mask);
        GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

        i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
        obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;

        /*
         * MTL doesn't snoop the CPU cache by default for GPU access (namely
         * 1-way coherency). However, some UMDs are currently depending on
         * that. Make 1-way coherency the default setting for MTL. A follow-up
         * patch will extend the GEM_CREATE uAPI to allow UMDs to specify the
         * caching mode at BO creation time.
         */
        if (HAS_LLC(i915) || (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)))
                /* On some devices, we can have the GPU use the LLC (the CPU
                 * cache) for about a 10% performance improvement
                 * compared to uncached.  Graphics requests other than
                 * display scanout are coherent with the CPU in
                 * accessing this cache.  This means in this mode we
                 * don't need to clflush on the CPU side, and on the
                 * GPU side we only need to flush internal caches to
                 * get data visible to the CPU.
                 *
                 * However, we maintain the display planes as UC, and so
                 * need to rebind when first used as such.
                 */
                cache_level = I915_CACHE_LLC;
        else
                cache_level = I915_CACHE_NONE;

        i915_gem_object_set_cache_coherency(obj, cache_level);

        i915_gem_object_init_memory_region(obj, mem);

        return 0;
}

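/* Create a shmem-backed object in the system memory (SMEM) region. */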
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size)
{
        return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
                                             size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
                                       const void *data, resource_size_t size)
{
        struct drm_i915_gem_object *obj;
        struct file *file;
        const struct address_space_operations *aops;
        resource_size_t offset;
        int err;

        GEM_WARN_ON(IS_DGFX(dev_priv));
        obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
        if (IS_ERR(obj))
                return obj;

        GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

        file = obj->base.filp;
        aops = file->f_mapping->a_ops;
        offset = 0;
        do {
                unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
                struct page *page;
                void *pgdata, *vaddr;

                err = aops->write_begin(file, file->f_mapping, offset, len,
                                        &page, &pgdata);
                if (err < 0)
                        goto fail;

                vaddr = kmap(page);
                memcpy(vaddr, data, len);
                kunmap(page);

                err = aops->write_end(file, file->f_mapping, offset, len, len,
                                      page, pgdata);
                if (err < 0)
                        goto fail;

                size -= len;
                data += len;
                offset += len;
        } while (size);

        return obj;

fail:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
        i915_gemfs_init(mem->i915);
        intel_memory_region_set_name(mem, "system");

        return 0; /* We fall back to the kernel mnt if gemfs init failed. */
}

static int release_shmem(struct intel_memory_region *mem)
{
        i915_gemfs_fini(mem->i915);
        return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
        .init = init_shmem,
        .release = release_shmem,
        .init_object = shmem_object_init,
};

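/*
 * Register system memory (shmem-backed) as an intel_memory_region, sized by
 * the total amount of RAM in the machine.
 */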
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
                                                 u16 type, u16 instance)
{
        return intel_memory_region_create(i915, 0,
                                          totalram_pages() << PAGE_SHIFT,
                                          PAGE_SIZE, 0, 0,
                                          type, instance,
                                          &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
        return obj->ops == &i915_gem_shmem_ops;
}