/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

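/*
 * Bit-6 swizzling folds a higher physical address bit back into bit 6
 * of the byte address: swizzle_bit() extracts the requested bit from
 * the offset and shifts it down to bit-6 position, so e.g.
 * swizzle_bit(9, v) yields BIT(6) iff bit 9 of v is set, ready to be
 * XORed into the offset.
 */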
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

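/*
 * Map a linear byte offset to its location in the tiled layout
 * described by @tile: split the offset into an (x, y) coordinate via
 * the stride, step down to the start of the containing tile row, add
 * the within-tile offset for X- or Y-major ordering, then apply the
 * platform's bit-6 swizzle.
 */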
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

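/*
 * Pick one random page of the object, pin the minimal partial GGTT
 * view covering it, write the page index through the GTT mapping and
 * then verify from the CPU side that the write landed at the offset
 * predicted by tiled_offset().
 */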
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	__i915_vma_put(vma);
	return err;
}

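/*
 * Exhaustive variant of check_partial_mapping(): walk every
 * prime-numbered page of the object, pinning a fresh partial view for
 * each and checking the GTT write against the manually swizzled
 * offset, until end_time expires.
 */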
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		__i915_vma_put(vma);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

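/*
 * Fill in the tile geometry (width, height, log2 size) for the chosen
 * tiling mode on this platform and return the maximum pitch in
 * tile-width units, per the fence pitch limits of each generation.
 */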
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT
	 * vmas. We then check that a write through each partial GGTT vma
	 * ends up in the right set of pages within the object, and with
	 * the expected tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many 1s runs with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

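/*
 * Queue a dummy write to the object from every uabi engine, then drop
 * our reference so the object stays alive only via its active
 * reference until the requests are retired.
 */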
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	if (HAS_LMEM(i915))
		return I915_MMAP_TYPE_FIXED;

	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
		       unsigned long size)
{
	if (HAS_LMEM(i915)) {
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
	}

	return i915_gem_object_create_internal(i915, size);
}

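/*
 * Try to create an object of @size and assign it an mmap offset,
 * succeeding only if the attempt completes with exactly the @expected
 * error code (0 for success).
 */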
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = create_sys_or_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(&i915->gt);
	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(&i915->gt);
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

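/*
 * Trim the mmap offset space down to a single page, then check the
 * boundary conditions: a one-page object just fits, a larger object
 * does not, a filled hole rejects further inserts, and busy-but-dead
 * objects can be reaped to free up offsets again.
 */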
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);
	intel_gt_retire_requests(&i915->gt);
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = create_sys_or_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
		goto out;
	}

	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(&i915->gt))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

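/*
 * Poison helpers for the mmap tests below: the *_set() variants fill
 * the backing store with POISON_INUSE and the *_check() variants
 * expect to find POISON_FREE, written back through the user mapping,
 * using either a GGTT iomap (gtt_*) or a WC CPU map (wc_*).
 */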
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	bool no_map;

	if (obj->ops->mmap_offset)
		return type == I915_MMAP_TYPE_FIXED;
	else if (type == I915_MMAP_TYPE_FIXED)
		return false;

	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
		return false;

	i915_gem_object_lock(obj, NULL);
	no_map = (type != I915_MMAP_TYPE_GTT &&
		  !i915_gem_object_has_struct_page(obj) &&
		  !i915_gem_object_has_iomem(obj));
	i915_gem_object_unlock(obj);

	return !no_map;
}

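/* Replicate a byte into all four bytes of a u32,
 * e.g. expand32(POISON_INUSE) turns 0x5a into 0x5a5a5a5a.
 */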
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
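/*
 * Map the object with the requested mmap type, check that userspace
 * reads back the POISON_INUSE pattern previously placed in the backing
 * store, overwrite every u32 with POISON_FREE through the mapping, and
 * verify the writes reached the backing store.
 */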
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	area = vma_lookup(current->mm, addr);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	case I915_MMAP_TYPE_FIXED: return "fixed";
	default: return "unknown";
	}
}

static bool can_access(struct drm_i915_gem_object *obj)
{
	bool access;

	i915_gem_object_lock(obj, NULL);
	access = i915_gem_object_has_struct_page(obj) ||
		i915_gem_object_has_iomem(obj);
	i915_gem_object_unlock(obj);

	return access;
}

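/*
 * Check that ptrace-style access via access_process_vm() is coherent
 * with direct reads and writes through the user mapping: a value
 * written through the mmap must be readable by access_process_vm(),
 * and vice versa.
 */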
static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long __user *ptr;
	unsigned long A, B;
	unsigned long x, y;
	unsigned long addr;
	int err;
	u64 offset;

	memset(&A, 0xAA, sizeof(A));
	memset(&B, 0xBB, sizeof(B));

	if (!can_mmap(obj, type) || !can_access(obj))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;
	ptr = (unsigned long __user *)addr;

	err = __put_user(A, ptr);
	if (err) {
		pr_err("%s(%s): failed to write into user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(&i915->gt);

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(&i915->gt);

	err = __get_user(y, ptr);
	if (err) {
		pr_err("%s(%s): failed to read from user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	if (x != A || y != B) {
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
		       obj->mm.region->name, repr_mmap_type(type),
		       x, y);
		err = -EINVAL;
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_access(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;
	u64 offset;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto out_ww;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
out_ww:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

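/*
 * PTE-walk callbacks for apply_to_page_range(): check_present_pte()
 * expects every PTE in the range to be populated, while
 * check_absent_pte() expects every PTE to have been revoked.
 */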
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}

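/*
 * Touch one byte in each page of the user range so that every page is
 * faulted in before the PTEs are inspected.
 */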
static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}

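/*
 * Fault in the whole mapping, unbind the object (and drop its pages
 * for non-GTT mmaps), then check that the user PTEs have been revoked;
 * TTM-backed objects are the exception, as they allow access to
 * evicted regions by design.
 */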
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long addr;
	int err;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}

	if (type != I915_MMAP_TYPE_GTT) {
		i915_gem_object_lock(obj, NULL);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	if (!obj->ops->mmap_ops) {
		err = check_absent(addr, obj->base.size);
		if (err) {
			pr_err("%s: was not absent\n", obj->mm.region->name);
			goto out_unmap;
		}
	} else {
		/* ttm allows access to evicted regions by design */

		err = check_present(addr, obj->base.size);
		if (err) {
			pr_err("%s: was not present\n", obj->mm.region->name);
			goto out_unmap;
		}
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_access),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}