/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"

#include "i915_random.h"
#include "i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "igt_flush_test.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
        i915_gem_drain_freed_objects(i915);
}

static void fake_free_pages(struct drm_i915_gem_object *obj,
                            struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

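/*
 * Build a "fake" sg_table for the object: one scatterlist entry per 2GiB
 * (BIT(31)) chunk of the object, each pointing at the same biased pfn.
 * No real backing pages are ever allocated, which lets the tests below
 * exercise enormous GTT ranges without consuming RAM.
 */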
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
        struct sg_table *pages;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        typeof(obj->base.size) rem;

        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
                return -ENOMEM;

        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
                return -ENOMEM;
        }

        sg_page_sizes = 0;
        rem = obj->base.size;
        for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                unsigned long len = min_t(typeof(rem), rem, BIT(31));

                GEM_BUG_ON(!len);
                sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                sg_dma_address(sg) = page_to_phys(sg_page(sg));
                sg_dma_len(sg) = len;
                sg_page_sizes |= len;

                rem -= len;
        }
        GEM_BUG_ON(rem);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        fake_free_pages(obj, pages);
        obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
        .name = "fake-gem",
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = fake_get_pages,
        .put_pages = fake_put_pages,
};

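/*
 * Create a GEM object whose backing store is the fake sg_table above.
 * The object is volatile and lives entirely in the CPU domain; its pages
 * are pinned once up front so we know the fake backing store can be
 * instantiated before the object is handed to a test.
 */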
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
        static struct lock_class_key lock_class;
        struct drm_i915_gem_object *obj;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                goto err;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops, &lock_class);

        i915_gem_object_set_volatile(obj);

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;

        /* Preallocate the "backing storage" */
        if (i915_gem_object_pin_pages(obj))
                goto err_obj;

        i915_gem_object_unpin_pages(obj);
        return obj;

err_obj:
        i915_gem_object_put(obj);
err:
        return ERR_PTR(-ENOMEM);
}

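/*
 * Walk the ppgtt va range in power-of-four steps, allocating and then
 * clearing page tables for each size, first from scratch and then
 * incrementally, capped at totalram so the test cannot oom the host.
 */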
static int igt_ppgtt_alloc(void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct i915_ppgtt *ppgtt;
        u64 size, last, limit;
        int err = 0;

        /* Allocate a ppgtt and try to fill the entire range */

        if (!HAS_PPGTT(dev_priv))
                return 0;

        ppgtt = i915_ppgtt_create(&dev_priv->gt);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        if (!ppgtt->vm.allocate_va_range)
                goto err_ppgtt_cleanup;

        /*
         * While we only allocate the page tables here, and so could
         * address a much larger GTT than would actually fit into RAM,
         * a practical limit is the number of physical pages in the
         * system. This should ensure that we do not run into the
         * oomkiller during the test and wilfully take down the machine.
         */
        limit = totalram_pages() << PAGE_SHIFT;
        limit = min(ppgtt->vm.total, limit);

        /* Check we can allocate the entire range */
        for (size = 4096; size <= limit; size <<= 2) {
                struct i915_vm_pt_stash stash = {};

                err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
                if (err)
                        goto err_ppgtt_cleanup;

                err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
                if (err) {
                        i915_vm_free_pt_stash(&ppgtt->vm, &stash);
                        goto err_ppgtt_cleanup;
                }

                ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
                cond_resched();

                ppgtt->vm.clear_range(&ppgtt->vm, 0, size);

                i915_vm_free_pt_stash(&ppgtt->vm, &stash);
        }

        /* Check we can incrementally allocate the entire range */
        for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
                struct i915_vm_pt_stash stash = {};

                err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
                if (err)
                        goto err_ppgtt_cleanup;

                err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
                if (err) {
                        i915_vm_free_pt_stash(&ppgtt->vm, &stash);
                        goto err_ppgtt_cleanup;
                }

                ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
                                            last, size - last);
                cond_resched();

                i915_vm_free_pt_stash(&ppgtt->vm, &stash);
        }

err_ppgtt_cleanup:
        i915_vm_put(&ppgtt->vm);
        return err;
}

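/*
 * Exercise vm->insert_entries() and vm->clear_range() directly, bypassing
 * the vma API, by pointing a mock vma at random offsets throughout the
 * hole with ever larger power-of-two sized objects.
 */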
static int lowlevel_hole(struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        I915_RND_STATE(seed_prng);
        struct i915_vma *mock_vma;
        unsigned int size;

        mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
        if (!mock_vma)
                return -ENOMEM;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                I915_RND_SUBSTATE(prng, seed_prng);
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                u64 hole_size;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count) {
                        kfree(mock_vma);
                        return -ENOMEM;
                }
                GEM_BUG_ON(!order);

                GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
                GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(vm->i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                GEM_BUG_ON(obj->base.size != BIT_ULL(size));

                if (i915_gem_object_pin_pages(obj)) {
                        i915_gem_object_put(obj);
                        kfree(order);
                        break;
                }

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

                        if (igt_timeout(end_time,
                                        "%s timed out before %d/%d\n",
                                        __func__, n, count)) {
                                hole_end = hole_start; /* quit */
                                break;
                        }

                        if (vm->allocate_va_range) {
                                struct i915_vm_pt_stash stash = {};

                                if (i915_vm_alloc_pt_stash(vm, &stash,
                                                           BIT_ULL(size)))
                                        break;

                                if (i915_vm_pin_pt_stash(vm, &stash)) {
                                        i915_vm_free_pt_stash(vm, &stash);
                                        break;
                                }

                                vm->allocate_va_range(vm, &stash,
                                                      addr, BIT_ULL(size));

                                i915_vm_free_pt_stash(vm, &stash);
                        }

                        mock_vma->pages = obj->mm.pages;
                        mock_vma->node.size = BIT_ULL(size);
                        mock_vma->node.start = addr;

                        with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
                                vm->insert_entries(vm, mock_vma,
                                                   I915_CACHE_NONE, 0);
                }
                count = n;

                i915_random_reorder(order, count, &prng);
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
                        with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
                                vm->clear_range(vm, addr, BIT_ULL(size));
                }

                i915_gem_object_unpin_pages(obj);
                i915_gem_object_put(obj);

                kfree(order);

                cleanup_freed_objects(vm->i915);
        }

        kfree(mock_vma);
        return 0;
}

static void close_object_list(struct list_head *objects,
                              struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj, *on;
        int ignored;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;

                vma = i915_vma_instance(obj, vm, NULL);
                if (!IS_ERR(vma))
                        ignored = i915_vma_unbind(vma);

                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }
}

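/*
 * Fill the hole with objects of prime-numbered page counts, binding them
 * top-down and bottom-up from either edge, then re-verify and unbind in
 * both list orders to check nothing was misplaced or lost along the way.
 */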
static int fill_hole(struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        struct drm_i915_gem_object *obj;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
        const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
        unsigned long npages, prime, flags;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /* Try binding many VMA working inwards from either edge */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(prime, 2, max_step) {
                for (npages = 1; npages <= max_pages; npages *= prime) {
                        const u64 full_size = npages << PAGE_SHIFT;
                        const struct {
                                const char *name;
                                u64 offset;
                                int step;
                        } phases[] = {
                                { "top-down", hole_end, -1, },
                                { "bottom-up", hole_start, 1, },
                                { }
                        }, *p;

                        obj = fake_dma_object(vm->i915, full_size);
                        if (IS_ERR(obj))
                                break;

                        list_add(&obj->st_link, &objects);

                        /* Align differing sized objects against the edges, and
                         * check we don't walk off into the void when binding
                         * them into the GTT.
                         */
                        for (p = phases; p->name; p++) {
                                u64 offset;

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }
                        }

                        if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
                                        __func__, npages, prime)) {
                                err = -EINTR;
                                goto err;
                        }
                }

                close_object_list(&objects, vm);
                cleanup_freed_objects(vm->i915);
        }

        return 0;

err:
        close_object_list(&objects, vm);
        return err;
}

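/*
 * Slide a single object of each prime page count across the hole,
 * binding, verifying and unbinding it at every step.
 */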
static int walk_hole(struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
        unsigned long flags;
        u64 size;

        /* Try binding a single VMA in different positions within the hole */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(size, 1, max_pages) {
                struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
                u64 addr;
                int err = 0;

                obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
                if (IS_ERR(obj))
                        break;

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_put;
                }

                for (addr = hole_start;
                     addr + obj->base.size < hole_end;
                     addr += obj->base.size) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
                                       __func__, addr, vma->size,
                                       hole_start, hole_end, err);
                                goto err_put;
                        }
                        i915_vma_unpin(vma);

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                err = -EINVAL;
                                goto err_put;
                        }

                        err = i915_vma_unbind(vma);
                        if (err) {
                                pr_err("%s unbind failed at %llx + %llx with err=%d\n",
                                       __func__, addr, vma->size, err);
                                goto err_put;
                        }

                        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

                        if (igt_timeout(end_time,
                                        "%s timed out at %llx\n",
                                        __func__, addr)) {
                                err = -EINTR;
                                goto err_put;
                        }
                }

err_put:
                i915_gem_object_put(obj);
                if (err)
                        return err;

                cleanup_freed_objects(vm->i915);
        }

        return 0;
}

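/*
 * Pin a two-page object so that it straddles every power-of-two ("pot")
 * boundary within the hole, checking placement around each alignment step.
 */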
static int pot_hole(struct i915_address_space *vm,
                    u64 hole_start, u64 hole_end,
                    unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned long flags;
        unsigned int pot;
        int err = 0;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        /* Insert a pair of pages across every pot boundary within the hole */
        for (pot = fls64(hole_end - 1) - 1;
             pot > ilog2(2 * I915_GTT_PAGE_SIZE);
             pot--) {
                u64 step = BIT_ULL(pot);
                u64 addr;

                for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr += step) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr,
                                       hole_start, hole_end,
                                       err);
                                goto err_obj;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err_obj;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);
                }

                if (igt_timeout(end_time,
                                "%s timed out after %d/%d\n",
                                __func__, pot, fls64(hole_end - 1) - 1)) {
                        err = -EINTR;
                        goto err_obj;
                }
        }

err_obj:
        i915_gem_object_put(obj);
        return err;
}

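/*
 * Like lowlevel_hole, but through the full vma pin/unbind API: bind
 * power-of-two sized objects at randomised offsets within the hole.
 */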
static int drunk_hole(struct i915_address_space *vm,
                      u64 hole_start, u64 hole_end,
                      unsigned long end_time)
{
        I915_RND_STATE(prng);
        unsigned int size;
        unsigned long flags;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                struct i915_vma *vma;
                u64 hole_size;
                int err = -ENODEV;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(vm->i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;
                }

                GEM_BUG_ON(vma->size != BIT_ULL(size));

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr, BIT_ULL(size),
                                       hole_start, hole_end,
                                       err);
                                goto err_obj;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, BIT_ULL(size));
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err_obj;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);

                        if (igt_timeout(end_time,
                                        "%s timed out after %d/%d\n",
                                        __func__, n, count)) {
                                err = -EINTR;
                                goto err_obj;
                        }
                }

err_obj:
                i915_gem_object_put(obj);
                kfree(order);
                if (err)
                        return err;

                cleanup_freed_objects(vm->i915);
        }

        return 0;
}

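/*
 * Pack the hole with objects of doubling size. The caller (see
 * shrink_hole) injects page-table allocation faults at random intervals,
 * so each bind must either land at the requested offset or fail outright.
 */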
static int __shrink_hole(struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
        unsigned int order = 12;
        LIST_HEAD(objects);
        int err = 0;
        u64 addr;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (addr = hole_start; addr < hole_end; ) {
                struct i915_vma *vma;
                u64 size = BIT_ULL(order++);

                size = min(size, hole_end - addr);
                obj = fake_dma_object(vm->i915, size);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
                }

                GEM_BUG_ON(vma->size != size);

                err = i915_vma_pin(vma, 0, 0, addr | flags);
                if (err) {
                        pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                               __func__, addr, size, hole_start, hole_end, err);
                        break;
                }

                if (!drm_mm_node_allocated(&vma->node) ||
                    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                        pr_err("%s incorrect at %llx + %llx\n",
                               __func__, addr, size);
                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        err = -EINVAL;
                        break;
                }

                i915_vma_unpin(vma);
                addr += size;

                /*
                 * Since we are injecting allocation faults at random intervals,
                 * wait for this allocation to complete before we change the
                 * fault injection.
                 */
                err = i915_vma_sync(vma);
                if (err)
                        break;

                if (igt_timeout(end_time,
                                "%s timed out at offset %llx [%llx - %llx]\n",
                                __func__, addr, hole_start, hole_end)) {
                        err = -EINTR;
                        break;
                }
        }

        close_object_list(&objects, vm);
        cleanup_freed_objects(vm->i915);
        return err;
}

static int shrink_hole(struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned long prime;
        int err;

        vm->fault_attr.probability = 999;
        atomic_set(&vm->fault_attr.times, -1);

        for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
                vm->fault_attr.interval = prime;
                err = __shrink_hole(vm, hole_start, hole_end, end_time);
                if (err)
                        break;
        }

        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

        return err;
}

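/*
 * The 2M and 1G sizes below are presumably chosen to cover a whole page
 * table and a whole page directory respectively, so that purging the
 * first object releases exactly the structures the second will refault.
 */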
static int shrink_boom(struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned int sizes[] = { SZ_2M, SZ_1G };
        struct drm_i915_gem_object *purge;
        struct drm_i915_gem_object *explode;
        int err;
        int i;

        /*
         * Catch the case which shrink_hole seems to miss. The setup here
         * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
         * ensuring that all vma associated with the respective pd/pdp are
         * unpinned at the time.
         */

        for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
                unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
                unsigned int size = sizes[i];
                struct i915_vma *vma;

                purge = fake_dma_object(vm->i915, size);
                if (IS_ERR(purge))
                        return PTR_ERR(purge);

                vma = i915_vma_instance(purge, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_purge;
                }

                err = i915_vma_pin(vma, 0, 0, flags);
                if (err)
                        goto err_purge;

                /* Should now be ripe for purging */
                i915_vma_unpin(vma);

                explode = fake_dma_object(vm->i915, size);
                if (IS_ERR(explode)) {
                        err = PTR_ERR(explode);
                        goto err_purge;
                }

                vm->fault_attr.probability = 100;
                vm->fault_attr.interval = 1;
                atomic_set(&vm->fault_attr.times, -1);

                vma = i915_vma_instance(explode, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_explode;
                }

                err = i915_vma_pin(vma, 0, 0, flags | size);
                if (err)
                        goto err_explode;

                i915_vma_unpin(vma);

                i915_gem_object_put(purge);
                i915_gem_object_put(explode);

                memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
                cleanup_freed_objects(vm->i915);
        }

        return 0;

err_explode:
        i915_gem_object_put(explode);
err_purge:
        i915_gem_object_put(purge);
        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
        return err;
}

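/*
 * Run one of the hole tests over a freshly created full ppgtt, covering
 * its entire va range [0, vm.total).
 */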
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                          int (*func)(struct i915_address_space *vm,
                                      u64 hole_start, u64 hole_end,
                                      unsigned long end_time))
{
        struct i915_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        struct file *file;
        int err;

        if (!HAS_FULL_PPGTT(dev_priv))
                return 0;

        file = mock_file(dev_priv);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ppgtt = i915_ppgtt_create(&dev_priv->gt);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_free;
        }
        GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
        GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));

        err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);

        i915_vm_put(&ppgtt->vm);

out_free:
        fput(file);
        return err;
}

static int igt_ppgtt_fill(void *arg)
{
        return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
        return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
        return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
        return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
        return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
        return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
        return exercise_ppgtt(arg, shrink_boom);
}

static int sort_holes(void *priv, const struct list_head *A,
                      const struct list_head *B)
{
        struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
        struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

        if (a->start < b->start)
                return -1;
        else
                return 1;
}

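/*
 * Run a hole test over every hole currently present in the ggtt. As each
 * test mutates the drm_mm, the hole list is re-sorted and the walk
 * restarted after every hole, resuming beyond the last end point.
 */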
static int exercise_ggtt(struct drm_i915_private *i915,
                         int (*func)(struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        u64 hole_start, hole_end, last = 0;
        struct drm_mm_node *node;
        IGT_TIMEOUT(end_time);
        int err = 0;

restart:
        list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
        drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;

                if (ggtt->vm.mm.color_adjust)
                        ggtt->vm.mm.color_adjust(node, 0,
                                                 &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;

                err = func(&ggtt->vm, hole_start, hole_end, end_time);
                if (err)
                        break;

                /* As we have manipulated the drm_mm, the list may be corrupt */
                last = hole_end;
                goto restart;
        }

        return err;
}

static int igt_ggtt_fill(void *arg)
{
        return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
        return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
        return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
        return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
        return exercise_ggtt(arg, lowlevel_hole);
}

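/*
 * Check ggtt->vm.insert_page() by aliasing one physical page at many
 * offsets inside the mappable aperture, writing a distinct dword through
 * each mapping in random order and reading the values back in another.
 */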
static int igt_ggtt_page(void *arg)
{
        const unsigned int count = PAGE_SIZE/sizeof(u32);
        I915_RND_STATE(prng);
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;

        if (!i915_ggtt_has_aperture(ggtt))
                return 0;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto out_free;

        memset(&tmp, 0, sizeof(tmp));
        mutex_lock(&ggtt->vm.mutex);
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
                                          count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        mutex_unlock(&ggtt->vm.mutex);
        if (err)
                goto out_unpin;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;

                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, 0),
                                     offset, I915_CACHE_NONE, 0);
        }

        order = i915_random_order(count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_remove;
        }

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
        }
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);

        i915_random_reorder(order, count, &prng);
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;
                u32 val;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);

                if (val != n) {
                        pr_err("insert page failed: found %d, expected %d\n",
                               val, n);
                        err = -EINVAL;
                        break;
                }
        }

        kfree(order);
out_remove:
        ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(&tmp);
        mutex_unlock(&ggtt->vm.mutex);
out_unpin:
        i915_gem_object_unpin_pages(obj);
out_free:
        i915_gem_object_put(obj);
        return err;
}

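/*
 * Mimic just enough of a real bind for the mock tests: pin the backing
 * pages, mark the vma pages active and put the vma on the vm's bound
 * list, without touching any hardware.
 */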
static void track_vma_bind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        __i915_gem_object_pin_pages(obj);

        GEM_BUG_ON(vma->pages);
        atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
        __i915_gem_object_pin_pages(obj);
        vma->pages = obj->mm.pages;

        mutex_lock(&vma->vm->mutex);
        list_add_tail(&vma->vm_link, &vma->vm->bound_list);
        mutex_unlock(&vma->vm->mutex);
}

static int exercise_mock(struct drm_i915_private *i915,
                         int (*func)(struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        const u64 limit = totalram_pages() << PAGE_SHIFT;
        struct i915_address_space *vm;
        struct i915_gem_context *ctx;
        IGT_TIMEOUT(end_time);
        int err;

        ctx = mock_context(i915, "mock");
        if (!ctx)
                return -ENOMEM;

        vm = i915_gem_context_get_vm_rcu(ctx);
        err = func(vm, 0, min(vm->total, limit), end_time);
        i915_vm_put(vm);

        mock_context_close(ctx);
        return err;
}

static int igt_mock_fill(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
        struct i915_ggtt *ggtt = arg;

        return exercise_mock(ggtt->vm.i915, drunk_hole);
}

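/*
 * Three passes: fill the ggtt back-to-back, then reserve at offsets that
 * overlap (and so must evict) the existing nodes, and finally rebind
 * everything at random offsets, each time checking the node landed
 * exactly where requested.
 */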
static int igt_gtt_reserve(void *arg)
{
        struct i915_ggtt *ggtt = arg;
        struct drm_i915_gem_object *obj, *on;
        I915_RND_STATE(prng);
        LIST_HEAD(objects);
        u64 total;
        int err = -ENODEV;

        /* i915_gem_gtt_reserve() tries to reserve the precise range
         * for the node, and evicts if it has to. So our test checks that
         * it can give us the requested space and prevent overlaps.
1322          */
1323
1324         /* Start by filling the GGTT */
1325         for (total = 0;
1326              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1327              total += 2 * I915_GTT_PAGE_SIZE) {
1328                 struct i915_vma *vma;
1329
1330                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1331                                                       2 * PAGE_SIZE);
1332                 if (IS_ERR(obj)) {
1333                         err = PTR_ERR(obj);
1334                         goto out;
1335                 }
1336
1337                 err = i915_gem_object_pin_pages(obj);
1338                 if (err) {
1339                         i915_gem_object_put(obj);
1340                         goto out;
1341                 }
1342
1343                 list_add(&obj->st_link, &objects);
1344
1345                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1346                 if (IS_ERR(vma)) {
1347                         err = PTR_ERR(vma);
1348                         goto out;
1349                 }
1350
1351                 mutex_lock(&ggtt->vm.mutex);
1352                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1353                                            obj->base.size,
1354                                            total,
1355                                            obj->cache_level,
1356                                            0);
1357                 mutex_unlock(&ggtt->vm.mutex);
1358                 if (err) {
1359                         pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1360                                total, ggtt->vm.total, err);
1361                         goto out;
1362                 }
1363                 track_vma_bind(vma);
1364
1365                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1366                 if (vma->node.start != total ||
1367                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1368                         pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1369                                vma->node.start, vma->node.size,
1370                                total, 2*I915_GTT_PAGE_SIZE);
1371                         err = -EINVAL;
1372                         goto out;
1373                 }
1374         }
1375
1376         /* Now we start forcing evictions */
1377         for (total = I915_GTT_PAGE_SIZE;
1378              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1379              total += 2 * I915_GTT_PAGE_SIZE) {
1380                 struct i915_vma *vma;
1381
1382                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1383                                                       2 * PAGE_SIZE);
1384                 if (IS_ERR(obj)) {
1385                         err = PTR_ERR(obj);
1386                         goto out;
1387                 }
1388
1389                 err = i915_gem_object_pin_pages(obj);
1390                 if (err) {
1391                         i915_gem_object_put(obj);
1392                         goto out;
1393                 }
1394
1395                 list_add(&obj->st_link, &objects);
1396
1397                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1398                 if (IS_ERR(vma)) {
1399                         err = PTR_ERR(vma);
1400                         goto out;
1401                 }
1402
1403                 mutex_lock(&ggtt->vm.mutex);
1404                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1405                                            obj->base.size,
1406                                            total,
1407                                            obj->cache_level,
1408                                            0);
1409                 mutex_unlock(&ggtt->vm.mutex);
1410                 if (err) {
1411                         pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1412                                total, ggtt->vm.total, err);
1413                         goto out;
1414                 }
1415                 track_vma_bind(vma);
1416
1417                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1418                 if (vma->node.start != total ||
1419                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1420                         pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1421                                vma->node.start, vma->node.size,
1422                                total, 2*I915_GTT_PAGE_SIZE);
1423                         err = -EINVAL;
1424                         goto out;
1425                 }
1426         }
1427
1428         /* And then try at random */
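             /* (unbind each node and re-reserve it at a random, aligned offset, evicting anything in the way) */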
1429         list_for_each_entry_safe(obj, on, &objects, st_link) {
1430                 struct i915_vma *vma;
1431                 u64 offset;
1432
1433                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1434                 if (IS_ERR(vma)) {
1435                         err = PTR_ERR(vma);
1436                         goto out;
1437                 }
1438
1439                 err = i915_vma_unbind(vma);
1440                 if (err) {
1441                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1442                         goto out;
1443                 }
1444
1445                 offset = igt_random_offset(&prng,
1446                                            0, ggtt->vm.total,
1447                                            2 * I915_GTT_PAGE_SIZE,
1448                                            I915_GTT_MIN_ALIGNMENT);
1449
1450                 mutex_lock(&ggtt->vm.mutex);
1451                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1452                                            obj->base.size,
1453                                            offset,
1454                                            obj->cache_level,
1455                                            0);
1456                 mutex_unlock(&ggtt->vm.mutex);
1457                 if (err) {
1458                         pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1459                                offset, ggtt->vm.total, err);
1460                         goto out;
1461                 }
1462                 track_vma_bind(vma);
1463
1464                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1465                 if (vma->node.start != offset ||
1466                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1467                         pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1468                                vma->node.start, vma->node.size,
1469                                offset, 2*I915_GTT_PAGE_SIZE);
1470                         err = -EINVAL;
1471                         goto out;
1472                 }
1473         }
1474
1475 out:
1476         list_for_each_entry_safe(obj, on, &objects, st_link) {
1477                 i915_gem_object_unpin_pages(obj);
1478                 i915_gem_object_put(obj);
1479         }
1480         return err;
1481 }
1482
1483 static int igt_gtt_insert(void *arg)
1484 {
1485         struct i915_ggtt *ggtt = arg;
1486         struct drm_i915_gem_object *obj, *on;
1487         struct drm_mm_node tmp = {};
1488         const struct invalid_insert {
1489                 u64 size;
1490                 u64 alignment;
1491                 u64 start, end;
1492         } invalid_insert[] = {
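                     /* size exceeds the entire GTT */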
1493                 {
1494                         ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1495                         0, ggtt->vm.total,
1496                 },
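                     /* size larger than the [start, end) range */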
1497                 {
1498                         2*I915_GTT_PAGE_SIZE, 0,
1499                         0, I915_GTT_PAGE_SIZE,
1500                 },
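                     /* wrapped (effectively negative) size */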
1501                 {
1502                         -(u64)I915_GTT_PAGE_SIZE, 0,
1503                         0, 4*I915_GTT_PAGE_SIZE,
1504                 },
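                     /* wrapped size combined with an alignment constraint */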
1505                 {
1506                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1507                         0, 4*I915_GTT_PAGE_SIZE,
1508                 },
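                     /* no offset satisfying the alignment exists inside [start, end) */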
1509                 {
1510                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1511                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1512                 },
1513                 {}
1514         }, *ii;
1515         LIST_HEAD(objects);
1516         u64 total;
1517         int err = -ENODEV;
1518
1519         /* i915_gem_gtt_insert() tries to allocate some free space in the
1520          * GTT for the node, evicting if required.
1521          */
1522
1523         /* Check a couple of obviously invalid requests */
1524         for (ii = invalid_insert; ii->size; ii++) {
1525                 mutex_lock(&ggtt->vm.mutex);
1526                 err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1527                                           ii->size, ii->alignment,
1528                                           I915_COLOR_UNEVICTABLE,
1529                                           ii->start, ii->end,
1530                                           0);
1531                 mutex_unlock(&ggtt->vm.mutex);
1532                 if (err != -ENOSPC) {
1533                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) did not fail with -ENOSPC (err=%d)\n",
1534                                ii->size, ii->alignment, ii->start, ii->end,
1535                                err);
1536                         return -EINVAL;
1537                 }
1538         }
1539
1540         /* Start by filling the GGTT */
1541         for (total = 0;
1542              total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1543              total += I915_GTT_PAGE_SIZE) {
1544                 struct i915_vma *vma;
1545
1546                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1547                                                       I915_GTT_PAGE_SIZE);
1548                 if (IS_ERR(obj)) {
1549                         err = PTR_ERR(obj);
1550                         goto out;
1551                 }
1552
1553                 err = i915_gem_object_pin_pages(obj);
1554                 if (err) {
1555                         i915_gem_object_put(obj);
1556                         goto out;
1557                 }
1558
1559                 list_add(&obj->st_link, &objects);
1560
1561                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1562                 if (IS_ERR(vma)) {
1563                         err = PTR_ERR(vma);
1564                         goto out;
1565                 }
1566
1567                 mutex_lock(&ggtt->vm.mutex);
1568                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1569                                           obj->base.size, 0, obj->cache_level,
1570                                           0, ggtt->vm.total,
1571                                           0);
1572                 mutex_unlock(&ggtt->vm.mutex);
1573                 if (err == -ENOSPC) {
1574                         /* maxed out the GGTT space */
1575                         i915_gem_object_put(obj);
1576                         break;
1577                 }
1578                 if (err) {
1579                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1580                                total, ggtt->vm.total, err);
1581                         goto out;
1582                 }
1583                 track_vma_bind(vma);
1584                 __i915_vma_pin(vma);
1585
1586                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1587         }
1588
1589         list_for_each_entry(obj, &objects, st_link) {
1590                 struct i915_vma *vma;
1591
1592                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1593                 if (IS_ERR(vma)) {
1594                         err = PTR_ERR(vma);
1595                         goto out;
1596                 }
1597
1598                 if (!drm_mm_node_allocated(&vma->node)) {
1599                         pr_err("VMA was unexpectedly evicted!\n");
1600                         err = -EINVAL;
1601                         goto out;
1602                 }
1603
1604                 __i915_vma_unpin(vma);
1605         }
1606
1607         /* If we then reinsert, we should find the same hole */
1608         list_for_each_entry_safe(obj, on, &objects, st_link) {
1609                 struct i915_vma *vma;
1610                 u64 offset;
1611
1612                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1613                 if (IS_ERR(vma)) {
1614                         err = PTR_ERR(vma);
1615                         goto out;
1616                 }
1617
1618                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1619                 offset = vma->node.start;
1620
1621                 err = i915_vma_unbind(vma);
1622                 if (err) {
1623                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1624                         goto out;
1625                 }
1626
1627                 mutex_lock(&ggtt->vm.mutex);
1628                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1629                                           obj->base.size, 0, obj->cache_level,
1630                                           0, ggtt->vm.total,
1631                                           0);
1632                 mutex_unlock(&ggtt->vm.mutex);
1633                 if (err) {
1634                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1635                                offset, ggtt->vm.total, err);
1636                         goto out;
1637                 }
1638                 track_vma_bind(vma);
1639
1640                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1641                 if (vma->node.start != offset) {
1642                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1643                                offset, vma->node.start);
1644                         err = -EINVAL;
1645                         goto out;
1646                 }
1647         }
1648
1649         /* And then force evictions */
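             /* (the GGTT is already full of single pages, so every 2-page insert must evict) */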
1650         for (total = 0;
1651              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1652              total += 2 * I915_GTT_PAGE_SIZE) {
1653                 struct i915_vma *vma;
1654
1655                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1656                                                       2 * I915_GTT_PAGE_SIZE);
1657                 if (IS_ERR(obj)) {
1658                         err = PTR_ERR(obj);
1659                         goto out;
1660                 }
1661
1662                 err = i915_gem_object_pin_pages(obj);
1663                 if (err) {
1664                         i915_gem_object_put(obj);
1665                         goto out;
1666                 }
1667
1668                 list_add(&obj->st_link, &objects);
1669
1670                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1671                 if (IS_ERR(vma)) {
1672                         err = PTR_ERR(vma);
1673                         goto out;
1674                 }
1675
1676                 mutex_lock(&ggtt->vm.mutex);
1677                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1678                                           obj->base.size, 0, obj->cache_level,
1679                                           0, ggtt->vm.total,
1680                                           0);
1681                 mutex_unlock(&ggtt->vm.mutex);
1682                 if (err) {
1683                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1684                                total, ggtt->vm.total, err);
1685                         goto out;
1686                 }
1687                 track_vma_bind(vma);
1688
1689                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1690         }
1691
1692 out:
1693         list_for_each_entry_safe(obj, on, &objects, st_link) {
1694                 i915_gem_object_unpin_pages(obj);
1695                 i915_gem_object_put(obj);
1696         }
1697         return err;
1698 }
1699
1700 int i915_gem_gtt_mock_selftests(void)
1701 {
1702         static const struct i915_subtest tests[] = {
1703                 SUBTEST(igt_mock_drunk),
1704                 SUBTEST(igt_mock_walk),
1705                 SUBTEST(igt_mock_pot),
1706                 SUBTEST(igt_mock_fill),
1707                 SUBTEST(igt_gtt_reserve),
1708                 SUBTEST(igt_gtt_insert),
1709         };
1710         struct drm_i915_private *i915;
1711         struct i915_ggtt *ggtt;
1712         int err;
1713
1714         i915 = mock_gem_device();
1715         if (!i915)
1716                 return -ENOMEM;
1717
1718         ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1719         if (!ggtt) {
1720                 err = -ENOMEM;
1721                 goto out_put;
1722         }
1723         mock_init_ggtt(i915, ggtt);
1724
1725         err = i915_subtests(tests, ggtt);
1726
1727         mock_device_flush(i915);
1728         i915_gem_drain_freed_objects(i915);
1729         mock_fini_ggtt(ggtt);
1730         kfree(ggtt);
1731 out_put:
1732         mock_destroy_device(i915);
1733         return err;
1734 }
1735
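     /*
      * Submit an empty request on the context and wait up to 200ms for it
      * to complete, so that everything previously queued has drained; a
      * timeout is reported back as -EIO.
      */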
1736 static int context_sync(struct intel_context *ce)
1737 {
1738         struct i915_request *rq;
1739         long timeout;
1740
1741         rq = intel_context_create_request(ce);
1742         if (IS_ERR(rq))
1743                 return PTR_ERR(rq);
1744
1745         i915_request_get(rq);
1746         i915_request_add(rq);
1747
1748         timeout = i915_request_wait(rq, 0, HZ / 5);
1749         i915_request_put(rq);
1750
1751         return timeout < 0 ? -EIO : 0;
1752 }
1753
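     /*
      * Emit an MI_BATCH_BUFFER_START pointing at @addr. The optional init
      * breadcrumb lets us tell a batch that hung apart from one that never
      * started executing.
      */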
1754 static struct i915_request *
1755 submit_batch(struct intel_context *ce, u64 addr)
1756 {
1757         struct i915_request *rq;
1758         int err;
1759
1760         rq = intel_context_create_request(ce);
1761         if (IS_ERR(rq))
1762                 return rq;
1763
1764         err = 0;
1765         if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1766                 err = rq->engine->emit_init_breadcrumb(rq);
1767         if (err == 0)
1768                 err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1769
1770         if (err == 0)
1771                 i915_request_get(rq);
1772         i915_request_add(rq);
1773
1774         return err ? ERR_PTR(err) : rq;
1775 }
1776
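     /*
      * Each batch occupies a 64-byte slot; dword 4 of slot i is the
      * "spinner". While it reads MI_NOOP, the MI_BATCH_BUFFER_START that
      * follows loops the batch back on itself; end_spin() rewrites it to
      * MI_BATCH_BUFFER_END so the batch can complete.
      */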
1777 static u32 *spinner(u32 *batch, int i)
1778 {
1779         return batch + i * 64 / sizeof(*batch) + 4;
1780 }
1781
1782 static void end_spin(u32 *batch, int i)
1783 {
1784         *spinner(batch, i) = MI_BATCH_BUFFER_END;
1785         wmb();
1786 }
1787
1788 static int igt_cs_tlb(void *arg)
1789 {
1790         const unsigned int count = PAGE_SIZE / 64;
1791         const unsigned int chunk_size = count * PAGE_SIZE;
1792         struct drm_i915_private *i915 = arg;
1793         struct drm_i915_gem_object *bbe, *act, *out;
1794         struct i915_gem_engines_iter it;
1795         struct i915_address_space *vm;
1796         struct i915_gem_context *ctx;
1797         struct intel_context *ce;
1798         struct i915_vma *vma;
1799         I915_RND_STATE(prng);
1800         struct file *file;
1801         unsigned int i;
1802         u32 *result;
1803         u32 *batch;
1804         int err = 0;
1805
1806         /*
1807          * Our mission here is to fool the hardware into executing something
1808          * from scratch, as it has not seen the batch move (due to a missing
1809          * TLB invalidate).
1810          */
1811
1812         file = mock_file(i915);
1813         if (IS_ERR(file))
1814                 return PTR_ERR(file);
1815
1816         ctx = live_context(i915, file);
1817         if (IS_ERR(ctx)) {
1818                 err = PTR_ERR(ctx);
1819                 goto out_unlock;
1820         }
1821
1822         vm = i915_gem_context_get_vm_rcu(ctx);
1823         if (i915_is_ggtt(vm))
1824                 goto out_vm;
1825
1826         /* Create two pages: a dummy used to prefill the TLB, and the intended target */
1827         bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1828         if (IS_ERR(bbe)) {
1829                 err = PTR_ERR(bbe);
1830                 goto out_vm;
1831         }
1832
1833         batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1834         if (IS_ERR(batch)) {
1835                 err = PTR_ERR(batch);
1836                 goto out_put_bbe;
1837         }
1838         memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1839         i915_gem_object_flush_map(bbe);
1840         i915_gem_object_unpin_map(bbe);
1841
1842         act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1843         if (IS_ERR(act)) {
1844                 err = PTR_ERR(act);
1845                 goto out_put_bbe;
1846         }
1847
1848         /* Track the execution of each request by writing into a different slot */
1849         batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1850         if (IS_ERR(batch)) {
1851                 err = PTR_ERR(batch);
1852                 goto out_put_act;
1853         }
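             /*
              * Each 64-byte slot stores its index into the result page
              * (pinned at the very top of the address space) and then
              * spins via a self-referencing MI_BATCH_BUFFER_START until
              * end_spin() releases it.
              */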
1854         for (i = 0; i < count; i++) {
1855                 u32 *cs = batch + i * 64 / sizeof(*cs);
1856                 u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1857
1858                 GEM_BUG_ON(INTEL_GEN(i915) < 6);
1859                 cs[0] = MI_STORE_DWORD_IMM_GEN4;
1860                 if (INTEL_GEN(i915) >= 8) {
1861                         cs[1] = lower_32_bits(addr);
1862                         cs[2] = upper_32_bits(addr);
1863                         cs[3] = i;
1864                         cs[4] = MI_NOOP;
1865                         cs[5] = MI_BATCH_BUFFER_START_GEN8;
1866                 } else {
1867                         cs[1] = 0;
1868                         cs[2] = lower_32_bits(addr);
1869                         cs[3] = i;
1870                         cs[4] = MI_NOOP;
1871                         cs[5] = MI_BATCH_BUFFER_START;
1872                 }
1873         }
1874
1875         out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1876         if (IS_ERR(out)) {
1877                 err = PTR_ERR(out);
1878                 goto out_put_batch;
1879         }
1880         i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1881
1882         vma = i915_vma_instance(out, vm, NULL);
1883         if (IS_ERR(vma)) {
1884                 err = PTR_ERR(vma);
1885                 goto out_put_out;
1886         }
1887
1888         err = i915_vma_pin(vma, 0, 0,
1889                            PIN_USER |
1890                            PIN_OFFSET_FIXED |
1891                            (vm->total - PAGE_SIZE));
1892         if (err)
1893                 goto out_put_out;
1894         GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1895
1896         result = i915_gem_object_pin_map(out, I915_MAP_WB);
1897         if (IS_ERR(result)) {
1898                 err = PTR_ERR(result);
1899                 goto out_put_out;
1900         }
1901
1902         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1903                 IGT_TIMEOUT(end_time);
1904                 unsigned long pass = 0;
1905
1906                 if (!intel_engine_can_store_dword(ce->engine))
1907                         continue;
1908
1909                 while (!__igt_timeout(end_time, NULL)) {
1910                         struct i915_vm_pt_stash stash = {};
1911                         struct i915_request *rq;
1912                         u64 offset;
1913
1914                         offset = igt_random_offset(&prng,
1915                                                    0, vm->total - PAGE_SIZE,
1916                                                    chunk_size, PAGE_SIZE);
1917
1918                         memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1919
1920                         vma = i915_vma_instance(bbe, vm, NULL);
1921                         if (IS_ERR(vma)) {
1922                                 err = PTR_ERR(vma);
1923                                 goto end;
1924                         }
1925
1926                         err = vma->ops->set_pages(vma);
1927                         if (err)
1928                                 goto end;
1929
1930                         err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
1931                         if (err)
1932                                 goto end;
1933
1934                         err = i915_vm_pin_pt_stash(vm, &stash);
1935                         if (err) {
1936                                 i915_vm_free_pt_stash(vm, &stash);
1937                                 goto end;
1938                         }
1939
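                             /*
                              * Pre-allocate the page tables for the whole
                              * chunk by hand; together with the raw
                              * insert_entries() calls below this bypasses
                              * the usual VMA bind paths, so no TLB
                              * invalidation happens on our behalf.
                              */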
1940                         vm->allocate_va_range(vm, &stash, offset, chunk_size);
1941
1942                         i915_vm_free_pt_stash(vm, &stash);
1943
1944                         /* Prime the TLB with the dummy pages */
1945                         for (i = 0; i < count; i++) {
1946                                 vma->node.start = offset + i * PAGE_SIZE;
1947                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1948
1949                                 rq = submit_batch(ce, vma->node.start);
1950                                 if (IS_ERR(rq)) {
1951                                         err = PTR_ERR(rq);
1952                                         goto end;
1953                                 }
1954                                 i915_request_put(rq);
1955                         }
1956
1957                         vma->ops->clear_pages(vma);
1958
1959                         err = context_sync(ce);
1960                         if (err) {
1961                                 pr_err("%s: dummy setup timed out\n",
1962                                        ce->engine->name);
1963                                 goto end;
1964                         }
1965
1966                         vma = i915_vma_instance(act, vm, NULL);
1967                         if (IS_ERR(vma)) {
1968                                 err = PTR_ERR(vma);
1969                                 goto end;
1970                         }
1971
1972                         err = vma->ops->set_pages(vma);
1973                         if (err)
1974                                 goto end;
1975
1976                         /* Swap the PTEs over to the target batches, leaving the TLB stale */
1977                         for (i = 0; i < count; i++) {
1978                                 struct i915_request *rq;
1979                                 u32 *cs = batch + i * 64 / sizeof(*cs);
1980                                 u64 addr;
1981
1982                                 vma->node.start = offset + i * PAGE_SIZE;
1983                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1984
1985                                 addr = vma->node.start + i * 64;
1986                                 cs[4] = MI_NOOP;
1987                                 cs[6] = lower_32_bits(addr);
1988                                 cs[7] = upper_32_bits(addr);
1989                                 wmb();
1990
1991                                 rq = submit_batch(ce, addr);
1992                                 if (IS_ERR(rq)) {
1993                                         err = PTR_ERR(rq);
1994                                         goto end;
1995                                 }
1996
1997                                 /* Wait until the context chain has started */
1998                                 if (i == 0) {
1999                                         while (READ_ONCE(result[i]) &&
2000                                                !i915_request_completed(rq))
2001                                                 cond_resched();
2002                                 } else {
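                                             /* release the previous spinner so the chain advances */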
2003                                         end_spin(batch, i - 1);
2004                                 }
2005
2006                                 i915_request_put(rq);
2007                         }
2008                         end_spin(batch, count - 1);
2009
2010                         vma->ops->clear_pages(vma);
2011
2012                         err = context_sync(ce);
2013                         if (err) {
2014                                 pr_err("%s: writes timed out\n",
2015                                        ce->engine->name);
2016                                 goto end;
2017                         }
2018
2019                         for (i = 0; i < count; i++) {
2020                                 if (result[i] != i) {
2021                                         pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2022                                                ce->engine->name, pass,
2023                                                offset, i, result[i], i);
2024                                         err = -EINVAL;
2025                                         goto end;
2026                                 }
2027                         }
2028
2029                         vm->clear_range(vm, offset, chunk_size);
2030                         pass++;
2031                 }
2032         }
2033 end:
2034         if (igt_flush_test(i915))
2035                 err = -EIO;
2036         i915_gem_context_unlock_engines(ctx);
2037         i915_gem_object_unpin_map(out);
2038 out_put_out:
2039         i915_gem_object_put(out);
2040 out_put_batch:
2041         i915_gem_object_unpin_map(act);
2042 out_put_act:
2043         i915_gem_object_put(act);
2044 out_put_bbe:
2045         i915_gem_object_put(bbe);
2046 out_vm:
2047         i915_vm_put(vm);
2048 out_unlock:
2049         fput(file);
2050         return err;
2051 }
2052
2053 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2054 {
2055         static const struct i915_subtest tests[] = {
2056                 SUBTEST(igt_ppgtt_alloc),
2057                 SUBTEST(igt_ppgtt_lowlevel),
2058                 SUBTEST(igt_ppgtt_drunk),
2059                 SUBTEST(igt_ppgtt_walk),
2060                 SUBTEST(igt_ppgtt_pot),
2061                 SUBTEST(igt_ppgtt_fill),
2062                 SUBTEST(igt_ppgtt_shrink),
2063                 SUBTEST(igt_ppgtt_shrink_boom),
2064                 SUBTEST(igt_ggtt_lowlevel),
2065                 SUBTEST(igt_ggtt_drunk),
2066                 SUBTEST(igt_ggtt_walk),
2067                 SUBTEST(igt_ggtt_pot),
2068                 SUBTEST(igt_ggtt_fill),
2069                 SUBTEST(igt_ggtt_page),
2070                 SUBTEST(igt_cs_tlb),
2071         };
2072
2073         GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2074
2075         return i915_subtests(tests, i915);
2076 }