[platform/kernel/linux-rpi.git] / drivers / gpu / drm / i915 / selftests / i915_gem_gtt.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27
28 #include "gem/i915_gem_context.h"
29 #include "gem/selftests/mock_context.h"
30 #include "gt/intel_context.h"
31 #include "gt/intel_gpu_commands.h"
32
33 #include "i915_random.h"
34 #include "i915_selftest.h"
35
36 #include "mock_drm.h"
37 #include "mock_gem_device.h"
38 #include "mock_gtt.h"
39 #include "igt_flush_test.h"
40
41 static void cleanup_freed_objects(struct drm_i915_private *i915)
42 {
43         i915_gem_drain_freed_objects(i915);
44 }
45
46 static void fake_free_pages(struct drm_i915_gem_object *obj,
47                             struct sg_table *pages)
48 {
49         sg_free_table(pages);
50         kfree(pages);
51 }
52
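/*
 * fake_get_pages() provides the "backing storage" for the fake objects used
 * throughout these tests: it builds an sg_table describing obj->base.size
 * bytes in chunks of up to 2GiB, with every chunk pointing at the same bias
 * pfn and a made-up DMA address. No real memory is allocated, which lets
 * the tests create objects far larger than available RAM.
 */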
53 static int fake_get_pages(struct drm_i915_gem_object *obj)
54 {
55 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
56 #define PFN_BIAS 0x1000
57         struct sg_table *pages;
58         struct scatterlist *sg;
59         unsigned int sg_page_sizes;
60         typeof(obj->base.size) rem;
61
62         pages = kmalloc(sizeof(*pages), GFP);
63         if (!pages)
64                 return -ENOMEM;
65
66         rem = round_up(obj->base.size, BIT(31)) >> 31;
67         if (sg_alloc_table(pages, rem, GFP)) {
68                 kfree(pages);
69                 return -ENOMEM;
70         }
71
72         sg_page_sizes = 0;
73         rem = obj->base.size;
74         for (sg = pages->sgl; sg; sg = sg_next(sg)) {
75                 unsigned long len = min_t(typeof(rem), rem, BIT(31));
76
77                 GEM_BUG_ON(!len);
78                 sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
79                 sg_dma_address(sg) = page_to_phys(sg_page(sg));
80                 sg_dma_len(sg) = len;
81                 sg_page_sizes |= len;
82
83                 rem -= len;
84         }
85         GEM_BUG_ON(rem);
86
87         __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
88
89         return 0;
90 #undef GFP
91 }
92
93 static void fake_put_pages(struct drm_i915_gem_object *obj,
94                            struct sg_table *pages)
95 {
96         fake_free_pages(obj, pages);
97         obj->mm.dirty = false;
98 }
99
100 static const struct drm_i915_gem_object_ops fake_ops = {
101         .name = "fake-gem",
102         .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
103         .get_pages = fake_get_pages,
104         .put_pages = fake_put_pages,
105 };
106
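/*
 * Create a GEM object of the requested size backed by fake_get_pages(),
 * i.e. with no real storage behind it. The object is marked volatile and
 * its fake backing store is instantiated (and immediately unpinned) during
 * creation, so the tests can pretend to bind objects far larger than
 * physical memory.
 */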
107 static struct drm_i915_gem_object *
108 fake_dma_object(struct drm_i915_private *i915, u64 size)
109 {
110         static struct lock_class_key lock_class;
111         struct drm_i915_gem_object *obj;
112
113         GEM_BUG_ON(!size);
114         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
115
116         if (overflows_type(size, obj->base.size))
117                 return ERR_PTR(-E2BIG);
118
119         obj = i915_gem_object_alloc();
120         if (!obj)
121                 goto err;
122
123         drm_gem_private_object_init(&i915->drm, &obj->base, size);
124         i915_gem_object_init(obj, &fake_ops, &lock_class);
125
126         i915_gem_object_set_volatile(obj);
127
128         obj->write_domain = I915_GEM_DOMAIN_CPU;
129         obj->read_domains = I915_GEM_DOMAIN_CPU;
130         obj->cache_level = I915_CACHE_NONE;
131
132         /* Preallocate the "backing storage" */
133         if (i915_gem_object_pin_pages(obj))
134                 goto err_obj;
135
136         i915_gem_object_unpin_pages(obj);
137         return obj;
138
139 err_obj:
140         i915_gem_object_put(obj);
141 err:
142         return ERR_PTR(-ENOMEM);
143 }
144
145 static int igt_ppgtt_alloc(void *arg)
146 {
147         struct drm_i915_private *dev_priv = arg;
148         struct i915_ppgtt *ppgtt;
149         u64 size, last, limit;
150         int err = 0;
151
152         /* Allocate a ppgtt and try to fill the entire range */
153
154         if (!HAS_PPGTT(dev_priv))
155                 return 0;
156
157         ppgtt = i915_ppgtt_create(&dev_priv->gt);
158         if (IS_ERR(ppgtt))
159                 return PTR_ERR(ppgtt);
160
161         if (!ppgtt->vm.allocate_va_range)
162                 goto err_ppgtt_cleanup;
163
164         /*
165          * While we only allocate the page tables here and so we could
166          * address a much larger GTT than we could actually fit into
167          * RAM, a practical limit is the number of physical pages in the system.
168          * This should ensure that we do not run into the oomkiller during
169          * the test and take down the machine wilfully.
170          */
171         limit = totalram_pages() << PAGE_SHIFT;
172         limit = min(ppgtt->vm.total, limit);
173
174         /* Check we can allocate the entire range */
175         for (size = 4096; size <= limit; size <<= 2) {
176                 struct i915_vm_pt_stash stash = {};
177
178                 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
179                 if (err)
180                         goto err_ppgtt_cleanup;
181
182                 err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
183                 if (err) {
184                         i915_vm_free_pt_stash(&ppgtt->vm, &stash);
185                         goto err_ppgtt_cleanup;
186                 }
187
188                 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
189                 cond_resched();
190
191                 ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
192
193                 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
194         }
195
196         /* Check we can incrementally allocate the entire range */
197         for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
198                 struct i915_vm_pt_stash stash = {};
199
200                 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
201                 if (err)
202                         goto err_ppgtt_cleanup;
203
204                 err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
205                 if (err) {
206                         i915_vm_free_pt_stash(&ppgtt->vm, &stash);
207                         goto err_ppgtt_cleanup;
208                 }
209
210                 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
211                                             last, size - last);
212                 cond_resched();
213
214                 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
215         }
216
217 err_ppgtt_cleanup:
218         i915_vm_put(&ppgtt->vm);
219         return err;
220 }
221
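/*
 * lowlevel_hole() exercises the raw address space hooks: page tables are
 * allocated via a pt_stash and PTEs are written/cleared through a locally
 * faked vma, bypassing the usual i915_vma binding paths. For every
 * power-of-two object size that fits, the hole is populated at randomly
 * ordered offsets and then cleared again.
 */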
222 static int lowlevel_hole(struct i915_address_space *vm,
223                          u64 hole_start, u64 hole_end,
224                          unsigned long end_time)
225 {
226         I915_RND_STATE(seed_prng);
227         struct i915_vma *mock_vma;
228         unsigned int size;
229
230         mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
231         if (!mock_vma)
232                 return -ENOMEM;
233
234         /* Keep creating larger objects until one cannot fit into the hole */
235         for (size = 12; (hole_end - hole_start) >> size; size++) {
236                 I915_RND_SUBSTATE(prng, seed_prng);
237                 struct drm_i915_gem_object *obj;
238                 unsigned int *order, count, n;
239                 u64 hole_size;
240
241                 hole_size = (hole_end - hole_start) >> size;
242                 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
243                         hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
244                 count = hole_size >> 1;
245                 if (!count) {
246                         pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
247                                  __func__, hole_start, hole_end, size, hole_size);
248                         break;
249                 }
250
251                 do {
252                         order = i915_random_order(count, &prng);
253                         if (order)
254                                 break;
255                 } while (count >>= 1);
256                 if (!count) {
257                         kfree(mock_vma);
258                         return -ENOMEM;
259                 }
260                 GEM_BUG_ON(!order);
261
262                 GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
263                 GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
264
265                 /* Ignore allocation failures (i.e. don't report them as
266                  * a test failure) as we are purposefully allocating very
267                  * large objects without checking that we have sufficient
268                  * memory. We expect to hit -ENOMEM.
269                  */
270
271                 obj = fake_dma_object(vm->i915, BIT_ULL(size));
272                 if (IS_ERR(obj)) {
273                         kfree(order);
274                         break;
275                 }
276
277                 GEM_BUG_ON(obj->base.size != BIT_ULL(size));
278
279                 if (i915_gem_object_pin_pages(obj)) {
280                         i915_gem_object_put(obj);
281                         kfree(order);
282                         break;
283                 }
284
285                 for (n = 0; n < count; n++) {
286                         u64 addr = hole_start + order[n] * BIT_ULL(size);
287                         intel_wakeref_t wakeref;
288
289                         GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
290
291                         if (igt_timeout(end_time,
292                                         "%s timed out before %d/%d\n",
293                                         __func__, n, count)) {
294                                 hole_end = hole_start; /* quit */
295                                 break;
296                         }
297
298                         if (vm->allocate_va_range) {
299                                 struct i915_vm_pt_stash stash = {};
300
301                                 if (i915_vm_alloc_pt_stash(vm, &stash,
302                                                            BIT_ULL(size)))
303                                         break;
304
305                                 if (i915_vm_pin_pt_stash(vm, &stash)) {
306                                         i915_vm_free_pt_stash(vm, &stash);
307                                         break;
308                                 }
309
310                                 vm->allocate_va_range(vm, &stash,
311                                                       addr, BIT_ULL(size));
312
313                                 i915_vm_free_pt_stash(vm, &stash);
314                         }
315
316                         mock_vma->pages = obj->mm.pages;
317                         mock_vma->node.size = BIT_ULL(size);
318                         mock_vma->node.start = addr;
319
320                         with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
321                                 vm->insert_entries(vm, mock_vma,
322                                                    I915_CACHE_NONE, 0);
323                 }
324                 count = n;
325
326                 i915_random_reorder(order, count, &prng);
327                 for (n = 0; n < count; n++) {
328                         u64 addr = hole_start + order[n] * BIT_ULL(size);
329                         intel_wakeref_t wakeref;
330
331                         GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
332                         with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
333                                 vm->clear_range(vm, addr, BIT_ULL(size));
334                 }
335
336                 i915_gem_object_unpin_pages(obj);
337                 i915_gem_object_put(obj);
338
339                 kfree(order);
340
341                 cleanup_freed_objects(vm->i915);
342         }
343
344         kfree(mock_vma);
345         return 0;
346 }
347
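/* Unbind (ignoring errors) and release every object on the list. */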
348 static void close_object_list(struct list_head *objects,
349                               struct i915_address_space *vm)
350 {
351         struct drm_i915_gem_object *obj, *on;
352         int ignored;
353
354         list_for_each_entry_safe(obj, on, objects, st_link) {
355                 struct i915_vma *vma;
356
357                 vma = i915_vma_instance(obj, vm, NULL);
358                 if (!IS_ERR(vma))
359                         ignored = i915_vma_unbind(vma);
360
361                 list_del(&obj->st_link);
362                 i915_gem_object_put(obj);
363         }
364 }
365
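/*
 * fill_hole() builds a list of fake objects whose sizes step through the
 * powers of each prime, then packs them against either end of the hole
 * (top-down and bottom-up phases) with PIN_OFFSET_FIXED. Each phase pins
 * and verifies placement, then re-verifies and unbinds, walking the object
 * list both forwards and in reverse.
 */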
366 static int fill_hole(struct i915_address_space *vm,
367                      u64 hole_start, u64 hole_end,
368                      unsigned long end_time)
369 {
370         const u64 hole_size = hole_end - hole_start;
371         struct drm_i915_gem_object *obj;
372         const unsigned long max_pages =
373                 min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
374         const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
375         unsigned long npages, prime, flags;
376         struct i915_vma *vma;
377         LIST_HEAD(objects);
378         int err;
379
380         /* Try binding many VMAs, working inwards from either edge */
381
382         flags = PIN_OFFSET_FIXED | PIN_USER;
383         if (i915_is_ggtt(vm))
384                 flags |= PIN_GLOBAL;
385
386         for_each_prime_number_from(prime, 2, max_step) {
387                 for (npages = 1; npages <= max_pages; npages *= prime) {
388                         const u64 full_size = npages << PAGE_SHIFT;
389                         const struct {
390                                 const char *name;
391                                 u64 offset;
392                                 int step;
393                         } phases[] = {
394                                 { "top-down", hole_end, -1, },
395                                 { "bottom-up", hole_start, 1, },
396                                 { }
397                         }, *p;
398
399                         obj = fake_dma_object(vm->i915, full_size);
400                         if (IS_ERR(obj))
401                                 break;
402
403                         list_add(&obj->st_link, &objects);
404
405                         /* Align differing sized objects against the edges, and
406                          * check we don't walk off into the void when binding
407                          * them into the GTT.
408                          */
409                         for (p = phases; p->name; p++) {
410                                 u64 offset;
411
412                                 offset = p->offset;
413                                 list_for_each_entry(obj, &objects, st_link) {
414                                         vma = i915_vma_instance(obj, vm, NULL);
415                                         if (IS_ERR(vma))
416                                                 continue;
417
418                                         if (p->step < 0) {
419                                                 if (offset < hole_start + obj->base.size)
420                                                         break;
421                                                 offset -= obj->base.size;
422                                         }
423
424                                         err = i915_vma_pin(vma, 0, 0, offset | flags);
425                                         if (err) {
426                                                 pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
427                                                        __func__, p->name, err, npages, prime, offset);
428                                                 goto err;
429                                         }
430
431                                         if (!drm_mm_node_allocated(&vma->node) ||
432                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
433                                                 pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
434                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
435                                                        offset);
436                                                 err = -EINVAL;
437                                                 goto err;
438                                         }
439
440                                         i915_vma_unpin(vma);
441
442                                         if (p->step > 0) {
443                                                 if (offset + obj->base.size > hole_end)
444                                                         break;
445                                                 offset += obj->base.size;
446                                         }
447                                 }
448
449                                 offset = p->offset;
450                                 list_for_each_entry(obj, &objects, st_link) {
451                                         vma = i915_vma_instance(obj, vm, NULL);
452                                         if (IS_ERR(vma))
453                                                 continue;
454
455                                         if (p->step < 0) {
456                                                 if (offset < hole_start + obj->base.size)
457                                                         break;
458                                                 offset -= obj->base.size;
459                                         }
460
461                                         if (!drm_mm_node_allocated(&vma->node) ||
462                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
463                                                 pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
464                                                        __func__, p->name, vma->node.start, vma->node.size,
465                                                        offset);
466                                                 err = -EINVAL;
467                                                 goto err;
468                                         }
469
470                                         err = i915_vma_unbind(vma);
471                                         if (err) {
472                                                 pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
473                                                        __func__, p->name, vma->node.start, vma->node.size,
474                                                        err);
475                                                 goto err;
476                                         }
477
478                                         if (p->step > 0) {
479                                                 if (offset + obj->base.size > hole_end)
480                                                         break;
481                                                 offset += obj->base.size;
482                                         }
483                                 }
484
485                                 offset = p->offset;
486                                 list_for_each_entry_reverse(obj, &objects, st_link) {
487                                         vma = i915_vma_instance(obj, vm, NULL);
488                                         if (IS_ERR(vma))
489                                                 continue;
490
491                                         if (p->step < 0) {
492                                                 if (offset < hole_start + obj->base.size)
493                                                         break;
494                                                 offset -= obj->base.size;
495                                         }
496
497                                         err = i915_vma_pin(vma, 0, 0, offset | flags);
498                                         if (err) {
499                                                 pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
500                                                        __func__, p->name, err, npages, prime, offset);
501                                                 goto err;
502                                         }
503
504                                         if (!drm_mm_node_allocated(&vma->node) ||
505                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
506                                                 pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
507                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
508                                                        offset);
509                                                 err = -EINVAL;
510                                                 goto err;
511                                         }
512
513                                         i915_vma_unpin(vma);
514
515                                         if (p->step > 0) {
516                                                 if (offset + obj->base.size > hole_end)
517                                                         break;
518                                                 offset += obj->base.size;
519                                         }
520                                 }
521
522                                 offset = p->offset;
523                                 list_for_each_entry_reverse(obj, &objects, st_link) {
524                                         vma = i915_vma_instance(obj, vm, NULL);
525                                         if (IS_ERR(vma))
526                                                 continue;
527
528                                         if (p->step < 0) {
529                                                 if (offset < hole_start + obj->base.size)
530                                                         break;
531                                                 offset -= obj->base.size;
532                                         }
533
534                                         if (!drm_mm_node_allocated(&vma->node) ||
535                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
536                                                 pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
537                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
538                                                        offset);
539                                                 err = -EINVAL;
540                                                 goto err;
541                                         }
542
543                                         err = i915_vma_unbind(vma);
544                                         if (err) {
545                                                 pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
546                                                        __func__, p->name, vma->node.start, vma->node.size,
547                                                        err);
548                                                 goto err;
549                                         }
550
551                                         if (p->step > 0) {
552                                                 if (offset + obj->base.size > hole_end)
553                                                         break;
554                                                 offset += obj->base.size;
555                                         }
556                                 }
557                         }
558
559                         if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
560                                         __func__, npages, prime)) {
561                                 err = -EINTR;
562                                 goto err;
563                         }
564                 }
565
566                 close_object_list(&objects, vm);
567                 cleanup_freed_objects(vm->i915);
568         }
569
570         return 0;
571
572 err:
573         close_object_list(&objects, vm);
574         return err;
575 }
576
577 static int walk_hole(struct i915_address_space *vm,
578                      u64 hole_start, u64 hole_end,
579                      unsigned long end_time)
580 {
581         const u64 hole_size = hole_end - hole_start;
582         const unsigned long max_pages =
583                 min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
584         unsigned long flags;
585         u64 size;
586
587         /* Try binding a single VMA in different positions within the hole */
588
589         flags = PIN_OFFSET_FIXED | PIN_USER;
590         if (i915_is_ggtt(vm))
591                 flags |= PIN_GLOBAL;
592
593         for_each_prime_number_from(size, 1, max_pages) {
594                 struct drm_i915_gem_object *obj;
595                 struct i915_vma *vma;
596                 u64 addr;
597                 int err = 0;
598
599                 obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
600                 if (IS_ERR(obj))
601                         break;
602
603                 vma = i915_vma_instance(obj, vm, NULL);
604                 if (IS_ERR(vma)) {
605                         err = PTR_ERR(vma);
606                         goto err_put;
607                 }
608
609                 for (addr = hole_start;
610                      addr + obj->base.size < hole_end;
611                      addr += obj->base.size) {
612                         err = i915_vma_pin(vma, 0, 0, addr | flags);
613                         if (err) {
614                                 pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
615                                        __func__, addr, vma->size,
616                                        hole_start, hole_end, err);
617                                 goto err_put;
618                         }
619                         i915_vma_unpin(vma);
620
621                         if (!drm_mm_node_allocated(&vma->node) ||
622                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
623                                 pr_err("%s incorrect at %llx + %llx\n",
624                                        __func__, addr, vma->size);
625                                 err = -EINVAL;
626                                 goto err_put;
627                         }
628
629                         err = i915_vma_unbind(vma);
630                         if (err) {
631                                 pr_err("%s unbind failed at %llx + %llx  with err=%d\n",
632                                        __func__, addr, vma->size, err);
633                                 goto err_put;
634                         }
635
636                         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
637
638                         if (igt_timeout(end_time,
639                                         "%s timed out at %llx\n",
640                                         __func__, addr)) {
641                                 err = -EINTR;
642                                 goto err_put;
643                         }
644                 }
645
646 err_put:
647                 i915_gem_object_put(obj);
648                 if (err)
649                         return err;
650
651                 cleanup_freed_objects(vm->i915);
652         }
653
654         return 0;
655 }
656
657 static int pot_hole(struct i915_address_space *vm,
658                     u64 hole_start, u64 hole_end,
659                     unsigned long end_time)
660 {
661         struct drm_i915_gem_object *obj;
662         struct i915_vma *vma;
663         unsigned long flags;
664         unsigned int pot;
665         int err = 0;
666
667         flags = PIN_OFFSET_FIXED | PIN_USER;
668         if (i915_is_ggtt(vm))
669                 flags |= PIN_GLOBAL;
670
671         obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
672         if (IS_ERR(obj))
673                 return PTR_ERR(obj);
674
675         vma = i915_vma_instance(obj, vm, NULL);
676         if (IS_ERR(vma)) {
677                 err = PTR_ERR(vma);
678                 goto err_obj;
679         }
680
681         /* Insert a pair of pages across every pot boundary within the hole */
682         for (pot = fls64(hole_end - 1) - 1;
683              pot > ilog2(2 * I915_GTT_PAGE_SIZE);
684              pot--) {
685                 u64 step = BIT_ULL(pot);
686                 u64 addr;
687
688                 for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
689                      addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
690                      addr += step) {
691                         err = i915_vma_pin(vma, 0, 0, addr | flags);
692                         if (err) {
693                                 pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
694                                        __func__,
695                                        addr,
696                                        hole_start, hole_end,
697                                        err);
698                                 goto err_obj;
699                         }
700
701                         if (!drm_mm_node_allocated(&vma->node) ||
702                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
703                                 pr_err("%s incorrect at %llx + %llx\n",
704                                        __func__, addr, vma->size);
705                                 i915_vma_unpin(vma);
706                                 err = i915_vma_unbind(vma);
707                                 err = -EINVAL;
708                                 goto err_obj;
709                         }
710
711                         i915_vma_unpin(vma);
712                         err = i915_vma_unbind(vma);
713                         GEM_BUG_ON(err);
714                 }
715
716                 if (igt_timeout(end_time,
717                                 "%s timed out after %d/%d\n",
718                                 __func__, pot, fls64(hole_end - 1) - 1)) {
719                         err = -EINTR;
720                         goto err_obj;
721                 }
722         }
723
724 err_obj:
725         i915_gem_object_put(obj);
726         return err;
727 }
728
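/*
 * drunk_hole() is the randomised variant of walk_hole(): for each
 * power-of-two object size that fits, a single vma is pinned and unbound
 * at multiples of the object size within the hole, visiting those offsets
 * in random order.
 */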
729 static int drunk_hole(struct i915_address_space *vm,
730                       u64 hole_start, u64 hole_end,
731                       unsigned long end_time)
732 {
733         I915_RND_STATE(prng);
734         unsigned int size;
735         unsigned long flags;
736
737         flags = PIN_OFFSET_FIXED | PIN_USER;
738         if (i915_is_ggtt(vm))
739                 flags |= PIN_GLOBAL;
740
741         /* Keep creating larger objects until one cannot fit into the hole */
742         for (size = 12; (hole_end - hole_start) >> size; size++) {
743                 struct drm_i915_gem_object *obj;
744                 unsigned int *order, count, n;
745                 struct i915_vma *vma;
746                 u64 hole_size;
747                 int err = -ENODEV;
748
749                 hole_size = (hole_end - hole_start) >> size;
750                 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
751                         hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
752                 count = hole_size >> 1;
753                 if (!count) {
754                         pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
755                                  __func__, hole_start, hole_end, size, hole_size);
756                         break;
757                 }
758
759                 do {
760                         order = i915_random_order(count, &prng);
761                         if (order)
762                                 break;
763                 } while (count >>= 1);
764                 if (!count)
765                         return -ENOMEM;
766                 GEM_BUG_ON(!order);
767
768                 /* Ignore allocation failures (i.e. don't report them as
769                  * a test failure) as we are purposefully allocating very
770                  * large objects without checking that we have sufficient
771                  * memory. We expect to hit -ENOMEM.
772                  */
773
774                 obj = fake_dma_object(vm->i915, BIT_ULL(size));
775                 if (IS_ERR(obj)) {
776                         kfree(order);
777                         break;
778                 }
779
780                 vma = i915_vma_instance(obj, vm, NULL);
781                 if (IS_ERR(vma)) {
782                         err = PTR_ERR(vma);
783                         goto err_obj;
784                 }
785
786                 GEM_BUG_ON(vma->size != BIT_ULL(size));
787
788                 for (n = 0; n < count; n++) {
789                         u64 addr = hole_start + order[n] * BIT_ULL(size);
790
791                         err = i915_vma_pin(vma, 0, 0, addr | flags);
792                         if (err) {
793                                 pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
794                                        __func__,
795                                        addr, BIT_ULL(size),
796                                        hole_start, hole_end,
797                                        err);
798                                 goto err_obj;
799                         }
800
801                         if (!drm_mm_node_allocated(&vma->node) ||
802                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
803                                 pr_err("%s incorrect at %llx + %llx\n",
804                                        __func__, addr, BIT_ULL(size));
805                                 i915_vma_unpin(vma);
806                                 err = i915_vma_unbind(vma);
807                                 err = -EINVAL;
808                                 goto err_obj;
809                         }
810
811                         i915_vma_unpin(vma);
812                         err = i915_vma_unbind(vma);
813                         GEM_BUG_ON(err);
814
815                         if (igt_timeout(end_time,
816                                         "%s timed out after %d/%d\n",
817                                         __func__, n, count)) {
818                                 err = -EINTR;
819                                 goto err_obj;
820                         }
821                 }
822
823 err_obj:
824                 i915_gem_object_put(obj);
825                 kfree(order);
826                 if (err)
827                         return err;
828
829                 cleanup_freed_objects(vm->i915);
830         }
831
832         return 0;
833 }
834
835 static int __shrink_hole(struct i915_address_space *vm,
836                          u64 hole_start, u64 hole_end,
837                          unsigned long end_time)
838 {
839         struct drm_i915_gem_object *obj;
840         unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
841         unsigned int order = 12;
842         LIST_HEAD(objects);
843         int err = 0;
844         u64 addr;
845
846         /* Fill the hole with objects of doubling size until we run out of space */
847         for (addr = hole_start; addr < hole_end; ) {
848                 struct i915_vma *vma;
849                 u64 size = BIT_ULL(order++);
850
851                 size = min(size, hole_end - addr);
852                 obj = fake_dma_object(vm->i915, size);
853                 if (IS_ERR(obj)) {
854                         err = PTR_ERR(obj);
855                         break;
856                 }
857
858                 list_add(&obj->st_link, &objects);
859
860                 vma = i915_vma_instance(obj, vm, NULL);
861                 if (IS_ERR(vma)) {
862                         err = PTR_ERR(vma);
863                         break;
864                 }
865
866                 GEM_BUG_ON(vma->size != size);
867
868                 err = i915_vma_pin(vma, 0, 0, addr | flags);
869                 if (err) {
870                         pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
871                                __func__, addr, size, hole_start, hole_end, err);
872                         break;
873                 }
874
875                 if (!drm_mm_node_allocated(&vma->node) ||
876                     i915_vma_misplaced(vma, 0, 0, addr | flags)) {
877                         pr_err("%s incorrect at %llx + %llx\n",
878                                __func__, addr, size);
879                         i915_vma_unpin(vma);
880                         err = i915_vma_unbind(vma);
881                         err = -EINVAL;
882                         break;
883                 }
884
885                 i915_vma_unpin(vma);
886                 addr += size;
887
888                 /*
889                  * Since we are injecting allocation faults at random intervals,
890                  * wait for this allocation to complete before we change the
891                  * fault injection.
892                  */
893                 err = i915_vma_sync(vma);
894                 if (err)
895                         break;
896
897                 if (igt_timeout(end_time,
898                                 "%s timed out at offset %llx [%llx - %llx]\n",
899                                 __func__, addr, hole_start, hole_end)) {
900                         err = -EINTR;
901                         break;
902                 }
903         }
904
905         close_object_list(&objects, vm);
906         cleanup_freed_objects(vm->i915);
907         return err;
908 }
909
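/*
 * shrink_hole() repeats __shrink_hole() with fault injection enabled on the
 * address space, stepping the injection interval through a series of primes
 * so that page-table allocations fail at many different points and the
 * error/unwind paths get exercised.
 */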
910 static int shrink_hole(struct i915_address_space *vm,
911                        u64 hole_start, u64 hole_end,
912                        unsigned long end_time)
913 {
914         unsigned long prime;
915         int err;
916
917         vm->fault_attr.probability = 999;
918         atomic_set(&vm->fault_attr.times, -1);
919
920         for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
921                 vm->fault_attr.interval = prime;
922                 err = __shrink_hole(vm, hole_start, hole_end, end_time);
923                 if (err)
924                         break;
925         }
926
927         memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
928
929         return err;
930 }
931
932 static int shrink_boom(struct i915_address_space *vm,
933                        u64 hole_start, u64 hole_end,
934                        unsigned long end_time)
935 {
936         unsigned int sizes[] = { SZ_2M, SZ_1G };
937         struct drm_i915_gem_object *purge;
938         struct drm_i915_gem_object *explode;
939         int err;
940         int i;
941
942         /*
943          * Catch the case which shrink_hole seems to miss. The setup here
944          * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
945          * ensuring that all vma assiocated with the respective pd/pdp are
946          * ensuring that all vmas associated with the respective pd/pdp are
947          */
948
949         for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
950                 unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
951                 unsigned int size = sizes[i];
952                 struct i915_vma *vma;
953
954                 purge = fake_dma_object(vm->i915, size);
955                 if (IS_ERR(purge))
956                         return PTR_ERR(purge);
957
958                 vma = i915_vma_instance(purge, vm, NULL);
959                 if (IS_ERR(vma)) {
960                         err = PTR_ERR(vma);
961                         goto err_purge;
962                 }
963
964                 err = i915_vma_pin(vma, 0, 0, flags);
965                 if (err)
966                         goto err_purge;
967
968                 /* Should now be ripe for purging */
969                 i915_vma_unpin(vma);
970
971                 explode = fake_dma_object(vm->i915, size);
972                 if (IS_ERR(explode)) {
973                         err = PTR_ERR(explode);
974                         goto err_purge;
975                 }
976
977                 vm->fault_attr.probability = 100;
978                 vm->fault_attr.interval = 1;
979                 atomic_set(&vm->fault_attr.times, -1);
980
981                 vma = i915_vma_instance(explode, vm, NULL);
982                 if (IS_ERR(vma)) {
983                         err = PTR_ERR(vma);
984                         goto err_explode;
985                 }
986
987                 err = i915_vma_pin(vma, 0, 0, flags | size);
988                 if (err)
989                         goto err_explode;
990
991                 i915_vma_unpin(vma);
992
993                 i915_gem_object_put(purge);
994                 i915_gem_object_put(explode);
995
996                 memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
997                 cleanup_freed_objects(vm->i915);
998         }
999
1000         return 0;
1001
1002 err_explode:
1003         i915_gem_object_put(explode);
1004 err_purge:
1005         i915_gem_object_put(purge);
1006         memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1007         return err;
1008 }
1009
1010 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1011                           int (*func)(struct i915_address_space *vm,
1012                                       u64 hole_start, u64 hole_end,
1013                                       unsigned long end_time))
1014 {
1015         struct i915_ppgtt *ppgtt;
1016         IGT_TIMEOUT(end_time);
1017         struct file *file;
1018         int err;
1019
1020         if (!HAS_FULL_PPGTT(dev_priv))
1021                 return 0;
1022
1023         file = mock_file(dev_priv);
1024         if (IS_ERR(file))
1025                 return PTR_ERR(file);
1026
1027         ppgtt = i915_ppgtt_create(&dev_priv->gt);
1028         if (IS_ERR(ppgtt)) {
1029                 err = PTR_ERR(ppgtt);
1030                 goto out_free;
1031         }
1032         GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1033         GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
1034
1035         err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1036
1037         i915_vm_put(&ppgtt->vm);
1038
1039 out_free:
1040         fput(file);
1041         return err;
1042 }
1043
1044 static int igt_ppgtt_fill(void *arg)
1045 {
1046         return exercise_ppgtt(arg, fill_hole);
1047 }
1048
1049 static int igt_ppgtt_walk(void *arg)
1050 {
1051         return exercise_ppgtt(arg, walk_hole);
1052 }
1053
1054 static int igt_ppgtt_pot(void *arg)
1055 {
1056         return exercise_ppgtt(arg, pot_hole);
1057 }
1058
1059 static int igt_ppgtt_drunk(void *arg)
1060 {
1061         return exercise_ppgtt(arg, drunk_hole);
1062 }
1063
1064 static int igt_ppgtt_lowlevel(void *arg)
1065 {
1066         return exercise_ppgtt(arg, lowlevel_hole);
1067 }
1068
1069 static int igt_ppgtt_shrink(void *arg)
1070 {
1071         return exercise_ppgtt(arg, shrink_hole);
1072 }
1073
1074 static int igt_ppgtt_shrink_boom(void *arg)
1075 {
1076         return exercise_ppgtt(arg, shrink_boom);
1077 }
1078
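/* list_sort() comparator: order the GGTT holes by ascending start address. */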
1079 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
1080 {
1081         struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1082         struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1083
1084         if (a->start < b->start)
1085                 return -1;
1086         else
1087                 return 1;
1088 }
1089
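/*
 * Run the given hole-filling test over every unused range (hole) currently
 * present in the global GTT. The hole list is re-sorted and re-walked from
 * the start after each hole, since the test itself mutates the drm_mm.
 */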
1090 static int exercise_ggtt(struct drm_i915_private *i915,
1091                          int (*func)(struct i915_address_space *vm,
1092                                      u64 hole_start, u64 hole_end,
1093                                      unsigned long end_time))
1094 {
1095         struct i915_ggtt *ggtt = &i915->ggtt;
1096         u64 hole_start, hole_end, last = 0;
1097         struct drm_mm_node *node;
1098         IGT_TIMEOUT(end_time);
1099         int err = 0;
1100
1101 restart:
1102         list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1103         drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1104                 if (hole_start < last)
1105                         continue;
1106
1107                 if (ggtt->vm.mm.color_adjust)
1108                         ggtt->vm.mm.color_adjust(node, 0,
1109                                                  &hole_start, &hole_end);
1110                 if (hole_start >= hole_end)
1111                         continue;
1112
1113                 err = func(&ggtt->vm, hole_start, hole_end, end_time);
1114                 if (err)
1115                         break;
1116
1117                 /* As we have manipulated the drm_mm, the list may be corrupt */
1118                 last = hole_end;
1119                 goto restart;
1120         }
1121
1122         return err;
1123 }
1124
1125 static int igt_ggtt_fill(void *arg)
1126 {
1127         return exercise_ggtt(arg, fill_hole);
1128 }
1129
1130 static int igt_ggtt_walk(void *arg)
1131 {
1132         return exercise_ggtt(arg, walk_hole);
1133 }
1134
1135 static int igt_ggtt_pot(void *arg)
1136 {
1137         return exercise_ggtt(arg, pot_hole);
1138 }
1139
1140 static int igt_ggtt_drunk(void *arg)
1141 {
1142         return exercise_ggtt(arg, drunk_hole);
1143 }
1144
1145 static int igt_ggtt_lowlevel(void *arg)
1146 {
1147         return exercise_ggtt(arg, lowlevel_hole);
1148 }
1149
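/*
 * igt_ggtt_page() checks ggtt->vm.insert_page(): the same backing page is
 * inserted at every offset of a scratch range in the mappable aperture,
 * then written and read back through the aperture in random order to
 * verify that each GGTT PTE really points at our page.
 */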
1150 static int igt_ggtt_page(void *arg)
1151 {
1152         const unsigned int count = PAGE_SIZE/sizeof(u32);
1153         I915_RND_STATE(prng);
1154         struct drm_i915_private *i915 = arg;
1155         struct i915_ggtt *ggtt = &i915->ggtt;
1156         struct drm_i915_gem_object *obj;
1157         intel_wakeref_t wakeref;
1158         struct drm_mm_node tmp;
1159         unsigned int *order, n;
1160         int err;
1161
1162         if (!i915_ggtt_has_aperture(ggtt))
1163                 return 0;
1164
1165         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1166         if (IS_ERR(obj))
1167                 return PTR_ERR(obj);
1168
1169         err = i915_gem_object_pin_pages(obj);
1170         if (err)
1171                 goto out_free;
1172
1173         memset(&tmp, 0, sizeof(tmp));
1174         mutex_lock(&ggtt->vm.mutex);
1175         err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1176                                           count * PAGE_SIZE, 0,
1177                                           I915_COLOR_UNEVICTABLE,
1178                                           0, ggtt->mappable_end,
1179                                           DRM_MM_INSERT_LOW);
1180         mutex_unlock(&ggtt->vm.mutex);
1181         if (err)
1182                 goto out_unpin;
1183
1184         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1185
1186         for (n = 0; n < count; n++) {
1187                 u64 offset = tmp.start + n * PAGE_SIZE;
1188
1189                 ggtt->vm.insert_page(&ggtt->vm,
1190                                      i915_gem_object_get_dma_address(obj, 0),
1191                                      offset, I915_CACHE_NONE, 0);
1192         }
1193
1194         order = i915_random_order(count, &prng);
1195         if (!order) {
1196                 err = -ENOMEM;
1197                 goto out_remove;
1198         }
1199
1200         for (n = 0; n < count; n++) {
1201                 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1202                 u32 __iomem *vaddr;
1203
1204                 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1205                 iowrite32(n, vaddr + n);
1206                 io_mapping_unmap_atomic(vaddr);
1207         }
1208         intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1209
1210         i915_random_reorder(order, count, &prng);
1211         for (n = 0; n < count; n++) {
1212                 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1213                 u32 __iomem *vaddr;
1214                 u32 val;
1215
1216                 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1217                 val = ioread32(vaddr + n);
1218                 io_mapping_unmap_atomic(vaddr);
1219
1220                 if (val != n) {
1221                         pr_err("insert page failed: found %d, expected %d\n",
1222                                val, n);
1223                         err = -EINVAL;
1224                         break;
1225                 }
1226         }
1227
1228         kfree(order);
1229 out_remove:
1230         ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1231         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1232         mutex_lock(&ggtt->vm.mutex);
1233         drm_mm_remove_node(&tmp);
1234         mutex_unlock(&ggtt->vm.mutex);
1235 out_unpin:
1236         i915_gem_object_unpin_pages(obj);
1237 out_free:
1238         i915_gem_object_put(obj);
1239         return err;
1240 }
1241
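/*
 * track_vma_bind() fakes the bookkeeping normally done when binding a vma:
 * the object's pages are pinned and handed to the vma, and the vma is added
 * to the vm's bound_list, without going through the real binding paths.
 * Used after placing a node directly with i915_gem_gtt_reserve().
 */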
1242 static void track_vma_bind(struct i915_vma *vma)
1243 {
1244         struct drm_i915_gem_object *obj = vma->obj;
1245
1246         __i915_gem_object_pin_pages(obj);
1247
1248         GEM_BUG_ON(vma->pages);
1249         atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1250         __i915_gem_object_pin_pages(obj);
1251         vma->pages = obj->mm.pages;
1252
1253         mutex_lock(&vma->vm->mutex);
1254         list_add_tail(&vma->vm_link, &vma->vm->bound_list);
1255         mutex_unlock(&vma->vm->mutex);
1256 }
1257
1258 static int exercise_mock(struct drm_i915_private *i915,
1259                          int (*func)(struct i915_address_space *vm,
1260                                      u64 hole_start, u64 hole_end,
1261                                      unsigned long end_time))
1262 {
1263         const u64 limit = totalram_pages() << PAGE_SHIFT;
1264         struct i915_address_space *vm;
1265         struct i915_gem_context *ctx;
1266         IGT_TIMEOUT(end_time);
1267         int err;
1268
1269         ctx = mock_context(i915, "mock");
1270         if (!ctx)
1271                 return -ENOMEM;
1272
1273         vm = i915_gem_context_get_vm_rcu(ctx);
1274         err = func(vm, 0, min(vm->total, limit), end_time);
1275         i915_vm_put(vm);
1276
1277         mock_context_close(ctx);
1278         return err;
1279 }
1280
1281 static int igt_mock_fill(void *arg)
1282 {
1283         struct i915_ggtt *ggtt = arg;
1284
1285         return exercise_mock(ggtt->vm.i915, fill_hole);
1286 }
1287
1288 static int igt_mock_walk(void *arg)
1289 {
1290         struct i915_ggtt *ggtt = arg;
1291
1292         return exercise_mock(ggtt->vm.i915, walk_hole);
1293 }
1294
1295 static int igt_mock_pot(void *arg)
1296 {
1297         struct i915_ggtt *ggtt = arg;
1298
1299         return exercise_mock(ggtt->vm.i915, pot_hole);
1300 }
1301
1302 static int igt_mock_drunk(void *arg)
1303 {
1304         struct i915_ggtt *ggtt = arg;
1305
1306         return exercise_mock(ggtt->vm.i915, drunk_hole);
1307 }
1308
1309 static int igt_gtt_reserve(void *arg)
1310 {
1311         struct i915_ggtt *ggtt = arg;
1312         struct drm_i915_gem_object *obj, *on;
1313         I915_RND_STATE(prng);
1314         LIST_HEAD(objects);
1315         u64 total;
1316         int err = -ENODEV;
1317
1318         /* i915_gem_gtt_reserve() tries to reserve the precise range
1319          * for the node, and evicts if it has to. So our test checks that
1320          * it can give us the requested space and prevent overlaps.
1321          */
1322
1323         /* Start by filling the GGTT */
1324         for (total = 0;
1325              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1326              total += 2 * I915_GTT_PAGE_SIZE) {
1327                 struct i915_vma *vma;
1328
1329                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1330                                                       2 * PAGE_SIZE);
1331                 if (IS_ERR(obj)) {
1332                         err = PTR_ERR(obj);
1333                         goto out;
1334                 }
1335
1336                 err = i915_gem_object_pin_pages(obj);
1337                 if (err) {
1338                         i915_gem_object_put(obj);
1339                         goto out;
1340                 }
1341
1342                 list_add(&obj->st_link, &objects);
1343
1344                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1345                 if (IS_ERR(vma)) {
1346                         err = PTR_ERR(vma);
1347                         goto out;
1348                 }
1349
1350                 mutex_lock(&ggtt->vm.mutex);
1351                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1352                                            obj->base.size,
1353                                            total,
1354                                            obj->cache_level,
1355                                            0);
1356                 mutex_unlock(&ggtt->vm.mutex);
1357                 if (err) {
1358                         pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1359                                total, ggtt->vm.total, err);
1360                         goto out;
1361                 }
1362                 track_vma_bind(vma);
1363
1364                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1365                 if (vma->node.start != total ||
1366                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1367                         pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1368                                vma->node.start, vma->node.size,
1369                                total, 2*I915_GTT_PAGE_SIZE);
1370                         err = -EINVAL;
1371                         goto out;
1372                 }
1373         }
1374
1375         /* Now we start forcing evictions */
1376         for (total = I915_GTT_PAGE_SIZE;
1377              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1378              total += 2 * I915_GTT_PAGE_SIZE) {
1379                 struct i915_vma *vma;
1380
1381                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1382                                                       2 * PAGE_SIZE);
1383                 if (IS_ERR(obj)) {
1384                         err = PTR_ERR(obj);
1385                         goto out;
1386                 }
1387
1388                 err = i915_gem_object_pin_pages(obj);
1389                 if (err) {
1390                         i915_gem_object_put(obj);
1391                         goto out;
1392                 }
1393
1394                 list_add(&obj->st_link, &objects);
1395
1396                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1397                 if (IS_ERR(vma)) {
1398                         err = PTR_ERR(vma);
1399                         goto out;
1400                 }
1401
1402                 mutex_lock(&ggtt->vm.mutex);
1403                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1404                                            obj->base.size,
1405                                            total,
1406                                            obj->cache_level,
1407                                            0);
1408                 mutex_unlock(&ggtt->vm.mutex);
1409                 if (err) {
1410                         pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1411                                total, ggtt->vm.total, err);
1412                         goto out;
1413                 }
1414                 track_vma_bind(vma);
1415
1416                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1417                 if (vma->node.start != total ||
1418                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1419                         pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1420                                vma->node.start, vma->node.size,
1421                                total, 2*I915_GTT_PAGE_SIZE);
1422                         err = -EINVAL;
1423                         goto out;
1424                 }
1425         }
1426
1427         /* And then try reserving at random offsets */
1428         list_for_each_entry_safe(obj, on, &objects, st_link) {
1429                 struct i915_vma *vma;
1430                 u64 offset;
1431
1432                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1433                 if (IS_ERR(vma)) {
1434                         err = PTR_ERR(vma);
1435                         goto out;
1436                 }
1437
1438                 err = i915_vma_unbind(vma);
1439                 if (err) {
1440                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1441                         goto out;
1442                 }
1443
1444                 offset = igt_random_offset(&prng,
1445                                            0, ggtt->vm.total,
1446                                            2 * I915_GTT_PAGE_SIZE,
1447                                            I915_GTT_MIN_ALIGNMENT);
1448
1449                 mutex_lock(&ggtt->vm.mutex);
1450                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1451                                            obj->base.size,
1452                                            offset,
1453                                            obj->cache_level,
1454                                            0);
1455                 mutex_unlock(&ggtt->vm.mutex);
1456                 if (err) {
1457                         pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1458                                offset, ggtt->vm.total, err);
1459                         goto out;
1460                 }
1461                 track_vma_bind(vma);
1462
1463                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1464                 if (vma->node.start != offset ||
1465                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1466                         pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1467                                vma->node.start, vma->node.size,
1468                                offset, 2*I915_GTT_PAGE_SIZE);
1469                         err = -EINVAL;
1470                         goto out;
1471                 }
1472         }
1473
1474 out:
1475         list_for_each_entry_safe(obj, on, &objects, st_link) {
1476                 i915_gem_object_unpin_pages(obj);
1477                 i915_gem_object_put(obj);
1478         }
1479         return err;
1480 }
1481
1482 static int igt_gtt_insert(void *arg)
1483 {
1484         struct i915_ggtt *ggtt = arg;
1485         struct drm_i915_gem_object *obj, *on;
1486         struct drm_mm_node tmp = {};
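             /*
              * Requests that i915_gem_gtt_insert() must reject with -ENOSPC:
              * larger than the GGTT, larger than the target range, sizes that
              * overflow, and an alignment that cannot fit within the range.
              */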
1487         const struct invalid_insert {
1488                 u64 size;
1489                 u64 alignment;
1490                 u64 start, end;
1491         } invalid_insert[] = {
1492                 {
1493                         ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1494                         0, ggtt->vm.total,
1495                 },
1496                 {
1497                         2*I915_GTT_PAGE_SIZE, 0,
1498                         0, I915_GTT_PAGE_SIZE,
1499                 },
1500                 {
1501                         -(u64)I915_GTT_PAGE_SIZE, 0,
1502                         0, 4*I915_GTT_PAGE_SIZE,
1503                 },
1504                 {
1505                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1506                         0, 4*I915_GTT_PAGE_SIZE,
1507                 },
1508                 {
1509                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1510                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1511                 },
1512                 {}
1513         }, *ii;
1514         LIST_HEAD(objects);
1515         u64 total;
1516         int err = -ENODEV;
1517
1518         /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1519          * to the node, evicting if required.
1520          */
1521
1522         /* Check a couple of obviously invalid requests */
1523         for (ii = invalid_insert; ii->size; ii++) {
1524                 mutex_lock(&ggtt->vm.mutex);
1525                 err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1526                                           ii->size, ii->alignment,
1527                                           I915_COLOR_UNEVICTABLE,
1528                                           ii->start, ii->end,
1529                                           0);
1530                 mutex_unlock(&ggtt->vm.mutex);
1531                 if (err != -ENOSPC) {
1532                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) was not rejected with -ENOSPC (err=%d)\n",
1533                                ii->size, ii->alignment, ii->start, ii->end,
1534                                err);
1535                         return -EINVAL;
1536                 }
1537         }
1538
1539         /* Start by filling the GGTT */
1540         for (total = 0;
1541              total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1542              total += I915_GTT_PAGE_SIZE) {
1543                 struct i915_vma *vma;
1544
1545                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1546                                                       I915_GTT_PAGE_SIZE);
1547                 if (IS_ERR(obj)) {
1548                         err = PTR_ERR(obj);
1549                         goto out;
1550                 }
1551
1552                 err = i915_gem_object_pin_pages(obj);
1553                 if (err) {
1554                         i915_gem_object_put(obj);
1555                         goto out;
1556                 }
1557
1558                 list_add(&obj->st_link, &objects);
1559
1560                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1561                 if (IS_ERR(vma)) {
1562                         err = PTR_ERR(vma);
1563                         goto out;
1564                 }
1565
1566                 mutex_lock(&ggtt->vm.mutex);
1567                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1568                                           obj->base.size, 0, obj->cache_level,
1569                                           0, ggtt->vm.total,
1570                                           0);
1571                 mutex_unlock(&ggtt->vm.mutex);
1572                 if (err == -ENOSPC) {
1573                         /* maxed out the GGTT space */
1574                         i915_gem_object_put(obj);
1575                         break;
1576                 }
1577                 if (err) {
1578                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1579                                total, ggtt->vm.total, err);
1580                         goto out;
1581                 }
1582                 track_vma_bind(vma);
1583                 __i915_vma_pin(vma);
1584
1585                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1586         }
1587
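             /* Verify nothing pinned was evicted while filling the GGTT, then drop the pins */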
1588         list_for_each_entry(obj, &objects, st_link) {
1589                 struct i915_vma *vma;
1590
1591                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1592                 if (IS_ERR(vma)) {
1593                         err = PTR_ERR(vma);
1594                         goto out;
1595                 }
1596
1597                 if (!drm_mm_node_allocated(&vma->node)) {
1598                         pr_err("VMA was unexpectedly evicted!\n");
1599                         err = -EINVAL;
1600                         goto out;
1601                 }
1602
1603                 __i915_vma_unpin(vma);
1604         }
1605
1606         /* If we then reinsert, we should find the same hole */
1607         list_for_each_entry_safe(obj, on, &objects, st_link) {
1608                 struct i915_vma *vma;
1609                 u64 offset;
1610
1611                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1612                 if (IS_ERR(vma)) {
1613                         err = PTR_ERR(vma);
1614                         goto out;
1615                 }
1616
1617                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1618                 offset = vma->node.start;
1619
1620                 err = i915_vma_unbind(vma);
1621                 if (err) {
1622                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1623                         goto out;
1624                 }
1625
1626                 mutex_lock(&ggtt->vm.mutex);
1627                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1628                                           obj->base.size, 0, obj->cache_level,
1629                                           0, ggtt->vm.total,
1630                                           0);
1631                 mutex_unlock(&ggtt->vm.mutex);
1632                 if (err) {
1633                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1634                                offset, ggtt->vm.total, err);
1635                         goto out;
1636                 }
1637                 track_vma_bind(vma);
1638
1639                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1640                 if (vma->node.start != offset) {
1641                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1642                                offset, vma->node.start);
1643                         err = -EINVAL;
1644                         goto out;
1645                 }
1646         }
1647
1648         /* And then force evictions */
1649         for (total = 0;
1650              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1651              total += 2 * I915_GTT_PAGE_SIZE) {
1652                 struct i915_vma *vma;
1653
1654                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1655                                                       2 * I915_GTT_PAGE_SIZE);
1656                 if (IS_ERR(obj)) {
1657                         err = PTR_ERR(obj);
1658                         goto out;
1659                 }
1660
1661                 err = i915_gem_object_pin_pages(obj);
1662                 if (err) {
1663                         i915_gem_object_put(obj);
1664                         goto out;
1665                 }
1666
1667                 list_add(&obj->st_link, &objects);
1668
1669                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1670                 if (IS_ERR(vma)) {
1671                         err = PTR_ERR(vma);
1672                         goto out;
1673                 }
1674
1675                 mutex_lock(&ggtt->vm.mutex);
1676                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1677                                           obj->base.size, 0, obj->cache_level,
1678                                           0, ggtt->vm.total,
1679                                           0);
1680                 mutex_unlock(&ggtt->vm.mutex);
1681                 if (err) {
1682                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1683                                total, ggtt->vm.total, err);
1684                         goto out;
1685                 }
1686                 track_vma_bind(vma);
1687
1688                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1689         }
1690
1691 out:
1692         list_for_each_entry_safe(obj, on, &objects, st_link) {
1693                 i915_gem_object_unpin_pages(obj);
1694                 i915_gem_object_put(obj);
1695         }
1696         return err;
1697 }
1698
1699 int i915_gem_gtt_mock_selftests(void)
1700 {
1701         static const struct i915_subtest tests[] = {
1702                 SUBTEST(igt_mock_drunk),
1703                 SUBTEST(igt_mock_walk),
1704                 SUBTEST(igt_mock_pot),
1705                 SUBTEST(igt_mock_fill),
1706                 SUBTEST(igt_gtt_reserve),
1707                 SUBTEST(igt_gtt_insert),
1708         };
1709         struct drm_i915_private *i915;
1710         struct i915_ggtt *ggtt;
1711         int err;
1712
1713         i915 = mock_gem_device();
1714         if (!i915)
1715                 return -ENOMEM;
1716
1717         ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1718         if (!ggtt) {
1719                 err = -ENOMEM;
1720                 goto out_put;
1721         }
1722         mock_init_ggtt(i915, ggtt);
1723
1724         err = i915_subtests(tests, ggtt);
1725
1726         mock_device_flush(i915);
1727         i915_gem_drain_freed_objects(i915);
1728         mock_fini_ggtt(ggtt);
1729         kfree(ggtt);
1730 out_put:
1731         mock_destroy_device(i915);
1732         return err;
1733 }
1734
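     /*
      * Submit an empty request on @ce and wait briefly for it to retire,
      * acting as a barrier between test phases; returns -EIO if the
      * request does not complete in time.
      */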
1735 static int context_sync(struct intel_context *ce)
1736 {
1737         struct i915_request *rq;
1738         long timeout;
1739
1740         rq = intel_context_create_request(ce);
1741         if (IS_ERR(rq))
1742                 return PTR_ERR(rq);
1743
1744         i915_request_get(rq);
1745         i915_request_add(rq);
1746
1747         timeout = i915_request_wait(rq, 0, HZ / 5);
1748         i915_request_put(rq);
1749
1750         return timeout < 0 ? -EIO : 0;
1751 }
1752
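     /*
      * Submit a request that starts the batch at @addr. Where the engine
      * provides an initial breadcrumb, emit it first so a hang inside the
      * batch can be detected. Returns the request with a reference held,
      * or an ERR_PTR on failure.
      */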
1753 static struct i915_request *
1754 submit_batch(struct intel_context *ce, u64 addr)
1755 {
1756         struct i915_request *rq;
1757         int err;
1758
1759         rq = intel_context_create_request(ce);
1760         if (IS_ERR(rq))
1761                 return rq;
1762
1763         err = 0;
1764         if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1765                 err = rq->engine->emit_init_breadcrumb(rq);
1766         if (err == 0)
1767                 err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1768
1769         if (err == 0)
1770                 i915_request_get(rq);
1771         i915_request_add(rq);
1772
1773         return err ? ERR_PTR(err) : rq;
1774 }
1775
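     /*
      * Each batch in the 'act' object occupies a 64-byte slot. spinner()
      * points at the MI_NOOP just before the slot's self-referencing
      * MI_BATCH_BUFFER_START; end_spin() overwrites that MI_NOOP with
      * MI_BATCH_BUFFER_END to let the batch escape its spin loop.
      */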
1776 static u32 *spinner(u32 *batch, int i)
1777 {
1778         return batch + i * 64 / sizeof(*batch) + 4;
1779 }
1780
1781 static void end_spin(u32 *batch, int i)
1782 {
1783         *spinner(batch, i) = MI_BATCH_BUFFER_END;
1784         wmb();
1785 }
1786
1787 static int igt_cs_tlb(void *arg)
1788 {
1789         const unsigned int count = PAGE_SIZE / 64;
1790         const unsigned int chunk_size = count * PAGE_SIZE;
1791         struct drm_i915_private *i915 = arg;
1792         struct drm_i915_gem_object *bbe, *act, *out;
1793         struct i915_gem_engines_iter it;
1794         struct i915_address_space *vm;
1795         struct i915_gem_context *ctx;
1796         struct intel_context *ce;
1797         struct i915_vma *vma;
1798         I915_RND_STATE(prng);
1799         struct file *file;
1800         unsigned int i;
1801         u32 *result;
1802         u32 *batch;
1803         int err = 0;
1804
1805         /*
1806          * Our mission here is to fool the hardware into executing something
1807          * from scratch (a stale mapping), as it has not seen the batch move
1808          * (due to a missing TLB invalidate).
1809          */
1810
1811         file = mock_file(i915);
1812         if (IS_ERR(file))
1813                 return PTR_ERR(file);
1814
1815         ctx = live_context(i915, file);
1816         if (IS_ERR(ctx)) {
1817                 err = PTR_ERR(ctx);
1818                 goto out_unlock;
1819         }
1820
1821         vm = i915_gem_context_get_vm_rcu(ctx);
1822         if (i915_is_ggtt(vm))
1823                 goto out_vm;
1824
1825         /* Create two pages: a dummy used to prefill the TLB, and the intended target */
1826         bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1827         if (IS_ERR(bbe)) {
1828                 err = PTR_ERR(bbe);
1829                 goto out_vm;
1830         }
1831
1832         batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
1833         if (IS_ERR(batch)) {
1834                 err = PTR_ERR(batch);
1835                 goto out_put_bbe;
1836         }
1837         memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1838         i915_gem_object_flush_map(bbe);
1839         i915_gem_object_unpin_map(bbe);
1840
1841         act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1842         if (IS_ERR(act)) {
1843                 err = PTR_ERR(act);
1844                 goto out_put_bbe;
1845         }
1846
1847         /* Track the execution of each request by writing into a different slot */
1848         batch = i915_gem_object_pin_map(act, I915_MAP_WC);
1849         if (IS_ERR(batch)) {
1850                 err = PTR_ERR(batch);
1851                 goto out_put_act;
1852         }
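             /*
              * Each 64-byte slot stores its own index into the result page
              * (mapped at the very top of the address space) and then chains
              * into an MI_BATCH_BUFFER_START whose target is patched later,
              * so the batch spins on itself until released by end_spin().
              */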
1853         for (i = 0; i < count; i++) {
1854                 u32 *cs = batch + i * 64 / sizeof(*cs);
1855                 u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1856
1857                 GEM_BUG_ON(INTEL_GEN(i915) < 6);
1858                 cs[0] = MI_STORE_DWORD_IMM_GEN4;
1859                 if (INTEL_GEN(i915) >= 8) {
1860                         cs[1] = lower_32_bits(addr);
1861                         cs[2] = upper_32_bits(addr);
1862                         cs[3] = i;
1863                         cs[4] = MI_NOOP;
1864                         cs[5] = MI_BATCH_BUFFER_START_GEN8;
1865                 } else {
1866                         cs[1] = 0;
1867                         cs[2] = lower_32_bits(addr);
1868                         cs[3] = i;
1869                         cs[4] = MI_NOOP;
1870                         cs[5] = MI_BATCH_BUFFER_START;
1871                 }
1872         }
1873
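             /*
              * The result page is pinned at the very top of the address
              * space; each batch writes its slot index into it so we can
              * see which writes actually landed.
              */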
1874         out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1875         if (IS_ERR(out)) {
1876                 err = PTR_ERR(out);
1877                 goto out_put_batch;
1878         }
1879         i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1880
1881         vma = i915_vma_instance(out, vm, NULL);
1882         if (IS_ERR(vma)) {
1883                 err = PTR_ERR(vma);
1884                 goto out_put_out;
1885         }
1886
1887         err = i915_vma_pin(vma, 0, 0,
1888                            PIN_USER |
1889                            PIN_OFFSET_FIXED |
1890                            (vm->total - PAGE_SIZE));
1891         if (err)
1892                 goto out_put_out;
1893         GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1894
1895         result = i915_gem_object_pin_map(out, I915_MAP_WB);
1896         if (IS_ERR(result)) {
1897                 err = PTR_ERR(result);
1898                 goto out_put_out;
1899         }
1900
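             /*
              * For every engine that can emit the store, repeat the
              * prime-then-replace sequence at random offsets until the
              * timeout expires.
              */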
1901         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1902                 IGT_TIMEOUT(end_time);
1903                 unsigned long pass = 0;
1904
1905                 if (!intel_engine_can_store_dword(ce->engine))
1906                         continue;
1907
1908                 while (!__igt_timeout(end_time, NULL)) {
1909                         struct i915_vm_pt_stash stash = {};
1910                         struct i915_request *rq;
1911                         u64 offset;
1912
1913                         offset = igt_random_offset(&prng,
1914                                                    0, vm->total - PAGE_SIZE,
1915                                                    chunk_size, PAGE_SIZE);
1916
1917                         memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1918
1919                         vma = i915_vma_instance(bbe, vm, NULL);
1920                         if (IS_ERR(vma)) {
1921                                 err = PTR_ERR(vma);
1922                                 goto end;
1923                         }
1924
1925                         err = vma->ops->set_pages(vma);
1926                         if (err)
1927                                 goto end;
1928
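                             /*
                              * Pre-allocate and pin the page tables covering the
                              * chunk, so the directory structure exists before we
                              * write the PTEs directly below.
                              */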
1929                         err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
1930                         if (err)
1931                                 goto end;
1932
1933                         err = i915_vm_pin_pt_stash(vm, &stash);
1934                         if (err) {
1935                                 i915_vm_free_pt_stash(vm, &stash);
1936                                 goto end;
1937                         }
1938
1939                         vm->allocate_va_range(vm, &stash, offset, chunk_size);
1940
1941                         i915_vm_free_pt_stash(vm, &stash);
1942
1943                         /* Prime the TLB with the dummy pages */
1944                         for (i = 0; i < count; i++) {
1945                                 vma->node.start = offset + i * PAGE_SIZE;
1946                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1947
1948                                 rq = submit_batch(ce, vma->node.start);
1949                                 if (IS_ERR(rq)) {
1950                                         err = PTR_ERR(rq);
1951                                         goto end;
1952                                 }
1953                                 i915_request_put(rq);
1954                         }
1955
1956                         vma->ops->clear_pages(vma);
1957
1958                         err = context_sync(ce);
1959                         if (err) {
1960                                 pr_err("%s: dummy setup timed out\n",
1961                                        ce->engine->name);
1962                                 goto end;
1963                         }
1964
1965                         vma = i915_vma_instance(act, vm, NULL);
1966                         if (IS_ERR(vma)) {
1967                                 err = PTR_ERR(vma);
1968                                 goto end;
1969                         }
1970
1971                         err = vma->ops->set_pages(vma);
1972                         if (err)
1973                                 goto end;
1974
1975                         /* Replace the mappings primed into the TLB with the target batches */
1976                         for (i = 0; i < count; i++) {
1977                                 struct i915_request *rq;
1978                                 u32 *cs = batch + i * 64 / sizeof(*cs);
1979                                 u64 addr;
1980
1981                                 vma->node.start = offset + i * PAGE_SIZE;
1982                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1983
1984                                 addr = vma->node.start + i * 64;
1985                                 cs[4] = MI_NOOP;
1986                                 cs[6] = lower_32_bits(addr);
1987                                 cs[7] = upper_32_bits(addr);
1988                                 wmb();
1989
1990                                 rq = submit_batch(ce, addr);
1991                                 if (IS_ERR(rq)) {
1992                                         err = PTR_ERR(rq);
1993                                         goto end;
1994                                 }
1995
1996                                 /* Wait until the context chain has started */
1997                                 if (i == 0) {
1998                                         while (READ_ONCE(result[i]) &&
1999                                                !i915_request_completed(rq))
2000                                                 cond_resched();
2001                                 } else {
2002                                         end_spin(batch, i - 1);
2003                                 }
2004
2005                                 i915_request_put(rq);
2006                         }
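                             /* Release the final batch from its spin loop */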
2007                         end_spin(batch, count - 1);
2008
2009                         vma->ops->clear_pages(vma);
2010
2011                         err = context_sync(ce);
2012                         if (err) {
2013                                 pr_err("%s: writes timed out\n",
2014                                        ce->engine->name);
2015                                 goto end;
2016                         }
2017
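                             /*
                              * Every slot should now hold its own index; a stale
                              * TLB entry shows up here as a lost write.
                              */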
2018                         for (i = 0; i < count; i++) {
2019                                 if (result[i] != i) {
2020                                         pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2021                                                ce->engine->name, pass,
2022                                                offset, i, result[i], i);
2023                                         err = -EINVAL;
2024                                         goto end;
2025                                 }
2026                         }
2027
2028                         vm->clear_range(vm, offset, chunk_size);
2029                         pass++;
2030                 }
2031         }
2032 end:
2033         if (igt_flush_test(i915))
2034                 err = -EIO;
2035         i915_gem_context_unlock_engines(ctx);
2036         i915_gem_object_unpin_map(out);
2037 out_put_out:
2038         i915_gem_object_put(out);
2039 out_put_batch:
2040         i915_gem_object_unpin_map(act);
2041 out_put_act:
2042         i915_gem_object_put(act);
2043 out_put_bbe:
2044         i915_gem_object_put(bbe);
2045 out_vm:
2046         i915_vm_put(vm);
2047 out_unlock:
2048         fput(file);
2049         return err;
2050 }
2051
2052 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2053 {
2054         static const struct i915_subtest tests[] = {
2055                 SUBTEST(igt_ppgtt_alloc),
2056                 SUBTEST(igt_ppgtt_lowlevel),
2057                 SUBTEST(igt_ppgtt_drunk),
2058                 SUBTEST(igt_ppgtt_walk),
2059                 SUBTEST(igt_ppgtt_pot),
2060                 SUBTEST(igt_ppgtt_fill),
2061                 SUBTEST(igt_ppgtt_shrink),
2062                 SUBTEST(igt_ppgtt_shrink_boom),
2063                 SUBTEST(igt_ggtt_lowlevel),
2064                 SUBTEST(igt_ggtt_drunk),
2065                 SUBTEST(igt_ggtt_walk),
2066                 SUBTEST(igt_ggtt_pot),
2067                 SUBTEST(igt_ggtt_fill),
2068                 SUBTEST(igt_ggtt_page),
2069                 SUBTEST(igt_cs_tlb),
2070         };
2071
2072         GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2073
2074         return i915_subtests(tests, i915);
2075 }