drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27
28 #include "gem/i915_gem_context.h"
29 #include "gem/selftests/mock_context.h"
30 #include "gt/intel_context.h"
31 #include "gt/intel_gpu_commands.h"
32
33 #include "i915_random.h"
34 #include "i915_selftest.h"
35
36 #include "mock_drm.h"
37 #include "mock_gem_device.h"
38 #include "mock_gtt.h"
39 #include "igt_flush_test.h"
40
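/*
 * Flush the delayed object-free worker so that objects released by the
 * previous test iteration are actually freed before the next one starts.
 */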
41 static void cleanup_freed_objects(struct drm_i915_private *i915)
42 {
43         i915_gem_drain_freed_objects(i915);
44 }
45
46 static void fake_free_pages(struct drm_i915_gem_object *obj,
47                             struct sg_table *pages)
48 {
49         sg_free_table(pages);
50         kfree(pages);
51 }
52
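/*
 * fake_get_pages() supplies "backing storage" without allocating any real
 * memory: every scatterlist entry points at the same bogus page (PFN_BIAS)
 * and is capped at 2GiB, so arbitrarily large objects can be created for
 * exercising the GTT insertion paths.
 */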
53 static int fake_get_pages(struct drm_i915_gem_object *obj)
54 {
55 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
56 #define PFN_BIAS 0x1000
57         struct sg_table *pages;
58         struct scatterlist *sg;
59         unsigned int sg_page_sizes;
60         typeof(obj->base.size) rem;
61
62         pages = kmalloc(sizeof(*pages), GFP);
63         if (!pages)
64                 return -ENOMEM;
65
66         rem = round_up(obj->base.size, BIT(31)) >> 31;
67         if (sg_alloc_table(pages, rem, GFP)) {
68                 kfree(pages);
69                 return -ENOMEM;
70         }
71
72         sg_page_sizes = 0;
73         rem = obj->base.size;
74         for (sg = pages->sgl; sg; sg = sg_next(sg)) {
75                 unsigned long len = min_t(typeof(rem), rem, BIT(31));
76
77                 GEM_BUG_ON(!len);
78                 sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
79                 sg_dma_address(sg) = page_to_phys(sg_page(sg));
80                 sg_dma_len(sg) = len;
81                 sg_page_sizes |= len;
82
83                 rem -= len;
84         }
85         GEM_BUG_ON(rem);
86
87         __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
88
89         return 0;
90 #undef GFP
91 }
92
93 static void fake_put_pages(struct drm_i915_gem_object *obj,
94                            struct sg_table *pages)
95 {
96         fake_free_pages(obj, pages);
97         obj->mm.dirty = false;
98 }
99
100 static const struct drm_i915_gem_object_ops fake_ops = {
101         .name = "fake-gem",
102         .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
103         .get_pages = fake_get_pages,
104         .put_pages = fake_put_pages,
105 };
106
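/*
 * Create a GEM object backed by the fake pages above, allowing the tests to
 * bind objects far larger than the available system memory.
 */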
107 static struct drm_i915_gem_object *
108 fake_dma_object(struct drm_i915_private *i915, u64 size)
109 {
110         static struct lock_class_key lock_class;
111         struct drm_i915_gem_object *obj;
112
113         GEM_BUG_ON(!size);
114         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
115
116         if (overflows_type(size, obj->base.size))
117                 return ERR_PTR(-E2BIG);
118
119         obj = i915_gem_object_alloc();
120         if (!obj)
121                 goto err;
122
123         drm_gem_private_object_init(&i915->drm, &obj->base, size);
124         i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
125
126         i915_gem_object_set_volatile(obj);
127
128         obj->write_domain = I915_GEM_DOMAIN_CPU;
129         obj->read_domains = I915_GEM_DOMAIN_CPU;
130         obj->cache_level = I915_CACHE_NONE;
131
132         /* Preallocate the "backing storage" */
133         if (i915_gem_object_pin_pages_unlocked(obj))
134                 goto err_obj;
135
136         i915_gem_object_unpin_pages(obj);
137         return obj;
138
139 err_obj:
140         i915_gem_object_put(obj);
141 err:
142         return ERR_PTR(-ENOMEM);
143 }
144
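/*
 * igt_ppgtt_alloc: allocate the page tables for a full ppGTT, first covering
 * the whole range in one go and then growing it incrementally, limited to
 * the smaller of the vm size and the amount of system RAM.
 */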
145 static int igt_ppgtt_alloc(void *arg)
146 {
147         struct drm_i915_private *dev_priv = arg;
148         struct i915_ppgtt *ppgtt;
149         struct i915_gem_ww_ctx ww;
150         u64 size, last, limit;
151         int err = 0;
152
153         /* Allocate a ppgtt and try to fill the entire range */
154
155         if (!HAS_PPGTT(dev_priv))
156                 return 0;
157
158         ppgtt = i915_ppgtt_create(&dev_priv->gt);
159         if (IS_ERR(ppgtt))
160                 return PTR_ERR(ppgtt);
161
162         if (!ppgtt->vm.allocate_va_range)
163                 goto err_ppgtt_cleanup;
164
165         /*
166          * While we only allocate the page tables here and so we could
167          * address a much larger GTT than we could actually fit into
168          * RAM, a practical limit is the number of physical pages in the
169          * system. This should ensure that we do not run into the OOM
170          * killer during the test and wilfully take down the machine.
171          */
172         limit = totalram_pages() << PAGE_SHIFT;
173         limit = min(ppgtt->vm.total, limit);
174
175         i915_gem_ww_ctx_init(&ww, false);
176 retry:
177         err = i915_vm_lock_objects(&ppgtt->vm, &ww);
178         if (err)
179                 goto err_ppgtt_cleanup;
180
181         /* Check we can allocate the entire range */
182         for (size = 4096; size <= limit; size <<= 2) {
183                 struct i915_vm_pt_stash stash = {};
184
185                 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
186                 if (err)
187                         goto err_ppgtt_cleanup;
188
189                 err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
190                 if (err) {
191                         i915_vm_free_pt_stash(&ppgtt->vm, &stash);
192                         goto err_ppgtt_cleanup;
193                 }
194
195                 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
196                 cond_resched();
197
198                 ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
199
200                 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
201         }
202
203         /* Check we can incrementally allocate the entire range */
204         for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
205                 struct i915_vm_pt_stash stash = {};
206
207                 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
208                 if (err)
209                         goto err_ppgtt_cleanup;
210
211                 err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
212                 if (err) {
213                         i915_vm_free_pt_stash(&ppgtt->vm, &stash);
214                         goto err_ppgtt_cleanup;
215                 }
216
217                 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
218                                             last, size - last);
219                 cond_resched();
220
221                 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
222         }
223
224 err_ppgtt_cleanup:
225         if (err == -EDEADLK) {
226                 err = i915_gem_ww_ctx_backoff(&ww);
227                 if (!err)
228                         goto retry;
229         }
230         i915_gem_ww_ctx_fini(&ww);
231
232         i915_vm_put(&ppgtt->vm);
233         return err;
234 }
235
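/*
 * lowlevel_hole: exercise vm->insert_entries() and vm->clear_range()
 * directly. A mock vma is populated by hand (pages, node.start, node.size)
 * and inserted at random offsets within the hole, bypassing the usual VMA
 * binding machinery.
 */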
236 static int lowlevel_hole(struct i915_address_space *vm,
237                          u64 hole_start, u64 hole_end,
238                          unsigned long end_time)
239 {
240         I915_RND_STATE(seed_prng);
241         struct i915_vma *mock_vma;
242         unsigned int size;
243
244         mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
245         if (!mock_vma)
246                 return -ENOMEM;
247
248         /* Keep creating larger objects until one cannot fit into the hole */
249         for (size = 12; (hole_end - hole_start) >> size; size++) {
250                 I915_RND_SUBSTATE(prng, seed_prng);
251                 struct drm_i915_gem_object *obj;
252                 unsigned int *order, count, n;
253                 u64 hole_size;
254
255                 hole_size = (hole_end - hole_start) >> size;
256                 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
257                         hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
258                 count = hole_size >> 1;
259                 if (!count) {
260                         pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
261                                  __func__, hole_start, hole_end, size, hole_size);
262                         break;
263                 }
264
265                 do {
266                         order = i915_random_order(count, &prng);
267                         if (order)
268                                 break;
269                 } while (count >>= 1);
270                 if (!count) {
271                         kfree(mock_vma);
272                         return -ENOMEM;
273                 }
274                 GEM_BUG_ON(!order);
275
276                 GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
277                 GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
278
279                 /* Ignore allocation failures (i.e. don't report them as
280                  * a test failure) as we are purposefully allocating very
281                  * large objects without checking that we have sufficient
282                  * memory. We expect to hit -ENOMEM.
283                  */
284
285                 obj = fake_dma_object(vm->i915, BIT_ULL(size));
286                 if (IS_ERR(obj)) {
287                         kfree(order);
288                         break;
289                 }
290
291                 GEM_BUG_ON(obj->base.size != BIT_ULL(size));
292
293                 if (i915_gem_object_pin_pages_unlocked(obj)) {
294                         i915_gem_object_put(obj);
295                         kfree(order);
296                         break;
297                 }
298
299                 for (n = 0; n < count; n++) {
300                         u64 addr = hole_start + order[n] * BIT_ULL(size);
301                         intel_wakeref_t wakeref;
302
303                         GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
304
305                         if (igt_timeout(end_time,
306                                         "%s timed out before %d/%d\n",
307                                         __func__, n, count)) {
308                                 hole_end = hole_start; /* quit */
309                                 break;
310                         }
311
312                         if (vm->allocate_va_range) {
313                                 struct i915_vm_pt_stash stash = {};
314                                 struct i915_gem_ww_ctx ww;
315                                 int err;
316
317                                 i915_gem_ww_ctx_init(&ww, false);
318 retry:
319                                 err = i915_vm_lock_objects(vm, &ww);
320                                 if (err)
321                                         goto alloc_vm_end;
322
323                                 err = -ENOMEM;
324                                 if (i915_vm_alloc_pt_stash(vm, &stash,
325                                                            BIT_ULL(size)))
326                                         goto alloc_vm_end;
327
328                                 err = i915_vm_pin_pt_stash(vm, &stash);
329                                 if (!err)
330                                         vm->allocate_va_range(vm, &stash,
331                                                               addr, BIT_ULL(size));
332
333                                 i915_vm_free_pt_stash(vm, &stash);
334 alloc_vm_end:
335                                 if (err == -EDEADLK) {
336                                         err = i915_gem_ww_ctx_backoff(&ww);
337                                         if (!err)
338                                                 goto retry;
339                                 }
340                                 i915_gem_ww_ctx_fini(&ww);
341
342                                 if (err)
343                                         break;
344                         }
345
346                         mock_vma->pages = obj->mm.pages;
347                         mock_vma->node.size = BIT_ULL(size);
348                         mock_vma->node.start = addr;
349
350                         with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
351                                 vm->insert_entries(vm, mock_vma,
352                                                    I915_CACHE_NONE, 0);
353                 }
354                 count = n;
355
356                 i915_random_reorder(order, count, &prng);
357                 for (n = 0; n < count; n++) {
358                         u64 addr = hole_start + order[n] * BIT_ULL(size);
359                         intel_wakeref_t wakeref;
360
361                         GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
362                         with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
363                                 vm->clear_range(vm, addr, BIT_ULL(size));
364                 }
365
366                 i915_gem_object_unpin_pages(obj);
367                 i915_gem_object_put(obj);
368
369                 kfree(order);
370
371                 cleanup_freed_objects(vm->i915);
372         }
373
374         kfree(mock_vma);
375         return 0;
376 }
377
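/* Unbind (best effort) and release every object on the test list. */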
378 static void close_object_list(struct list_head *objects,
379                               struct i915_address_space *vm)
380 {
381         struct drm_i915_gem_object *obj, *on;
382         int ignored;
383
384         list_for_each_entry_safe(obj, on, objects, st_link) {
385                 struct i915_vma *vma;
386
387                 vma = i915_vma_instance(obj, vm, NULL);
388                 if (!IS_ERR(vma))
389                         ignored = i915_vma_unbind(vma);
390
391                 list_del(&obj->st_link);
392                 i915_gem_object_put(obj);
393         }
394 }
395
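/*
 * fill_hole: bind many differently sized objects against either edge of the
 * hole, walking top-down and bottom-up in both list orders, and verify that
 * each vma lands exactly at its requested offset before unbinding it again.
 */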
396 static int fill_hole(struct i915_address_space *vm,
397                      u64 hole_start, u64 hole_end,
398                      unsigned long end_time)
399 {
400         const u64 hole_size = hole_end - hole_start;
401         struct drm_i915_gem_object *obj;
402         const unsigned long max_pages =
403                 min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
404         const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
405         unsigned long npages, prime, flags;
406         struct i915_vma *vma;
407         LIST_HEAD(objects);
408         int err;
409
410         /* Try binding many VMAs, working inwards from either edge */
411
412         flags = PIN_OFFSET_FIXED | PIN_USER;
413         if (i915_is_ggtt(vm))
414                 flags |= PIN_GLOBAL;
415
416         for_each_prime_number_from(prime, 2, max_step) {
417                 for (npages = 1; npages <= max_pages; npages *= prime) {
418                         const u64 full_size = npages << PAGE_SHIFT;
419                         const struct {
420                                 const char *name;
421                                 u64 offset;
422                                 int step;
423                         } phases[] = {
424                                 { "top-down", hole_end, -1, },
425                                 { "bottom-up", hole_start, 1, },
426                                 { }
427                         }, *p;
428
429                         obj = fake_dma_object(vm->i915, full_size);
430                         if (IS_ERR(obj))
431                                 break;
432
433                         list_add(&obj->st_link, &objects);
434
435                         /* Align differing sized objects against the edges, and
436                          * check we don't walk off into the void when binding
437                          * them into the GTT.
438                          */
439                         for (p = phases; p->name; p++) {
440                                 u64 offset;
441
442                                 offset = p->offset;
443                                 list_for_each_entry(obj, &objects, st_link) {
444                                         vma = i915_vma_instance(obj, vm, NULL);
445                                         if (IS_ERR(vma))
446                                                 continue;
447
448                                         if (p->step < 0) {
449                                                 if (offset < hole_start + obj->base.size)
450                                                         break;
451                                                 offset -= obj->base.size;
452                                         }
453
454                                         err = i915_vma_pin(vma, 0, 0, offset | flags);
455                                         if (err) {
456                                                 pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
457                                                        __func__, p->name, err, npages, prime, offset);
458                                                 goto err;
459                                         }
460
461                                         if (!drm_mm_node_allocated(&vma->node) ||
462                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
463                                                 pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
464                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
465                                                        offset);
466                                                 err = -EINVAL;
467                                                 goto err;
468                                         }
469
470                                         i915_vma_unpin(vma);
471
472                                         if (p->step > 0) {
473                                                 if (offset + obj->base.size > hole_end)
474                                                         break;
475                                                 offset += obj->base.size;
476                                         }
477                                 }
478
479                                 offset = p->offset;
480                                 list_for_each_entry(obj, &objects, st_link) {
481                                         vma = i915_vma_instance(obj, vm, NULL);
482                                         if (IS_ERR(vma))
483                                                 continue;
484
485                                         if (p->step < 0) {
486                                                 if (offset < hole_start + obj->base.size)
487                                                         break;
488                                                 offset -= obj->base.size;
489                                         }
490
491                                         if (!drm_mm_node_allocated(&vma->node) ||
492                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
493                                                 pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
494                                                        __func__, p->name, vma->node.start, vma->node.size,
495                                                        offset);
496                                                 err = -EINVAL;
497                                                 goto err;
498                                         }
499
500                                         err = i915_vma_unbind(vma);
501                                         if (err) {
502                                                 pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
503                                                        __func__, p->name, vma->node.start, vma->node.size,
504                                                        err);
505                                                 goto err;
506                                         }
507
508                                         if (p->step > 0) {
509                                                 if (offset + obj->base.size > hole_end)
510                                                         break;
511                                                 offset += obj->base.size;
512                                         }
513                                 }
514
515                                 offset = p->offset;
516                                 list_for_each_entry_reverse(obj, &objects, st_link) {
517                                         vma = i915_vma_instance(obj, vm, NULL);
518                                         if (IS_ERR(vma))
519                                                 continue;
520
521                                         if (p->step < 0) {
522                                                 if (offset < hole_start + obj->base.size)
523                                                         break;
524                                                 offset -= obj->base.size;
525                                         }
526
527                                         err = i915_vma_pin(vma, 0, 0, offset | flags);
528                                         if (err) {
529                                                 pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
530                                                        __func__, p->name, err, npages, prime, offset);
531                                                 goto err;
532                                         }
533
534                                         if (!drm_mm_node_allocated(&vma->node) ||
535                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
536                                                 pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
537                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
538                                                        offset);
539                                                 err = -EINVAL;
540                                                 goto err;
541                                         }
542
543                                         i915_vma_unpin(vma);
544
545                                         if (p->step > 0) {
546                                                 if (offset + obj->base.size > hole_end)
547                                                         break;
548                                                 offset += obj->base.size;
549                                         }
550                                 }
551
552                                 offset = p->offset;
553                                 list_for_each_entry_reverse(obj, &objects, st_link) {
554                                         vma = i915_vma_instance(obj, vm, NULL);
555                                         if (IS_ERR(vma))
556                                                 continue;
557
558                                         if (p->step < 0) {
559                                                 if (offset < hole_start + obj->base.size)
560                                                         break;
561                                                 offset -= obj->base.size;
562                                         }
563
564                                         if (!drm_mm_node_allocated(&vma->node) ||
565                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
566                                                 pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
567                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
568                                                        offset);
569                                                 err = -EINVAL;
570                                                 goto err;
571                                         }
572
573                                         err = i915_vma_unbind(vma);
574                                         if (err) {
575                                                 pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
576                                                        __func__, p->name, vma->node.start, vma->node.size,
577                                                        err);
578                                                 goto err;
579                                         }
580
581                                         if (p->step > 0) {
582                                                 if (offset + obj->base.size > hole_end)
583                                                         break;
584                                                 offset += obj->base.size;
585                                         }
586                                 }
587                         }
588
589                         if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
590                                         __func__, npages, prime)) {
591                                 err = -EINTR;
592                                 goto err;
593                         }
594                 }
595
596                 close_object_list(&objects, vm);
597                 cleanup_freed_objects(vm->i915);
598         }
599
600         return 0;
601
602 err:
603         close_object_list(&objects, vm);
604         return err;
605 }
606
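/*
 * walk_hole: bind a single vma at every position it can occupy within the
 * hole, unbinding it between steps.
 */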
607 static int walk_hole(struct i915_address_space *vm,
608                      u64 hole_start, u64 hole_end,
609                      unsigned long end_time)
610 {
611         const u64 hole_size = hole_end - hole_start;
612         const unsigned long max_pages =
613                 min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
614         unsigned long flags;
615         u64 size;
616
617         /* Try binding a single VMA in different positions within the hole */
618
619         flags = PIN_OFFSET_FIXED | PIN_USER;
620         if (i915_is_ggtt(vm))
621                 flags |= PIN_GLOBAL;
622
623         for_each_prime_number_from(size, 1, max_pages) {
624                 struct drm_i915_gem_object *obj;
625                 struct i915_vma *vma;
626                 u64 addr;
627                 int err = 0;
628
629                 obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
630                 if (IS_ERR(obj))
631                         break;
632
633                 vma = i915_vma_instance(obj, vm, NULL);
634                 if (IS_ERR(vma)) {
635                         err = PTR_ERR(vma);
636                         goto err_put;
637                 }
638
639                 for (addr = hole_start;
640                      addr + obj->base.size < hole_end;
641                      addr += obj->base.size) {
642                         err = i915_vma_pin(vma, 0, 0, addr | flags);
643                         if (err) {
644                                 pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
645                                        __func__, addr, vma->size,
646                                        hole_start, hole_end, err);
647                                 goto err_put;
648                         }
649                         i915_vma_unpin(vma);
650
651                         if (!drm_mm_node_allocated(&vma->node) ||
652                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
653                                 pr_err("%s incorrect at %llx + %llx\n",
654                                        __func__, addr, vma->size);
655                                 err = -EINVAL;
656                                 goto err_put;
657                         }
658
659                         err = i915_vma_unbind(vma);
660                         if (err) {
661                                 pr_err("%s unbind failed at %llx + %llx  with err=%d\n",
662                                        __func__, addr, vma->size, err);
663                                 goto err_put;
664                         }
665
666                         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
667
668                         if (igt_timeout(end_time,
669                                         "%s timed out at %llx\n",
670                                         __func__, addr)) {
671                                 err = -EINTR;
672                                 goto err_put;
673                         }
674                 }
675
676 err_put:
677                 i915_gem_object_put(obj);
678                 if (err)
679                         return err;
680
681                 cleanup_freed_objects(vm->i915);
682         }
683
684         return 0;
685 }
686
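/*
 * pot_hole: pin a two-page object so that it straddles every power-of-two
 * boundary within the hole, checking its placement at each step.
 */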
687 static int pot_hole(struct i915_address_space *vm,
688                     u64 hole_start, u64 hole_end,
689                     unsigned long end_time)
690 {
691         struct drm_i915_gem_object *obj;
692         struct i915_vma *vma;
693         unsigned long flags;
694         unsigned int pot;
695         int err = 0;
696
697         flags = PIN_OFFSET_FIXED | PIN_USER;
698         if (i915_is_ggtt(vm))
699                 flags |= PIN_GLOBAL;
700
701         obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
702         if (IS_ERR(obj))
703                 return PTR_ERR(obj);
704
705         vma = i915_vma_instance(obj, vm, NULL);
706         if (IS_ERR(vma)) {
707                 err = PTR_ERR(vma);
708                 goto err_obj;
709         }
710
711         /* Insert a pair of pages across every pot boundary within the hole */
712         for (pot = fls64(hole_end - 1) - 1;
713              pot > ilog2(2 * I915_GTT_PAGE_SIZE);
714              pot--) {
715                 u64 step = BIT_ULL(pot);
716                 u64 addr;
717
718                 for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
719                      addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
720                      addr += step) {
721                         err = i915_vma_pin(vma, 0, 0, addr | flags);
722                         if (err) {
723                                 pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
724                                        __func__,
725                                        addr,
726                                        hole_start, hole_end,
727                                        err);
728                                 goto err_obj;
729                         }
730
731                         if (!drm_mm_node_allocated(&vma->node) ||
732                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
733                                 pr_err("%s incorrect at %llx + %llx\n",
734                                        __func__, addr, vma->size);
735                                 i915_vma_unpin(vma);
736                                 err = i915_vma_unbind(vma);
737                                 err = -EINVAL;
738                                 goto err_obj;
739                         }
740
741                         i915_vma_unpin(vma);
742                         err = i915_vma_unbind(vma);
743                         GEM_BUG_ON(err);
744                 }
745
746                 if (igt_timeout(end_time,
747                                 "%s timed out after %d/%d\n",
748                                 __func__, pot, fls64(hole_end - 1) - 1)) {
749                         err = -EINTR;
750                         goto err_obj;
751                 }
752         }
753
754 err_obj:
755         i915_gem_object_put(obj);
756         return err;
757 }
758
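/*
 * drunk_hole: bind a single object at pseudo-random offsets throughout the
 * hole using the normal vma pin/unbind paths, growing the object size until
 * it no longer fits.
 */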
759 static int drunk_hole(struct i915_address_space *vm,
760                       u64 hole_start, u64 hole_end,
761                       unsigned long end_time)
762 {
763         I915_RND_STATE(prng);
764         unsigned int size;
765         unsigned long flags;
766
767         flags = PIN_OFFSET_FIXED | PIN_USER;
768         if (i915_is_ggtt(vm))
769                 flags |= PIN_GLOBAL;
770
771         /* Keep creating larger objects until one cannot fit into the hole */
772         for (size = 12; (hole_end - hole_start) >> size; size++) {
773                 struct drm_i915_gem_object *obj;
774                 unsigned int *order, count, n;
775                 struct i915_vma *vma;
776                 u64 hole_size;
777                 int err = -ENODEV;
778
779                 hole_size = (hole_end - hole_start) >> size;
780                 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
781                         hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
782                 count = hole_size >> 1;
783                 if (!count) {
784                         pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
785                                  __func__, hole_start, hole_end, size, hole_size);
786                         break;
787                 }
788
789                 do {
790                         order = i915_random_order(count, &prng);
791                         if (order)
792                                 break;
793                 } while (count >>= 1);
794                 if (!count)
795                         return -ENOMEM;
796                 GEM_BUG_ON(!order);
797
798                 /* Ignore allocation failures (i.e. don't report them as
799                  * a test failure) as we are purposefully allocating very
800                  * large objects without checking that we have sufficient
801                  * memory. We expect to hit -ENOMEM.
802                  */
803
804                 obj = fake_dma_object(vm->i915, BIT_ULL(size));
805                 if (IS_ERR(obj)) {
806                         kfree(order);
807                         break;
808                 }
809
810                 vma = i915_vma_instance(obj, vm, NULL);
811                 if (IS_ERR(vma)) {
812                         err = PTR_ERR(vma);
813                         goto err_obj;
814                 }
815
816                 GEM_BUG_ON(vma->size != BIT_ULL(size));
817
818                 for (n = 0; n < count; n++) {
819                         u64 addr = hole_start + order[n] * BIT_ULL(size);
820
821                         err = i915_vma_pin(vma, 0, 0, addr | flags);
822                         if (err) {
823                                 pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
824                                        __func__,
825                                        addr, BIT_ULL(size),
826                                        hole_start, hole_end,
827                                        err);
828                                 goto err_obj;
829                         }
830
831                         if (!drm_mm_node_allocated(&vma->node) ||
832                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
833                                 pr_err("%s incorrect at %llx + %llx\n",
834                                        __func__, addr, BIT_ULL(size));
835                                 i915_vma_unpin(vma);
836                                 err = i915_vma_unbind(vma);
837                                 err = -EINVAL;
838                                 goto err_obj;
839                         }
840
841                         i915_vma_unpin(vma);
842                         err = i915_vma_unbind(vma);
843                         GEM_BUG_ON(err);
844
845                         if (igt_timeout(end_time,
846                                         "%s timed out after %d/%d\n",
847                                         __func__, n, count)) {
848                                 err = -EINTR;
849                                 goto err_obj;
850                         }
851                 }
852
853 err_obj:
854                 i915_gem_object_put(obj);
855                 kfree(order);
856                 if (err)
857                         return err;
858
859                 cleanup_freed_objects(vm->i915);
860         }
861
862         return 0;
863 }
864
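/*
 * __shrink_hole: fill the hole with progressively larger objects while
 * allocation faults are being injected into the address space (see
 * shrink_hole() below), exercising the error paths when an allocation fails
 * part-way through.
 */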
865 static int __shrink_hole(struct i915_address_space *vm,
866                          u64 hole_start, u64 hole_end,
867                          unsigned long end_time)
868 {
869         struct drm_i915_gem_object *obj;
870         unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
871         unsigned int order = 12;
872         LIST_HEAD(objects);
873         int err = 0;
874         u64 addr;
875
876         /* Keep creating larger objects until one cannot fit into the hole */
877         for (addr = hole_start; addr < hole_end; ) {
878                 struct i915_vma *vma;
879                 u64 size = BIT_ULL(order++);
880
881                 size = min(size, hole_end - addr);
882                 obj = fake_dma_object(vm->i915, size);
883                 if (IS_ERR(obj)) {
884                         err = PTR_ERR(obj);
885                         break;
886                 }
887
888                 list_add(&obj->st_link, &objects);
889
890                 vma = i915_vma_instance(obj, vm, NULL);
891                 if (IS_ERR(vma)) {
892                         err = PTR_ERR(vma);
893                         break;
894                 }
895
896                 GEM_BUG_ON(vma->size != size);
897
898                 err = i915_vma_pin(vma, 0, 0, addr | flags);
899                 if (err) {
900                         pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
901                                __func__, addr, size, hole_start, hole_end, err);
902                         break;
903                 }
904
905                 if (!drm_mm_node_allocated(&vma->node) ||
906                     i915_vma_misplaced(vma, 0, 0, addr | flags)) {
907                         pr_err("%s incorrect at %llx + %llx\n",
908                                __func__, addr, size);
909                         i915_vma_unpin(vma);
910                         err = i915_vma_unbind(vma);
911                         err = -EINVAL;
912                         break;
913                 }
914
915                 i915_vma_unpin(vma);
916                 addr += size;
917
918                 /*
919                  * Since we are injecting allocation faults at random intervals,
920                  * wait for this allocation to complete before we change the
921                  * fault injection.
922                  */
923                 err = i915_vma_sync(vma);
924                 if (err)
925                         break;
926
927                 if (igt_timeout(end_time,
928                 "%s timed out at offset %llx [%llx - %llx]\n",
929                                 __func__, addr, hole_start, hole_end)) {
930                         err = -EINTR;
931                         break;
932                 }
933         }
934
935         close_object_list(&objects, vm);
936         cleanup_freed_objects(vm->i915);
937         return err;
938 }
939
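/*
 * Sweep the fault-injection interval across a series of primes so that the
 * injected allocation failures strike at varying points of each fill.
 */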
940 static int shrink_hole(struct i915_address_space *vm,
941                        u64 hole_start, u64 hole_end,
942                        unsigned long end_time)
943 {
944         unsigned long prime;
945         int err;
946
947         vm->fault_attr.probability = 999;
948         atomic_set(&vm->fault_attr.times, -1);
949
950         for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
951                 vm->fault_attr.interval = prime;
952                 err = __shrink_hole(vm, hole_start, hole_end, end_time);
953                 if (err)
954                         break;
955         }
956
957         memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
958
959         return err;
960 }
961
962 static int shrink_boom(struct i915_address_space *vm,
963                        u64 hole_start, u64 hole_end,
964                        unsigned long end_time)
965 {
966         unsigned int sizes[] = { SZ_2M, SZ_1G };
967         struct drm_i915_gem_object *purge;
968         struct drm_i915_gem_object *explode;
969         int err;
970         int i;
971
972         /*
973          * Catch the case which shrink_hole seems to miss. The setup here
974          * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
975          * ensuring that all vmas associated with the respective pd/pdp are
976          * unpinned at the time.
977          */
978
979         for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
980                 unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
981                 unsigned int size = sizes[i];
982                 struct i915_vma *vma;
983
984                 purge = fake_dma_object(vm->i915, size);
985                 if (IS_ERR(purge))
986                         return PTR_ERR(purge);
987
988                 vma = i915_vma_instance(purge, vm, NULL);
989                 if (IS_ERR(vma)) {
990                         err = PTR_ERR(vma);
991                         goto err_purge;
992                 }
993
994                 err = i915_vma_pin(vma, 0, 0, flags);
995                 if (err)
996                         goto err_purge;
997
998                 /* Should now be ripe for purging */
999                 i915_vma_unpin(vma);
1000
1001                 explode = fake_dma_object(vm->i915, size);
1002                 if (IS_ERR(explode)) {
1003                         err = PTR_ERR(explode);
1004                         goto err_purge;
1005                 }
1006
1007                 vm->fault_attr.probability = 100;
1008                 vm->fault_attr.interval = 1;
1009                 atomic_set(&vm->fault_attr.times, -1);
1010
1011                 vma = i915_vma_instance(explode, vm, NULL);
1012                 if (IS_ERR(vma)) {
1013                         err = PTR_ERR(vma);
1014                         goto err_explode;
1015                 }
1016
1017                 err = i915_vma_pin(vma, 0, 0, flags | size);
1018                 if (err)
1019                         goto err_explode;
1020
1021                 i915_vma_unpin(vma);
1022
1023                 i915_gem_object_put(purge);
1024                 i915_gem_object_put(explode);
1025
1026                 memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1027                 cleanup_freed_objects(vm->i915);
1028         }
1029
1030         return 0;
1031
1032 err_explode:
1033         i915_gem_object_put(explode);
1034 err_purge:
1035         i915_gem_object_put(purge);
1036         memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1037         return err;
1038 }
1039
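/*
 * Run one of the hole exercisers above over the entire range of a freshly
 * created full ppGTT.
 */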
1040 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1041                           int (*func)(struct i915_address_space *vm,
1042                                       u64 hole_start, u64 hole_end,
1043                                       unsigned long end_time))
1044 {
1045         struct i915_ppgtt *ppgtt;
1046         IGT_TIMEOUT(end_time);
1047         struct file *file;
1048         int err;
1049
1050         if (!HAS_FULL_PPGTT(dev_priv))
1051                 return 0;
1052
1053         file = mock_file(dev_priv);
1054         if (IS_ERR(file))
1055                 return PTR_ERR(file);
1056
1057         ppgtt = i915_ppgtt_create(&dev_priv->gt);
1058         if (IS_ERR(ppgtt)) {
1059                 err = PTR_ERR(ppgtt);
1060                 goto out_free;
1061         }
1062         GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1063         GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
1064
1065         err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1066
1067         i915_vm_put(&ppgtt->vm);
1068
1069 out_free:
1070         fput(file);
1071         return err;
1072 }
1073
1074 static int igt_ppgtt_fill(void *arg)
1075 {
1076         return exercise_ppgtt(arg, fill_hole);
1077 }
1078
1079 static int igt_ppgtt_walk(void *arg)
1080 {
1081         return exercise_ppgtt(arg, walk_hole);
1082 }
1083
1084 static int igt_ppgtt_pot(void *arg)
1085 {
1086         return exercise_ppgtt(arg, pot_hole);
1087 }
1088
1089 static int igt_ppgtt_drunk(void *arg)
1090 {
1091         return exercise_ppgtt(arg, drunk_hole);
1092 }
1093
1094 static int igt_ppgtt_lowlevel(void *arg)
1095 {
1096         return exercise_ppgtt(arg, lowlevel_hole);
1097 }
1098
1099 static int igt_ppgtt_shrink(void *arg)
1100 {
1101         return exercise_ppgtt(arg, shrink_hole);
1102 }
1103
1104 static int igt_ppgtt_shrink_boom(void *arg)
1105 {
1106         return exercise_ppgtt(arg, shrink_boom);
1107 }
1108
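/* Order the drm_mm hole list by ascending start address for a stable walk. */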
1109 static int sort_holes(void *priv, const struct list_head *A,
1110                       const struct list_head *B)
1111 {
1112         struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1113         struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1114
1115         if (a->start < b->start)
1116                 return -1;
1117         else
1118                 return 1;
1119 }
1120
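/*
 * Run the exerciser over every hole currently present in the GGTT. The walk
 * is restarted after each hole, as binding and unbinding may have rearranged
 * the hole list underneath us.
 */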
1121 static int exercise_ggtt(struct drm_i915_private *i915,
1122                          int (*func)(struct i915_address_space *vm,
1123                                      u64 hole_start, u64 hole_end,
1124                                      unsigned long end_time))
1125 {
1126         struct i915_ggtt *ggtt = &i915->ggtt;
1127         u64 hole_start, hole_end, last = 0;
1128         struct drm_mm_node *node;
1129         IGT_TIMEOUT(end_time);
1130         int err = 0;
1131
1132 restart:
1133         list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1134         drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1135                 if (hole_start < last)
1136                         continue;
1137
1138                 if (ggtt->vm.mm.color_adjust)
1139                         ggtt->vm.mm.color_adjust(node, 0,
1140                                                  &hole_start, &hole_end);
1141                 if (hole_start >= hole_end)
1142                         continue;
1143
1144                 err = func(&ggtt->vm, hole_start, hole_end, end_time);
1145                 if (err)
1146                         break;
1147
1148                 /* As we have manipulated the drm_mm, the hole list may be stale */
1149                 last = hole_end;
1150                 goto restart;
1151         }
1152
1153         return err;
1154 }
1155
1156 static int igt_ggtt_fill(void *arg)
1157 {
1158         return exercise_ggtt(arg, fill_hole);
1159 }
1160
1161 static int igt_ggtt_walk(void *arg)
1162 {
1163         return exercise_ggtt(arg, walk_hole);
1164 }
1165
1166 static int igt_ggtt_pot(void *arg)
1167 {
1168         return exercise_ggtt(arg, pot_hole);
1169 }
1170
1171 static int igt_ggtt_drunk(void *arg)
1172 {
1173         return exercise_ggtt(arg, drunk_hole);
1174 }
1175
1176 static int igt_ggtt_lowlevel(void *arg)
1177 {
1178         return exercise_ggtt(arg, lowlevel_hole);
1179 }
1180
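/*
 * igt_ggtt_page: reserve a chunk of the mappable aperture, insert the same
 * backing page at each GTT offset with vm->insert_page(), then write and
 * read back through the aperture in random order to verify every insertion.
 */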
1181 static int igt_ggtt_page(void *arg)
1182 {
1183         const unsigned int count = PAGE_SIZE/sizeof(u32);
1184         I915_RND_STATE(prng);
1185         struct drm_i915_private *i915 = arg;
1186         struct i915_ggtt *ggtt = &i915->ggtt;
1187         struct drm_i915_gem_object *obj;
1188         intel_wakeref_t wakeref;
1189         struct drm_mm_node tmp;
1190         unsigned int *order, n;
1191         int err;
1192
1193         if (!i915_ggtt_has_aperture(ggtt))
1194                 return 0;
1195
1196         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1197         if (IS_ERR(obj))
1198                 return PTR_ERR(obj);
1199
1200         err = i915_gem_object_pin_pages_unlocked(obj);
1201         if (err)
1202                 goto out_free;
1203
1204         memset(&tmp, 0, sizeof(tmp));
1205         mutex_lock(&ggtt->vm.mutex);
1206         err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1207                                           count * PAGE_SIZE, 0,
1208                                           I915_COLOR_UNEVICTABLE,
1209                                           0, ggtt->mappable_end,
1210                                           DRM_MM_INSERT_LOW);
1211         mutex_unlock(&ggtt->vm.mutex);
1212         if (err)
1213                 goto out_unpin;
1214
1215         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1216
1217         for (n = 0; n < count; n++) {
1218                 u64 offset = tmp.start + n * PAGE_SIZE;
1219
1220                 ggtt->vm.insert_page(&ggtt->vm,
1221                                      i915_gem_object_get_dma_address(obj, 0),
1222                                      offset, I915_CACHE_NONE, 0);
1223         }
1224
1225         order = i915_random_order(count, &prng);
1226         if (!order) {
1227                 err = -ENOMEM;
1228                 goto out_remove;
1229         }
1230
1231         for (n = 0; n < count; n++) {
1232                 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1233                 u32 __iomem *vaddr;
1234
1235                 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1236                 iowrite32(n, vaddr + n);
1237                 io_mapping_unmap_atomic(vaddr);
1238         }
1239         intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1240
1241         i915_random_reorder(order, count, &prng);
1242         for (n = 0; n < count; n++) {
1243                 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1244                 u32 __iomem *vaddr;
1245                 u32 val;
1246
1247                 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1248                 val = ioread32(vaddr + n);
1249                 io_mapping_unmap_atomic(vaddr);
1250
1251                 if (val != n) {
1252                         pr_err("insert page failed: found %d, expected %d\n",
1253                                val, n);
1254                         err = -EINVAL;
1255                         break;
1256                 }
1257         }
1258
1259         kfree(order);
1260 out_remove:
1261         ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1262         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1263         mutex_lock(&ggtt->vm.mutex);
1264         drm_mm_remove_node(&tmp);
1265         mutex_unlock(&ggtt->vm.mutex);
1266 out_unpin:
1267         i915_gem_object_unpin_pages(obj);
1268 out_free:
1269         i915_gem_object_put(obj);
1270         return err;
1271 }
1272
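/*
 * Fake just enough of the binding bookkeeping (pages and the vm bound_list)
 * for a node reserved directly in the GTT to look like a bound vma.
 */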
1273 static void track_vma_bind(struct i915_vma *vma)
1274 {
1275         struct drm_i915_gem_object *obj = vma->obj;
1276
1277         __i915_gem_object_pin_pages(obj);
1278
1279         GEM_BUG_ON(vma->pages);
1280         atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1281         __i915_gem_object_pin_pages(obj);
1282         vma->pages = obj->mm.pages;
1283
1284         mutex_lock(&vma->vm->mutex);
1285         list_add_tail(&vma->vm_link, &vma->vm->bound_list);
1286         mutex_unlock(&vma->vm->mutex);
1287 }
1288
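/*
 * Run the exerciser against the address space of a mock context, clamping
 * the range to the amount of system RAM.
 */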
1289 static int exercise_mock(struct drm_i915_private *i915,
1290                          int (*func)(struct i915_address_space *vm,
1291                                      u64 hole_start, u64 hole_end,
1292                                      unsigned long end_time))
1293 {
1294         const u64 limit = totalram_pages() << PAGE_SHIFT;
1295         struct i915_address_space *vm;
1296         struct i915_gem_context *ctx;
1297         IGT_TIMEOUT(end_time);
1298         int err;
1299
1300         ctx = mock_context(i915, "mock");
1301         if (!ctx)
1302                 return -ENOMEM;
1303
1304         vm = i915_gem_context_get_vm_rcu(ctx);
1305         err = func(vm, 0, min(vm->total, limit), end_time);
1306         i915_vm_put(vm);
1307
1308         mock_context_close(ctx);
1309         return err;
1310 }
1311
1312 static int igt_mock_fill(void *arg)
1313 {
1314         struct i915_ggtt *ggtt = arg;
1315
1316         return exercise_mock(ggtt->vm.i915, fill_hole);
1317 }
1318
1319 static int igt_mock_walk(void *arg)
1320 {
1321         struct i915_ggtt *ggtt = arg;
1322
1323         return exercise_mock(ggtt->vm.i915, walk_hole);
1324 }
1325
1326 static int igt_mock_pot(void *arg)
1327 {
1328         struct i915_ggtt *ggtt = arg;
1329
1330         return exercise_mock(ggtt->vm.i915, pot_hole);
1331 }
1332
1333 static int igt_mock_drunk(void *arg)
1334 {
1335         struct i915_ggtt *ggtt = arg;
1336
1337         return exercise_mock(ggtt->vm.i915, drunk_hole);
1338 }
1339
1340 static int igt_gtt_reserve(void *arg)
1341 {
1342         struct i915_ggtt *ggtt = arg;
1343         struct drm_i915_gem_object *obj, *on;
1344         I915_RND_STATE(prng);
1345         LIST_HEAD(objects);
1346         u64 total;
1347         int err = -ENODEV;
1348
1349         /* i915_gem_gtt_reserve() tries to reserve the precise range
1350          * for the node, and evicts if it has to. So our test checks that
1351          * it can give us the requested space and prevent overlaps.
1352          */
1353
1354         /* Start by filling the GGTT */
1355         for (total = 0;
1356              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1357              total += 2 * I915_GTT_PAGE_SIZE) {
1358                 struct i915_vma *vma;
1359
1360                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1361                                                       2 * PAGE_SIZE);
1362                 if (IS_ERR(obj)) {
1363                         err = PTR_ERR(obj);
1364                         goto out;
1365                 }
1366
1367                 err = i915_gem_object_pin_pages_unlocked(obj);
1368                 if (err) {
1369                         i915_gem_object_put(obj);
1370                         goto out;
1371                 }
1372
1373                 list_add(&obj->st_link, &objects);
1374
1375                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1376                 if (IS_ERR(vma)) {
1377                         err = PTR_ERR(vma);
1378                         goto out;
1379                 }
1380
1381                 mutex_lock(&ggtt->vm.mutex);
1382                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1383                                            obj->base.size,
1384                                            total,
1385                                            obj->cache_level,
1386                                            0);
1387                 mutex_unlock(&ggtt->vm.mutex);
1388                 if (err) {
1389                         pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1390                                total, ggtt->vm.total, err);
1391                         goto out;
1392                 }
1393                 track_vma_bind(vma);
1394
1395                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1396                 if (vma->node.start != total ||
1397                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1398                         pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1399                                vma->node.start, vma->node.size,
1400                                total, 2*I915_GTT_PAGE_SIZE);
1401                         err = -EINVAL;
1402                         goto out;
1403                 }
1404         }
1405
1406         /* Now we start forcing evictions */
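             /* Offset by a page, so each node overlaps two pass-1 nodes and must evict */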
1407         for (total = I915_GTT_PAGE_SIZE;
1408              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1409              total += 2 * I915_GTT_PAGE_SIZE) {
1410                 struct i915_vma *vma;
1411
1412                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1413                                                       2 * PAGE_SIZE);
1414                 if (IS_ERR(obj)) {
1415                         err = PTR_ERR(obj);
1416                         goto out;
1417                 }
1418
1419                 err = i915_gem_object_pin_pages_unlocked(obj);
1420                 if (err) {
1421                         i915_gem_object_put(obj);
1422                         goto out;
1423                 }
1424
1425                 list_add(&obj->st_link, &objects);
1426
1427                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1428                 if (IS_ERR(vma)) {
1429                         err = PTR_ERR(vma);
1430                         goto out;
1431                 }
1432
1433                 mutex_lock(&ggtt->vm.mutex);
1434                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1435                                            obj->base.size,
1436                                            total,
1437                                            obj->cache_level,
1438                                            0);
1439                 mutex_unlock(&ggtt->vm.mutex);
1440                 if (err) {
1441                         pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1442                                total, ggtt->vm.total, err);
1443                         goto out;
1444                 }
1445                 track_vma_bind(vma);
1446
1447                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1448                 if (vma->node.start != total ||
1449                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1450                         pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1451                                vma->node.start, vma->node.size,
1452                                total, 2*I915_GTT_PAGE_SIZE);
1453                         err = -EINVAL;
1454                         goto out;
1455                 }
1456         }
1457
1458         /* And then try at random */
1459         list_for_each_entry_safe(obj, on, &objects, st_link) {
1460                 struct i915_vma *vma;
1461                 u64 offset;
1462
1463                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1464                 if (IS_ERR(vma)) {
1465                         err = PTR_ERR(vma);
1466                         goto out;
1467                 }
1468
1469                 err = i915_vma_unbind(vma);
1470                 if (err) {
1471                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1472                         goto out;
1473                 }
1474
1475                 offset = igt_random_offset(&prng,
1476                                            0, ggtt->vm.total,
1477                                            2 * I915_GTT_PAGE_SIZE,
1478                                            I915_GTT_MIN_ALIGNMENT);
1479
1480                 mutex_lock(&ggtt->vm.mutex);
1481                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1482                                            obj->base.size,
1483                                            offset,
1484                                            obj->cache_level,
1485                                            0);
1486                 mutex_unlock(&ggtt->vm.mutex);
1487                 if (err) {
1488                         pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1489                                total, ggtt->vm.total, err);
1490                         goto out;
1491                 }
1492                 track_vma_bind(vma);
1493
1494                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1495                 if (vma->node.start != offset ||
1496                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1497                         pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1498                                vma->node.start, vma->node.size,
1499                                offset, 2*I915_GTT_PAGE_SIZE);
1500                         err = -EINVAL;
1501                         goto out;
1502                 }
1503         }
1504
1505 out:
1506         list_for_each_entry_safe(obj, on, &objects, st_link) {
1507                 i915_gem_object_unpin_pages(obj);
1508                 i915_gem_object_put(obj);
1509         }
1510         return err;
1511 }
1512
1513 static int igt_gtt_insert(void *arg)
1514 {
1515         struct i915_ggtt *ggtt = arg;
1516         struct drm_i915_gem_object *obj, *on;
1517         struct drm_mm_node tmp = {};
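             /*
              * Insertion requests that can never be satisfied: larger than the
              * whole GTT, larger than the permitted range, sizes near U64_MAX,
              * or an alignment that leaves no valid offset inside the range.
              */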
1518         const struct invalid_insert {
1519                 u64 size;
1520                 u64 alignment;
1521                 u64 start, end;
1522         } invalid_insert[] = {
1523                 {
1524                         ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1525                         0, ggtt->vm.total,
1526                 },
1527                 {
1528                         2*I915_GTT_PAGE_SIZE, 0,
1529                         0, I915_GTT_PAGE_SIZE,
1530                 },
1531                 {
1532                         -(u64)I915_GTT_PAGE_SIZE, 0,
1533                         0, 4*I915_GTT_PAGE_SIZE,
1534                 },
1535                 {
1536                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1537                         0, 4*I915_GTT_PAGE_SIZE,
1538                 },
1539                 {
1540                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1541                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1542                 },
1543                 {}
1544         }, *ii;
1545         LIST_HEAD(objects);
1546         u64 total;
1547         int err = -ENODEV;
1548
1549         /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1550          * for the node, evicting if required.
1551          */
1552
1553         /* Check a couple of obviously invalid requests */
1554         for (ii = invalid_insert; ii->size; ii++) {
1555                 mutex_lock(&ggtt->vm.mutex);
1556                 err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1557                                           ii->size, ii->alignment,
1558                                           I915_COLOR_UNEVICTABLE,
1559                                           ii->start, ii->end,
1560                                           0);
1561                 mutex_unlock(&ggtt->vm.mutex);
1562                 if (err != -ENOSPC) {
1563                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) did not return -ENOSPC (err=%d)\n",
1564                                ii->size, ii->alignment, ii->start, ii->end,
1565                                err);
1566                         return -EINVAL;
1567                 }
1568         }
1569
1570         /* Start by filling the GGTT */
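             /* Each vma is kept pinned so the loop below can check that none were evicted */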
1571         for (total = 0;
1572              total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1573              total += I915_GTT_PAGE_SIZE) {
1574                 struct i915_vma *vma;
1575
1576                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1577                                                       I915_GTT_PAGE_SIZE);
1578                 if (IS_ERR(obj)) {
1579                         err = PTR_ERR(obj);
1580                         goto out;
1581                 }
1582
1583                 err = i915_gem_object_pin_pages_unlocked(obj);
1584                 if (err) {
1585                         i915_gem_object_put(obj);
1586                         goto out;
1587                 }
1588
1589                 list_add(&obj->st_link, &objects);
1590
1591                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1592                 if (IS_ERR(vma)) {
1593                         err = PTR_ERR(vma);
1594                         goto out;
1595                 }
1596
1597                 mutex_lock(&ggtt->vm.mutex);
1598                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1599                                           obj->base.size, 0, obj->cache_level,
1600                                           0, ggtt->vm.total,
1601                                           0);
1602                 mutex_unlock(&ggtt->vm.mutex);
1603                 if (err == -ENOSPC) {
1604                         /* maxed out the GGTT space */
1605                         i915_gem_object_put(obj);
1606                         break;
1607                 }
1608                 if (err) {
1609                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1610                                total, ggtt->vm.total, err);
1611                         goto out;
1612                 }
1613                 track_vma_bind(vma);
1614                 __i915_vma_pin(vma);
1615
1616                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1617         }
1618
1619         list_for_each_entry(obj, &objects, st_link) {
1620                 struct i915_vma *vma;
1621
1622                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1623                 if (IS_ERR(vma)) {
1624                         err = PTR_ERR(vma);
1625                         goto out;
1626                 }
1627
1628                 if (!drm_mm_node_allocated(&vma->node)) {
1629                         pr_err("VMA was unexpectedly evicted!\n");
1630                         err = -EINVAL;
1631                         goto out;
1632                 }
1633
1634                 __i915_vma_unpin(vma);
1635         }
1636
1637         /* If we then reinsert, we should find the same hole */
1638         list_for_each_entry_safe(obj, on, &objects, st_link) {
1639                 struct i915_vma *vma;
1640                 u64 offset;
1641
1642                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1643                 if (IS_ERR(vma)) {
1644                         err = PTR_ERR(vma);
1645                         goto out;
1646                 }
1647
1648                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1649                 offset = vma->node.start;
1650
1651                 err = i915_vma_unbind(vma);
1652                 if (err) {
1653                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1654                         goto out;
1655                 }
1656
1657                 mutex_lock(&ggtt->vm.mutex);
1658                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1659                                           obj->base.size, 0, obj->cache_level,
1660                                           0, ggtt->vm.total,
1661                                           0);
1662                 mutex_unlock(&ggtt->vm.mutex);
1663                 if (err) {
1664                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1665                                total, ggtt->vm.total, err);
1666                         goto out;
1667                 }
1668                 track_vma_bind(vma);
1669
1670                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1671                 if (vma->node.start != offset) {
1672                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1673                                offset, vma->node.start);
1674                         err = -EINVAL;
1675                         goto out;
1676                 }
1677         }
1678
1679         /* And then force evictions */
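             /* The GGTT is already full of single-page nodes, so every 2-page insert must evict */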
1680         for (total = 0;
1681              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1682              total += 2 * I915_GTT_PAGE_SIZE) {
1683                 struct i915_vma *vma;
1684
1685                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1686                                                       2 * I915_GTT_PAGE_SIZE);
1687                 if (IS_ERR(obj)) {
1688                         err = PTR_ERR(obj);
1689                         goto out;
1690                 }
1691
1692                 err = i915_gem_object_pin_pages_unlocked(obj);
1693                 if (err) {
1694                         i915_gem_object_put(obj);
1695                         goto out;
1696                 }
1697
1698                 list_add(&obj->st_link, &objects);
1699
1700                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1701                 if (IS_ERR(vma)) {
1702                         err = PTR_ERR(vma);
1703                         goto out;
1704                 }
1705
1706                 mutex_lock(&ggtt->vm.mutex);
1707                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1708                                           obj->base.size, 0, obj->cache_level,
1709                                           0, ggtt->vm.total,
1710                                           0);
1711                 mutex_unlock(&ggtt->vm.mutex);
1712                 if (err) {
1713                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1714                                total, ggtt->vm.total, err);
1715                         goto out;
1716                 }
1717                 track_vma_bind(vma);
1718
1719                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1720         }
1721
1722 out:
1723         list_for_each_entry_safe(obj, on, &objects, st_link) {
1724                 i915_gem_object_unpin_pages(obj);
1725                 i915_gem_object_put(obj);
1726         }
1727         return err;
1728 }
1729
1730 int i915_gem_gtt_mock_selftests(void)
1731 {
1732         static const struct i915_subtest tests[] = {
1733                 SUBTEST(igt_mock_drunk),
1734                 SUBTEST(igt_mock_walk),
1735                 SUBTEST(igt_mock_pot),
1736                 SUBTEST(igt_mock_fill),
1737                 SUBTEST(igt_gtt_reserve),
1738                 SUBTEST(igt_gtt_insert),
1739         };
1740         struct drm_i915_private *i915;
1741         struct i915_ggtt *ggtt;
1742         int err;
1743
1744         i915 = mock_gem_device();
1745         if (!i915)
1746                 return -ENOMEM;
1747
1748         ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1749         if (!ggtt) {
1750                 err = -ENOMEM;
1751                 goto out_put;
1752         }
1753         mock_init_ggtt(i915, ggtt);
1754
1755         err = i915_subtests(tests, ggtt);
1756
1757         mock_device_flush(i915);
1758         i915_gem_drain_freed_objects(i915);
1759         mock_fini_ggtt(ggtt);
1760         kfree(ggtt);
1761 out_put:
1762         mock_destroy_device(i915);
1763         return err;
1764 }
1765
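     /*
      * Submit an empty request on @ce and wait for it, so that all previously
      * submitted work on the context has completed; returns -EIO on timeout.
      */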
1766 static int context_sync(struct intel_context *ce)
1767 {
1768         struct i915_request *rq;
1769         long timeout;
1770
1771         rq = intel_context_create_request(ce);
1772         if (IS_ERR(rq))
1773                 return PTR_ERR(rq);
1774
1775         i915_request_get(rq);
1776         i915_request_add(rq);
1777
1778         timeout = i915_request_wait(rq, 0, HZ / 5);
1779         i915_request_put(rq);
1780
1781         return timeout < 0 ? -EIO : 0;
1782 }
1783
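     /*
      * Build a request that jumps to the batch at @addr (preceded by an initial
      * breadcrumb where available, so a hang can be detected). On success the
      * request is returned with a reference held for the caller.
      */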
1784 static struct i915_request *
1785 submit_batch(struct intel_context *ce, u64 addr)
1786 {
1787         struct i915_request *rq;
1788         int err;
1789
1790         rq = intel_context_create_request(ce);
1791         if (IS_ERR(rq))
1792                 return rq;
1793
1794         err = 0;
1795         if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1796                 err = rq->engine->emit_init_breadcrumb(rq);
1797         if (err == 0)
1798                 err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1799
1800         if (err == 0)
1801                 i915_request_get(rq);
1802         i915_request_add(rq);
1803
1804         return err ? ERR_PTR(err) : rq;
1805 }
1806
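     /*
      * Each 64-byte slot of the target batch ends with a MI_BATCH_BUFFER_START
      * that branches back to its own slot, so the batch spins until released.
      * spinner() returns a pointer to dword 4 of slot @i; end_spin() writes
      * MI_BATCH_BUFFER_END there to break the loop.
      */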
1807 static u32 *spinner(u32 *batch, int i)
1808 {
1809         return batch + i * 64 / sizeof(*batch) + 4;
1810 }
1811
1812 static void end_spin(u32 *batch, int i)
1813 {
1814         *spinner(batch, i) = MI_BATCH_BUFFER_END;
1815         wmb();
1816 }
1817
1818 static int igt_cs_tlb(void *arg)
1819 {
1820         const unsigned int count = PAGE_SIZE / 64;
1821         const unsigned int chunk_size = count * PAGE_SIZE;
1822         struct drm_i915_private *i915 = arg;
1823         struct drm_i915_gem_object *bbe, *act, *out;
1824         struct i915_gem_engines_iter it;
1825         struct i915_address_space *vm;
1826         struct i915_gem_context *ctx;
1827         struct intel_context *ce;
1828         struct i915_vma *vma;
1829         I915_RND_STATE(prng);
1830         struct file *file;
1831         unsigned int i;
1832         u32 *result;
1833         u32 *batch;
1834         int err = 0;
1835
1836         /*
1837          * Our mission here is to fool the hardware into executing something
1838          * from scratch, as it has not seen the batch move (due to a missing
1839          * TLB invalidate).
1840          */
1841
1842         file = mock_file(i915);
1843         if (IS_ERR(file))
1844                 return PTR_ERR(file);
1845
1846         ctx = live_context(i915, file);
1847         if (IS_ERR(ctx)) {
1848                 err = PTR_ERR(ctx);
1849                 goto out_unlock;
1850         }
1851
1852         vm = i915_gem_context_get_vm_rcu(ctx);
1853         if (i915_is_ggtt(vm))
1854                 goto out_vm;
1855
1856         /* Create two pages: a dummy to prefill the TLB, and the intended target */
1857         bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
1858         if (IS_ERR(bbe)) {
1859                 err = PTR_ERR(bbe);
1860                 goto out_vm;
1861         }
1862
1863         batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
1864         if (IS_ERR(batch)) {
1865                 err = PTR_ERR(batch);
1866                 goto out_put_bbe;
1867         }
1868         memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
1869         i915_gem_object_flush_map(bbe);
1870         i915_gem_object_unpin_map(bbe);
1871
1872         act = i915_gem_object_create_internal(i915, PAGE_SIZE);
1873         if (IS_ERR(act)) {
1874                 err = PTR_ERR(act);
1875                 goto out_put_bbe;
1876         }
1877
1878         /* Track the execution of each request by writing into a different slot */
1879         batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
1880         if (IS_ERR(batch)) {
1881                 err = PTR_ERR(batch);
1882                 goto out_put_act;
1883         }
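             /*
              * Each slot stores its index into the result page (pinned at the
              * top of the address space below) and ends in a
              * MI_BATCH_BUFFER_START whose target is patched at submission time
              * to loop back to the slot itself.
              */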
1884         for (i = 0; i < count; i++) {
1885                 u32 *cs = batch + i * 64 / sizeof(*cs);
1886                 u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
1887
1888                 GEM_BUG_ON(INTEL_GEN(i915) < 6);
1889                 cs[0] = MI_STORE_DWORD_IMM_GEN4;
1890                 if (INTEL_GEN(i915) >= 8) {
1891                         cs[1] = lower_32_bits(addr);
1892                         cs[2] = upper_32_bits(addr);
1893                         cs[3] = i;
1894                         cs[4] = MI_NOOP;
1895                         cs[5] = MI_BATCH_BUFFER_START_GEN8;
1896                 } else {
1897                         cs[1] = 0;
1898                         cs[2] = lower_32_bits(addr);
1899                         cs[3] = i;
1900                         cs[4] = MI_NOOP;
1901                         cs[5] = MI_BATCH_BUFFER_START;
1902                 }
1903         }
1904
1905         out = i915_gem_object_create_internal(i915, PAGE_SIZE);
1906         if (IS_ERR(out)) {
1907                 err = PTR_ERR(out);
1908                 goto out_put_batch;
1909         }
1910         i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
1911
1912         vma = i915_vma_instance(out, vm, NULL);
1913         if (IS_ERR(vma)) {
1914                 err = PTR_ERR(vma);
1915                 goto out_put_out;
1916         }
1917
1918         err = i915_vma_pin(vma, 0, 0,
1919                            PIN_USER |
1920                            PIN_OFFSET_FIXED |
1921                            (vm->total - PAGE_SIZE));
1922         if (err)
1923                 goto out_put_out;
1924         GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
1925
1926         result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
1927         if (IS_ERR(result)) {
1928                 err = PTR_ERR(result);
1929                 goto out_put_out;
1930         }
1931
1932         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1933                 IGT_TIMEOUT(end_time);
1934                 unsigned long pass = 0;
1935
1936                 if (!intel_engine_can_store_dword(ce->engine))
1937                         continue;
1938
1939                 while (!__igt_timeout(end_time, NULL)) {
1940                         struct i915_vm_pt_stash stash = {};
1941                         struct i915_request *rq;
1942                         struct i915_gem_ww_ctx ww;
1943                         u64 offset;
1944
1945                         offset = igt_random_offset(&prng,
1946                                                    0, vm->total - PAGE_SIZE,
1947                                                    chunk_size, PAGE_SIZE);
1948
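                             /* Poison the result page so lost writes are visible */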
1949                         memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
1950
1951                         vma = i915_vma_instance(bbe, vm, NULL);
1952                         if (IS_ERR(vma)) {
1953                                 err = PTR_ERR(vma);
1954                                 goto end;
1955                         }
1956
1957                         err = vma->ops->set_pages(vma);
1958                         if (err)
1959                                 goto end;
1960
1961                         i915_gem_ww_ctx_init(&ww, false);
1962 retry:
1963                         err = i915_vm_lock_objects(vm, &ww);
1964                         if (err)
1965                                 goto end_ww;
1966
1967                         err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
1968                         if (err)
1969                                 goto end_ww;
1970
1971                         err = i915_vm_pin_pt_stash(vm, &stash);
1972                         if (!err)
1973                                 vm->allocate_va_range(vm, &stash, offset, chunk_size);
1974
1975                         i915_vm_free_pt_stash(vm, &stash);
1976 end_ww:
1977                         if (err == -EDEADLK) {
1978                                 err = i915_gem_ww_ctx_backoff(&ww);
1979                                 if (!err)
1980                                         goto retry;
1981                         }
1982                         i915_gem_ww_ctx_fini(&ww);
1983                         if (err)
1984                                 goto end;
1985
1986                         /* Prime the TLB with the dummy pages */
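                             /* The dummy batch is just MI_BATCH_BUFFER_END: it
                              * retires at once but loads its translation into
                              * the TLB.
                              */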
1987                         for (i = 0; i < count; i++) {
1988                                 vma->node.start = offset + i * PAGE_SIZE;
1989                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
1990
1991                                 rq = submit_batch(ce, vma->node.start);
1992                                 if (IS_ERR(rq)) {
1993                                         err = PTR_ERR(rq);
1994                                         goto end;
1995                                 }
1996                                 i915_request_put(rq);
1997                         }
1998
1999                         vma->ops->clear_pages(vma);
2000
2001                         err = context_sync(ce);
2002                         if (err) {
2003                                 pr_err("%s: dummy setup timed out\n",
2004                                        ce->engine->name);
2005                                 goto end;
2006                         }
2007
2008                         vma = i915_vma_instance(act, vm, NULL);
2009                         if (IS_ERR(vma)) {
2010                                 err = PTR_ERR(vma);
2011                                 goto end;
2012                         }
2013
2014                         err = vma->ops->set_pages(vma);
2015                         if (err)
2016                                 goto end;
2017
2018                         /* Replace the TLB with target batches */
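                             /* The PTEs now point at the real batches; if the
                              * TLB was not invalidated, the stale dummy
                              * translation is used and the store never lands.
                              */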
2019                         for (i = 0; i < count; i++) {
2020                                 struct i915_request *rq;
2021                                 u32 *cs = batch + i * 64 / sizeof(*cs);
2022                                 u64 addr;
2023
2024                                 vma->node.start = offset + i * PAGE_SIZE;
2025                                 vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
2026
2027                                 addr = vma->node.start + i * 64;
2028                                 cs[4] = MI_NOOP;
2029                                 cs[6] = lower_32_bits(addr);
2030                                 cs[7] = upper_32_bits(addr);
2031                                 wmb();
2032
2033                                 rq = submit_batch(ce, addr);
2034                                 if (IS_ERR(rq)) {
2035                                         err = PTR_ERR(rq);
2036                                         goto end;
2037                                 }
2038
2039                                 /* Wait until the context chain has started */
2040                                 if (i == 0) {
2041                                         while (READ_ONCE(result[i]) &&
2042                                                !i915_request_completed(rq))
2043                                                 cond_resched();
2044                                 } else {
2045                                         end_spin(batch, i - 1);
2046                                 }
2047
2048                                 i915_request_put(rq);
2049                         }
2050                         end_spin(batch, count - 1);
2051
2052                         vma->ops->clear_pages(vma);
2053
2054                         err = context_sync(ce);
2055                         if (err) {
2056                                 pr_err("%s: writes timed out\n",
2057                                        ce->engine->name);
2058                                 goto end;
2059                         }
2060
2061                         for (i = 0; i < count; i++) {
2062                                 if (result[i] != i) {
2063                                         pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2064                                                ce->engine->name, pass,
2065                                                offset, i, result[i], i);
2066                                         err = -EINVAL;
2067                                         goto end;
2068                                 }
2069                         }
2070
2071                         vm->clear_range(vm, offset, chunk_size);
2072                         pass++;
2073                 }
2074         }
2075 end:
2076         if (igt_flush_test(i915))
2077                 err = -EIO;
2078         i915_gem_context_unlock_engines(ctx);
2079         i915_gem_object_unpin_map(out);
2080 out_put_out:
2081         i915_gem_object_put(out);
2082 out_put_batch:
2083         i915_gem_object_unpin_map(act);
2084 out_put_act:
2085         i915_gem_object_put(act);
2086 out_put_bbe:
2087         i915_gem_object_put(bbe);
2088 out_vm:
2089         i915_vm_put(vm);
2090 out_unlock:
2091         fput(file);
2092         return err;
2093 }
2094
2095 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2096 {
2097         static const struct i915_subtest tests[] = {
2098                 SUBTEST(igt_ppgtt_alloc),
2099                 SUBTEST(igt_ppgtt_lowlevel),
2100                 SUBTEST(igt_ppgtt_drunk),
2101                 SUBTEST(igt_ppgtt_walk),
2102                 SUBTEST(igt_ppgtt_pot),
2103                 SUBTEST(igt_ppgtt_fill),
2104                 SUBTEST(igt_ppgtt_shrink),
2105                 SUBTEST(igt_ppgtt_shrink_boom),
2106                 SUBTEST(igt_ggtt_lowlevel),
2107                 SUBTEST(igt_ggtt_drunk),
2108                 SUBTEST(igt_ggtt_walk),
2109                 SUBTEST(igt_ggtt_pot),
2110                 SUBTEST(igt_ggtt_fill),
2111                 SUBTEST(igt_ggtt_page),
2112                 SUBTEST(igt_cs_tlb),
2113         };
2114
2115         GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
2116
2117         return i915_subtests(tests, i915);
2118 }