/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>

#include "i915_selftest.h"

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"
static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915,
					     struct file *file)
{
	struct i915_gem_context *ctx = live_context(i915, file);
	struct i915_address_space *vm;

	if (IS_ERR(ctx))
		return ctx;

	vm = ctx->vm;
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);

	return ctx;
}
static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};
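
/* Pick the largest device-supported page size that still fits in @rem. */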
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}
static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}
static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	/* restricted by sg_alloc_table */
	if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
		return -E2BIG;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	/*
	 * Our goal here is simple: we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
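	/*
	 * An illustration (assuming a device that supports 2M, 64K and 4K):
	 * with page_mask = 2M | 64K | 4K and size = 2M + 64K + 4K, the loop
	 * below carves one 2M chunk, holding back 64K + 4K of the remainder
	 * for the smaller bits in the mask, then one 64K chunk, then one 4K
	 * chunk, so every page size in the mask is used at least once.
	 */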
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order > MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size - 1) & page_mask)) >= page_size);

		page_mask &= (page_size - 1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st);

	return 0;
err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}
static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};
static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	obj->mm.page_mask = page_mask;

	return obj;
}
static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	u64 rem;

	/* restricted by sg_alloc_table */
	if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
		return -E2BIG;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;
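		/*
		 * There is no real backing store here; reusing the page size
		 * as the fake dma address conveniently guarantees the address
		 * is aligned to its own page size, which is what the GTT
		 * insertion paths check.
		 */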
		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	__i915_gem_object_set_pages(obj, st);

	return 0;
}
static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st);

	return 0;
#undef GFP
}
static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}
static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};
static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->pat_index = i915_gem_get_pat_index(i915, I915_CACHE_NONE);

	return obj;
}
static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->resource->page_sizes_gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->resource->page_sizes_gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	/*
	 * The dma-api is like a box of chocolates when it comes to the
	 * alignment of dma addresses; however, for LMEM we have total control
	 * and so can guarantee alignment. Likewise, when we allocate our
	 * blocks they should appear in descending order, and since we align to
	 * the largest page size for the GTT address, we should be able to
	 * assert that if we see 2M physical pages then we should also get 2M
	 * GTT pages. If we don't, then something might be wrong in our
	 * construction of the backing pages.
	 *
	 * Maintaining alignment is required to utilise huge pages in the ppGTT.
	 */
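	/*
	 * Concretely: an LMEM object pinned at a 2M-aligned GTT offset whose
	 * sg contains 2M physical chunks must report at least 2M GTT pages,
	 * which is exactly what the check below enforces.
	 */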
	if (i915_gem_object_is_lmem(obj) &&
	    IS_ALIGNED(i915_vma_offset(vma), SZ_2M) &&
	    vma->page_sizes.sg & SZ_2M &&
	    vma->resource->page_sizes_gtt < SZ_2M) {
		pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
		       vma->page_sizes.sg, vma->resource->page_sizes_gtt);
		err = -EINVAL;
	}

	return err;
}
static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = RUNTIME_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */
	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = SZ_4K; /* Required for ppGTT */

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		RUNTIME_INFO(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_put:
	i915_gem_object_put(obj);
out_device:
	RUNTIME_INFO(i915)->page_sizes = saved_mask;

	return err;
}
static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem,
							    page_size, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->resource->page_sizes_gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->resource->page_sizes_gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_destroy(mem);

	return err;
}
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto out_unpin;

		err = igt_check_page_sizes(vma);

		if (vma->resource->page_sizes_gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->resource->page_sizes_gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err)
			goto out_unpin;

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
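		/*
		 * e.g. with a 64K page size, every offset 4K..60K leaves the
		 * dma address misaligned with respect to 64K, so the expected
		 * result below is plain 4K GTT pages.
		 */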
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind_unlocked(vma);
			if (err)
				goto out_unpin;

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err)
				goto out_unpin;

			err = igt_check_page_sizes(vma);

			if (vma->resource->page_sizes_gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->resource->page_sizes_gtt,
				       I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err)
				goto out_unpin;

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}
static void close_object_list(struct list_head *objects)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}
}
static int igt_ppgtt_huge_fill(void *arg)
{
	struct drm_i915_private *i915 = arg;
	unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long max_pages;
	unsigned long page_num;
	struct file *file;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);
	max_pages = vm->total >> PAGE_SHIFT;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		/* vma start must be aligned to BIT(21) to allow 2M PTEs */
		err = i915_vma_pin(vma, 0, BIT(21), PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size - 1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (!has_pte64 && (obj->base.size < I915_GTT_PAGE_SIZE_2M ||
				   expected_gtt & I915_GTT_PAGE_SIZE_2M))
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
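		/*
		 * e.g. a 2M + 64K + 4K object on hardware supporting all three
		 * page sizes starts out as expected_gtt = 2M | 64K | 4K; the
		 * fixup above then trims that to 2M | 4K on pre-PTE64
		 * hardware, where 64K and 4K PTEs cannot share a 2M
		 * page-table.
		 */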
		i915_vma_unpin(vma);

		if (!has_pte64 && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->resource->page_sizes_gtt != expected_gtt) {
			pr_err("gtt=%#x, expected=%#x, size=0x%zx, single=%s\n",
			       vma->resource->page_sizes_gtt, expected_gtt,
			       obj->base.size, str_yes_no(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	i915_vm_put(vm);
out:
	fput(file);

	return err;
}
static int igt_ppgtt_64K(void *arg)
{
	struct drm_i915_private *i915 = arg;
	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct file *file;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */
	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);
	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		/*
		 * For modern GTT models, the requirements for marking a
		 * page-table as 64K have been relaxed. Account for this.
		 */
		if (has_pte64) {
			expected_gtt = 0;
			if (size >= SZ_64K)
				expected_gtt |= I915_GTT_PAGE_SIZE_64K;
			if (size & (SZ_64K - 1))
				expected_gtt |= I915_GTT_PAGE_SIZE_4K;
		}
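		/*
		 * e.g. under the relaxed PS64 rules a 2M + 4K object expects
		 * 64K | 4K: the bulk can use 64K PTEs while the 4K remainder
		 * falls back to 4K PTEs within the same page-table.
		 */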
		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_vm;
			}

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- we only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_object_unpin;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!has_pte64 && !offset &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->resource->page_sizes_gtt != expected_gtt) {
				pr_err("gtt=%#x, expected=%#x, i=%d, single=%s offset=%#x size=%#x\n",
				       vma->resource->page_sizes_gtt,
				       expected_gtt, i, str_yes_no(!!single),
				       offset, size);
				err = -EINVAL;
				goto out_vma_unpin;
			}
			i915_vma_unpin(vma);
			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			i915_gem_drain_freed_objects(i915);
		}
	}

	goto out_vm;

out_vma_unpin:
	i915_vma_unpin(vma);
out_object_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_object_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);
out:
	fput(file);

	return err;
}
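
/*
 * Move the object to the GTT domain, then have the GPU write @val into
 * dword @dw of each page covered by @vma (via the igt_gpu_fill_dw() selftest
 * helper).
 */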
static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}
static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		goto err_unlock;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);
err_unlock:
	i915_gem_object_unlock(obj);

	return err;
}
static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);

	return err;
}
static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);

	return __cpu_check_vmap(obj, dword, val);
}
static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		return err;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err)
		pr_err("cpu-check failed at offset=%llx\n", offset);

out_vma_unpin:
	i915_vma_unpin(vma);

	return err;
}
static int igt_write_huge(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int count;
	struct i915_gem_context *ctx;
	struct file *file;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
	    !HAS_64K_PAGES(i915))
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	n = 0;
	count = 0;
	max = U64_MAX;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		max = min(max, ce->vm->total);
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		goto out;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
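	/*
	 * e.g. with two dword-capable engines, order[] below holds four
	 * shuffled indices, so the same engine can be picked several times in
	 * a row.
	 */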
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out;
	}

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64(max - size, max_page_size);

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * timeout -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;
		struct intel_context *ce;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary; however, to improve coverage we opt for testing
		 * both aligned and unaligned offsets.
		 *
		 * With PS64 this is no longer the case, but to ensure we
		 * sometimes get the compact layout for smaller objects, apply
		 * the round_up anyway.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	kfree(order);

out:
	fput(file);

	return err;
}
typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}
static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}
static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;
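	/*
	 * e.g. min = 64K, max = 2M: mask covers the page-aligned range
	 * [0, 4M), so the result is a page-aligned size in [64K, 4M) --
	 * doubling max lets sizes exceed a single max_page_size chunk, so
	 * multi-chunk sg layouts also get coverage.
	 */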

	return size;
}

static int igt_ppgtt_smoke_huge(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M,  },
		{ igt_create_shmem,    SZ_64K, SZ_32M, },
		{ igt_create_local,    SZ_64K, SZ_1G,  },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */
	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;
try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG || err == -ENOMEM) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(i915, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}
static int igt_ppgtt_sanity_check(void *arg)
{
	struct drm_i915_private *i915 = arg;
	unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0,                        },
		{ igt_create_local,  0,                        },
		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_2M - SZ_64K,	SZ_64K		},
		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_2M | SZ_4K	},
		{ SZ_2M + SZ_64K,	SZ_2M | SZ_64K	},
		{ SZ_2M + SZ_64K,	SZ_64K		},
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs; however, we should keep
	 * this around to limit the chances of introducing a temporary
	 * regression, by testing the most obvious cases that might make
	 * something blow up.
	 */
	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(i915, obj);

			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}
static int igt_ppgtt_compact(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err;

	/*
	 * Simple test to catch issues with compact 64K pages -- since the pt
	 * is compacted to 256B that gives us 32 entries per pt; however, since
	 * the backing page for the pt is 4K, any extra entries we might
	 * incorrectly write out should be ignored by the HW. If we ever hit
	 * such a case this test should catch it, since some of our writes
	 * would land in scratch.
	 */
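	/*
	 * The arithmetic: a page-table covers 2M, so with 64K entries that is
	 * 2M / 64K = 32 PTEs at 8 bytes each = 256B, against a 4K backing
	 * page; the SZ_4M object created below therefore spans two such
	 * page-tables.
	 */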
	if (!HAS_64K_PAGES(i915)) {
		pr_info("device lacks compact 64K page support, skipping\n");
		return 0;
	}

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	/* We want the range to cover multiple page-table boundaries. */
	obj = i915_gem_object_create_lmem(i915, SZ_4M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
		pr_info("LMEM compact unable to allocate huge-page(s)\n");
		goto out_unpin;
	}

	/*
	 * Disable 2M GTT pages by forcing the page-size to 64K for the GTT
	 * insertion.
	 */
	obj->mm.page_sizes.sg = I915_GTT_PAGE_SIZE_64K;

	err = igt_write_huge(i915, obj);
	if (err)
		pr_err("LMEM compact write-huge failed\n");

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	if (err == -ENOMEM)
		err = 0;

	return err;
}
static int igt_ppgtt_mixed(void *arg)
{
	struct drm_i915_private *i915 = arg;
	const unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	struct drm_i915_gem_object *obj, *on;
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct file *file;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	struct intel_memory_region *mr;
	struct i915_vma *vma;
	unsigned int count;
	u32 i, addr;
	int *order;
	int n, err;

	/*
	 * Sanity check mixing 4K and 64K pages within the same page-table via
	 * the new PS64 TLB hint.
	 */

	if (!HAS_64K_PAGES(i915)) {
		pr_info("device lacks PS64, skipping\n");
		return 0;
	}

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);

	i = 0;
	addr = 0;
	do {
		u32 sz;

		sz = i915_prandom_u32_max_state(SZ_4M, &prng);
		sz = max_t(u32, sz, SZ_4K);

		mr = i915->mm.regions[INTEL_REGION_LMEM_0];
		if (i & 1)
			mr = i915->mm.regions[INTEL_REGION_SMEM];

		obj = i915_gem_object_create_region(mr, sz, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_put;
		}

		list_add_tail(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		addr = round_up(addr, mr->min_page_size);
		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err)
			goto err_put;

		if (mr->type == INTEL_MEMORY_LOCAL &&
		    (vma->resource->page_sizes_gtt & I915_GTT_PAGE_SIZE_4K)) {
			err = -EINVAL;
			goto err_put;
		}

		addr += obj->base.size;
		i++;
	} while (addr <= SZ_16M);
	count = 0;
	n = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		goto err_put;

	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto err_put;
	}

	i = 0;
	addr = 0;
	engines = i915_gem_context_lock_engines(ctx);
	list_for_each_entry(obj, &objects, st_link) {
		u32 rnd = i915_prandom_u32_max_state(UINT_MAX, &prng);

		addr = round_up(addr, obj->mm.region->min_page_size);

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;
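		/*
		 * Hit the first dword, a prng-chosen dword and the last dword
		 * of each page, with distinct values, so overlapping writes
		 * into a neighbouring PTE's page show up as a mismatched
		 * readback.
		 */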
		err = __igt_write_huge(ce, obj, obj->base.size, addr, 0, rnd);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, obj->base.size, addr,
				       offset_in_page(rnd) / sizeof(u32), rnd + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, obj->base.size, addr,
				       (PAGE_SIZE / sizeof(u32)) - 1,
				       rnd + 2);
		if (err)
			break;

		addr += obj->base.size;

		cond_resched();
	}

	i915_gem_context_unlock_engines(ctx);
	kfree(order);
err_put:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	i915_vm_put(vm);
out:
	fput(file);

	return err;
}
static int igt_tmpfs_fallback(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct file *file;
	u32 *vaddr;
	int err = 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);

	/*
	 * Make sure that we don't burst into a ball of flames upon falling
	 * back to tmpfs, which we rely on in the off-chance that we encounter
	 * a failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
out:
	fput(file);

	return err;
}
static int igt_shrink_thp(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	struct file *file;
	unsigned int flags = PIN_USER;
	unsigned int n;
	intel_wakeref_t wf;
	bool should_swap;
	int err;

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	wf = intel_runtime_pm_get(&i915->runtime_pm); /* active shrink */

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_wf;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	/*
	 * Nuke everything *before* we unpin the pages so we can be reasonably
	 * sure that, when we later check get_nr_swap_pages(), some random
	 * leftover object hasn't stolen the remaining swap space.
	 */
	i915_gem_shrink(NULL, i915, -1UL, NULL,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_ACTIVE);
	i915_vma_unpin(vma);
	if (err)
		goto out_wf;

	/*
	 * Now that the pages are *unpinned*, shrinking should invoke
	 * shmem to truncate our pages, if we have available swap.
	 */
	should_swap = get_nr_swap_pages() > 0;
	i915_gem_shrink(NULL, i915, -1UL, NULL,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_ACTIVE |
			I915_SHRINK_WRITEBACK);
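	/*
	 * With swap available, the WRITEBACK pass above should have truncated
	 * the shmem backing entirely; without swap, the pages must survive.
	 * Either way, the page-size tracking bits should be cleared if and
	 * only if the pages were actually swapped out.
	 */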
	if (should_swap == i915_gem_object_has_pages(obj)) {
		pr_err("unexpected pages mismatch, should_swap=%s\n",
		       str_yes_no(should_swap));
		err = -EINVAL;
		goto out_wf;
	}

	if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
		pr_err("unexpected residual page-size bits, should_swap=%s\n",
		       str_yes_no(should_swap));
		err = -EINVAL;
		goto out_wf;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_wf;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_wf:
	intel_runtime_pm_put(&i915->runtime_pm, wf);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);
out:
	fput(file);

	return err;
}
int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	RUNTIME_INFO(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_put;
	}

	/* If we ever hit this then it's time to mock the 64K scratch page */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_put:
	i915_vm_put(&ppgtt->vm);
out_unlock:
	mock_destroy_device(dev_priv);

	return err;
}
int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
		SUBTEST(igt_ppgtt_compact),
		SUBTEST(igt_ppgtt_mixed),
		SUBTEST(igt_ppgtt_huge_fill),
		SUBTEST(igt_ppgtt_64K),
	};

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}