/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/random.h>

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "mock_drm.h"

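/*
 * Poke each engine of the context with an empty request; being able to
 * build and submit a request on every engine is our check that the GPU
 * is still usable around the power management dance.
 */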
static int switch_to_context(struct i915_gem_context *ctx)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		i915_request_add(rq);
	}
	i915_gem_context_unlock_engines(ctx);

	return err;
}

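/*
 * Scribble a pseudo-random pattern over the whole of stolen memory (DSM),
 * one page at a time, by temporarily binding each page into the GGTT
 * error-capture slot through the aperture.
 */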
static void trash_stolen(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	const resource_size_t size = resource_size(&i915->dsm);
	unsigned long page;
	u32 prng = 0x12345678;

	/* XXX: fsck. needs some more thought... */
	if (!i915_ggtt_has_aperture(ggtt))
		return;

	for (page = 0; page < size; page += PAGE_SIZE) {
		const dma_addr_t dma = i915->dsm.start + page;
		u32 __iomem *s;
		int x;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
			prng = next_pseudo_random32(prng);
			iowrite32(prng, &s[x]);
		}
		io_mapping_unmap_atomic(s);
	}

	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}

static void simulate_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/*
	 * As a final sting in the tail, invalidate stolen. Under a real S4,
	 * stolen is lost and needs to be refilled on resume. However, under
	 * CI we merely do S4-device testing (as full S4 is too unreliable
	 * for automated testing across a cluster), so to simulate the effect
	 * of stolen being trashed across S4, we trash it ourselves.
	 */
	trash_stolen(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

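/*
 * The pm_* helpers below step through the same suspend/hibernate/resume
 * routines as the driver's PM callbacks, letting the selftest exercise
 * those paths without going through the kernel PM core.
 */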
static int pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	return 0;
}

static void pm_suspend(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_suspend(&i915->ggtt);
		i915_gem_suspend_late(i915);
	}
}

static void pm_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_suspend(&i915->ggtt);

		i915_gem_freeze(i915);
		i915_gem_freeze_late(i915);
	}
}

static void pm_resume(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/*
	 * Both suspend and hibernate follow the same wakeup path and assume
	 * that runtime-pm just works.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_resume(&i915->ggtt);
		i915_gem_resume(i915);
	}
}

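/*
 * Simulate an S3 suspend/resume cycle and verify that a context which
 * could run requests beforehand can still run them afterwards.
 */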
static int igt_gem_suspend(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(ctx);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_suspend(i915);

	/* Here be dragons! Note that with S3RST any S3 may become S4! */
	simulate_hibernate(i915);

	pm_resume(i915);

	err = switch_to_context(ctx);
out:
	fput(file);
	return err;
}

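/*
 * As igt_gem_suspend, but taking the freeze/freeze_late (S4) path on the
 * way down instead of the S3 suspend path.
 */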
static int igt_gem_hibernate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(ctx);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_hibernate(i915);

	/* Here be dragons! */
	simulate_hibernate(i915);

	pm_resume(i915);

	err = switch_to_context(ctx);
out:
	fput(file);
	return err;
}

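/*
 * Sanity check the ww-mutex object locking API: take each lock twice to
 * exercise -EALREADY handling, and back off and retry on -EDEADLK just
 * as a real caller must.
 */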
static int igt_gem_ww_ctx(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *obj2;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj2)) {
		err = PTR_ERR(obj2);
		goto put1;
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/* Lock the objects, twice for good measure (-EALREADY handling) */
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_lock_interruptible(obj, &ww);
	if (!err)
		err = i915_gem_object_lock_interruptible(obj2, &ww);
	if (!err)
		err = i915_gem_object_lock(obj2, &ww);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	i915_gem_object_put(obj2);
put1:
	i915_gem_object_put(obj);
	return err;
}

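/* Entry point for the live selftests; skip everything if already wedged. */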
int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
		SUBTEST(igt_gem_ww_ctx),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}