/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"

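/*
 * igt_spinner builds a small batch buffer that writes the request's seqno
 * into a per-context slot of a private HWS page and then spins in place
 * (an MI_BATCH_BUFFER_START that branches back to the start of the batch)
 * until igt_spinner_end() rewrites the loop into MI_BATCH_BUFFER_END.
 * Selftests use it to keep an engine deterministically busy, typically
 * passing MI_ARB_CHECK or MI_NOOP as the arbitration command.
 */
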
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}
	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	return 0;

err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

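/*
 * Bind @obj into the context's address space and return a CPU mapping of
 * its pages. When a ww transaction is supplied the object lock is left to
 * the caller's ww context; otherwise it is dropped again once the pages
 * are pinned.
 */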
static void *igt_spinner_pin_obj(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww,
				 struct drm_i915_gem_object *obj,
				 unsigned int mode, struct i915_vma **vma)
{
	void *vaddr;
	int ret;

	*vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(*vma))
		return ERR_CAST(*vma);

	ret = i915_gem_object_lock(obj, ww);
	if (ret)
		return ERR_PTR(ret);

	vaddr = i915_gem_object_pin_map(obj, mode);

	if (!ww)
		i915_gem_object_unlock(obj);

	if (IS_ERR(vaddr))
		return vaddr;

	if (ww)
		ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
	else
		ret = i915_vma_pin(*vma, 0, 0, PIN_USER);

	if (ret) {
		i915_gem_object_unpin_map(obj);
		return ERR_PTR(ret);
	}

	return vaddr;
}

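/*
 * Map both backing objects for use with @ce: the HWS page (write-back, and
 * poisoned with 0xff so a never-written slot is easy to spot) and the batch
 * buffer itself.
 */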
int igt_spinner_pin(struct igt_spinner *spin,
		    struct intel_context *ce,
		    struct i915_gem_ww_ctx *ww)
{
	void *vaddr;

	if (spin->ce && WARN_ON(spin->ce != ce))
		return -ENODEV;
	spin->ce = ce;

	if (!spin->seqno) {
		vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
	}

	if (!spin->batch) {
		unsigned int mode =
			i915_coherent_map_type(spin->gt->i915);

		vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->batch = vaddr;
	}

	return 0;
}

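/*
 * Each fence context owns a u32 slot in the HWS page; hws_address() gives
 * the GPU-visible address of that slot so the batch can report its seqno.
 */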
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

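/*
 * Serialise @rq against existing activity on @vma and track the vma as
 * busy for the lifetime of the request.
 */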
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

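/*
 * Build and return a request whose batch stores rq->fence.seqno into the
 * HWS slot for this context, executes the caller's arbitration command and
 * then branches back to the start of the batch, looping forever. The
 * trailing MI_BATCH_BUFFER_END is never reached; igt_spinner_end() instead
 * overwrites the first dword of the batch to break the loop.
 */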
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	if (!spin->batch) {
		err = igt_spinner_pin(spin, ce, NULL);
		if (err)
			return ERR_PTR(err);
	}

	hws = spin->hws_vma;
	vma = spin->batch_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return ERR_CAST(rq);

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	if (INTEL_GEN(rq->engine->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (INTEL_GEN(rq->engine->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (INTEL_GEN(rq->engine->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	if (INTEL_GEN(rq->engine->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->engine->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (INTEL_GEN(rq->engine->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (INTEL_GEN(rq->engine->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
	return err ? ERR_PTR(err) : rq;
}

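/* Read back the seqno the spinner has (or has not yet) written for @rq. */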
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

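/* Break the spin loop by turning the first batch dword into a terminator. */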
void igt_spinner_end(struct igt_spinner *spin)
{
	if (!spin->batch)
		return;

	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

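/* Stop the spinner and release its mappings and GEM objects. */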
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	if (spin->batch) {
		i915_vma_unpin(spin->batch_vma);
		i915_gem_object_unpin_map(spin->obj);
	}
	i915_gem_object_put(spin->obj);

	if (spin->seqno) {
		i915_vma_unpin(spin->hws_vma);
		i915_gem_object_unpin_map(spin->hws);
	}
	i915_gem_object_put(spin->hws);
}

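/*
 * Returns true once the spinner has reported its seqno in the HWS page,
 * i.e. the request is actually executing on the GPU, polling briefly
 * before falling back to a sleeping wait.
 */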
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	if (i915_request_is_ready(rq))
		intel_engine_flush_submission(rq->engine);

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     100) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  50));
}