/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}
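/*
 * Note: send_regs.base is set to SOFT_SCRATCH(0) in
 * intel_guc_init_send_regs() below, so guc_send_reg(guc, i) resolves to
 * SOFT_SCRATCH(i): one 32-bit scratch register per message dword.
 */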
void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
	guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}
void intel_guc_init_early(struct intel_guc *guc)
{
	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	guc->notify = gen8_guc_raise_irq;
}
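/*
 * The nop send/handler vfuncs installed above are placeholders; they
 * are expected to be swapped for the real MMIO- or CT-based
 * implementations (e.g. intel_guc_send_mmio() below) once communication
 * with the firmware is enabled.
 */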
static int guc_init_wq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/*
	 * The GuC log buffer flush work item has to do register access to
	 * send the ack to GuC, and this work item, if not synced before
	 * suspend, can potentially get executed after the GFX device is
	 * suspended.
	 * By marking the WQ as freezable, we don't have to bother about
	 * flushing this work item from the suspend hooks: the pending
	 * work item, if any, will either be executed before the suspend
	 * or scheduled later on resume. This way the handling of the work
	 * item can be kept the same between system suspend and rpm suspend.
	 */
	guc->log.relay.flush_wq =
		alloc_ordered_workqueue("i915-guc_log",
					WQ_HIGHPRI | WQ_FREEZABLE);
	if (!guc->log.relay.flush_wq) {
		DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
		return -ENOMEM;
	}

	/*
	 * Even though sending a GuC action and adding a new workitem to a
	 * GuC workqueue are each serialized (with their own locking), since
	 * we're using multiple engines, it's possible that we're going to
	 * issue a preempt request with two (or more - one per engine)
	 * workitems in the GuC queue. In this situation, GuC may submit
	 * all of them, which will make us very confused.
	 * Our preemption contexts may even already be complete - before we
	 * even had the chance to send the preempt action to GuC! Rather
	 * than introducing yet another lock, we can just use an ordered
	 * workqueue to make sure we're always sending a single preemption
	 * request with a single workitem.
	 */
	if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
	    USES_GUC_SUBMISSION(dev_priv)) {
		guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
							  WQ_HIGHPRI);
		if (!guc->preempt_wq) {
			destroy_workqueue(guc->log.relay.flush_wq);
			DRM_ERROR("Couldn't allocate workqueue for GuC preemption\n");
			return -ENOMEM;
		}
	}

	return 0;
}
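/*
 * Note on the mechanism: an ordered workqueue executes at most one work
 * item at a time, in queueing order, which is what provides the
 * single-preemption-request-in-flight guarantee described above.
 */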
static void guc_fini_wq(struct intel_guc *guc)
{
	struct workqueue_struct *wq;

	wq = fetch_and_zero(&guc->preempt_wq);
	if (wq)
		destroy_workqueue(wq);

	wq = fetch_and_zero(&guc->log.relay.flush_wq);
	if (wq)
		destroy_workqueue(wq);
}
int intel_guc_init_misc(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);
	int ret;

	ret = guc_init_wq(guc);
	if (ret)
		return ret;

	intel_uc_fw_fetch(i915, &guc->fw);

	return 0;
}
void intel_guc_fini_misc(struct intel_guc *guc)
{
	intel_uc_fw_fini(&guc->fw);
	guc_fini_wq(guc);
}
static int guc_shared_data_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->shared_data = vma;
	guc->shared_data_vaddr = vaddr;

	return 0;
}
static void guc_shared_data_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}
int intel_guc_init(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	ret = guc_shared_data_create(guc);
	if (ret)
		goto err_fetch;
	GEM_BUG_ON(!guc->shared_data);

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_shared;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(dev_priv);

	return 0;

err_log:
	intel_guc_log_destroy(&guc->log);
err_shared:
	guc_shared_data_destroy(guc);
err_fetch:
	intel_uc_fw_fini(&guc->fw);
	return ret;
}
void intel_guc_fini(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	i915_ggtt_disable_guc(dev_priv);
	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	guc_shared_data_destroy(guc);
	intel_uc_fw_fini(&guc->fw);
}
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags;
	u32 ads;

	ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;

	if (!GUC_LOG_LEVEL_IS_ENABLED(level))
		flags |= GUC_LOG_DEFAULT_DISABLED;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}
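/*
 * Reading of the packing above: a disabled log level sets
 * GUC_LOG_DEFAULT_DISABLED (and, being non-verbose, GUC_LOG_DISABLED
 * too); a non-verbose level only masks the verbose stream via
 * GUC_LOG_DISABLED; verbose levels are instead translated into the
 * GUC_LOG_VERBOSITY field.
 */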
static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	flags |= GUC_CTL_VCS2_ENABLED;

	if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
		flags |= GUC_CTL_KERNEL_SUBMISSIONS;
	else
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}
static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			 (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}
static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}
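/*
 * Worked example of the "size in units, minus one" encoding above
 * (hypothetical numbers, for illustration only): with UNIT == SZ_4K,
 * a 32K buffer is programmed as 32K / 4K - 1 = 7 in its size field.
 */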
/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
void intel_guc_init_params(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(params, 0, sizeof(params));

	/*
	 * The GuC ARAT increment is 10 ns. The GuC default scheduler
	 * quantum is one second. The ARAT value is calculated as:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;
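	/*
	 * Sanity check of the arithmetic above: 100,000,000 ARAT
	 * increments * 10 ns/increment = 1,000,000,000 ns, i.e. the
	 * one-second default scheduler quantum.
	 */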
	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);

	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
}
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}
/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
		   *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		I915_WRITE(guc_send_reg(guc, i), action[i]);

	POSTING_READ(guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(dev_priv,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
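/*
 * Usage sketch (illustrative only, not an existing call site): a
 * two-dword action with no response payload could be sent as:
 *
 *	u32 action[] = { INTEL_GUC_ACTION_SAMPLE_FORCEWAKE, 0 };
 *	int err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action),
 *				      NULL, 0);
 *
 * Real callers normally go through intel_guc_send(), which dispatches
 * either to this function or to the CT transport via guc->send.
 */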
void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 msg, val;

	/*
	 * Sample the log buffer flush related bits and clear them out
	 * immediately from the message identity register to minimize the
	 * probability of losing a flush interrupt when there are back to
	 * back flush interrupts.
	 * There can be a new flush interrupt, for a different log buffer
	 * type (like for ISR), whilst the host is handling one (for DPC).
	 * Since the same bit is used in the message register for ISR and
	 * DPC, it could happen that GuC sets the bit for the 2nd interrupt
	 * but the host clears out the bit on handling the 1st interrupt.
	 */
	disable_rpm_wakeref_asserts(dev_priv);
	spin_lock(&guc->irq_lock);
	val = I915_READ(SOFT_SCRATCH(15));
	msg = val & guc->msg_enabled_mask;
	I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
	spin_unlock(&guc->irq_lock);
	enable_rpm_wakeref_asserts(dev_priv);

	intel_guc_to_host_process_recv_msg(guc, msg);
}
void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
{
	/* Make sure to handle only enabled messages */
	msg &= guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);
}
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bits 0 and 1 are for the Render and Media domains, respectively */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * intel_guc_send() INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function
 * is invoked by intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	u32 data[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
		intel_guc_ggtt_offset(guc, guc->shared_data)
	};

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc: intel_guc structure
 * @engine: engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	u32 data[7];

	GEM_BUG_ON(!guc->execbuf_client);

	data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
	data[1] = engine->guc_id;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 data[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
		intel_guc_ggtt_offset(guc, guc->shared_data)
	};

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is
 * mapped to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */
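/*
 * A practical consequence of the layout above: for an object to be
 * addressable by the GuC, it must be pinned at a GGTT offset of at least
 * ggtt_pin_bias, so that the same offset is valid in both the host's and
 * the GuC's view; GuC addresses below the bias land in WOPCM instead.
 * See intel_guc_allocate_vma() below.
 */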
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return: an i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}
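/*
 * Example caller in this file: guc_shared_data_create() above uses
 * intel_guc_allocate_vma(guc, PAGE_SIZE) to back the single page whose
 * GGTT offset is later handed to the firmware in the suspend, resume
 * and engine-reset actions.
 */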
/**
 * intel_guc_reserved_gtt_size()
 * @guc: intel_guc structure
 *
 * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using
 * GuC we can't have any objects pinned in that region. This function returns
 * the size of the shadowed region.
 *
 * Returns:
 * 0 if GuC is not present or not in use.
 * Otherwise, the GuC WOPCM size.
 */
u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
{
	return guc_to_i915(guc)->wopcm.guc.size;
}