1 // SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */
#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <linux/highmem.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "gem/i915_gem_lmem.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
19 static inline struct intel_gt *
20 ____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
22 if (type == INTEL_UC_FW_TYPE_GUC)
23 return container_of(uc_fw, struct intel_gt, uc.guc.fw);
25 GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
26 return container_of(uc_fw, struct intel_gt, uc.huc.fw);
29 static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
31 GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
32 return ____uc_fw_to_gt(uc_fw, uc_fw->type);
#ifdef CONFIG_DRM_I915_DEBUG_GUC
/*
 * Debug build only: record the new status and log the transition. For the
 * SELECTED state the blob path is more informative than the state name.
 * Non-debug builds use an inline setter without the logging (see header).
 */
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
			       enum intel_uc_fw_status status)
{
	uc_fw->__status = status;
	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
		"%s firmware -> %s\n",
		intel_uc_fw_type_repr(uc_fw->type),
		status == INTEL_UC_FIRMWARE_SELECTED ?
		uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif
/*
 * List of required GuC and HuC binaries per-platform.
 * Must be ordered based on platform + revid, from newer to older.
 *
 * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
 * firmware.
 */
/* Per-platform GuC blob table; newest platform first (see ordering note). */
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
	fw_def(DG2,          0, guc_def(dg2,  70, 1, 2)) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 70, 1, 1)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  70, 1, 1)) \
	fw_def(DG1,          0, guc_def(dg1,  70, 1, 1)) \
	fw_def(ROCKETLAKE,   0, guc_def(tgl,  70, 1, 1)) \
	fw_def(TIGERLAKE,    0, guc_def(tgl,  70, 1, 1)) \
	fw_def(JASPERLAKE,   0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ICELAKE,      0, guc_def(icl,  70, 1, 1)) \
	fw_def(COMETLAKE,    5, guc_def(cml,  70, 1, 1)) \
	fw_def(COMETLAKE,    0, guc_def(kbl,  70, 1, 1)) \
	fw_def(COFFEELAKE,   0, guc_def(kbl,  70, 1, 1)) \
	fw_def(GEMINILAKE,   0, guc_def(glk,  70, 1, 1)) \
	fw_def(KABYLAKE,     0, guc_def(kbl,  70, 1, 1)) \
	fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
	fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))
/* Per-platform HuC blob table; newest platform first (see ordering note). */
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))
/*
 * Build a firmware file path like "i915/tgl_guc_70.1.1.bin". The "i915/"
 * directory prefix is mandatory: that is where linux-firmware installs the
 * blobs and where request_firmware() looks for them.
 */
#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"
/* GuC paths use a true patch level; HuC's third field is a build number. */
#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
103 /* All blobs need to be declared via MODULE_FIRMWARE() */
104 #define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
105 MODULE_FIRMWARE(uc_);
107 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
108 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
110 /* The below structs and macros are used to iterate across the list of blobs */
111 struct __packed uc_fw_blob {
117 #define UC_FW_BLOB(major_, minor_, path_) \
118 { .major = major_, .minor = minor_, .path = path_ }
#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
128 struct __packed uc_fw_platform_requirement {
129 enum intel_platform p;
130 u8 rev; /* first platform rev using this FW */
131 const struct uc_fw_blob blob;
/* Expand one table row into a uc_fw_platform_requirement initializer. */
#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},
141 struct fw_blobs_by_type {
142 const struct uc_fw_platform_requirement *blobs;
147 __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
149 static const struct uc_fw_platform_requirement blobs_guc[] = {
150 INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
152 static const struct uc_fw_platform_requirement blobs_huc[] = {
153 INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
155 static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
156 [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
157 [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
159 static const struct uc_fw_platform_requirement *fw_blobs;
160 enum intel_platform p = INTEL_INFO(i915)->platform;
162 u8 rev = INTEL_REVID(i915);
165 GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
166 fw_blobs = blobs_all[uc_fw->type].blobs;
167 fw_count = blobs_all[uc_fw->type].count;
169 for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
170 if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
171 const struct uc_fw_blob *blob = &fw_blobs[i].blob;
172 uc_fw->path = blob->path;
173 uc_fw->major_ver_wanted = blob->major;
174 uc_fw->minor_ver_wanted = blob->minor;
179 /* make sure the list is ordered as expected */
180 if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
181 for (i = 1; i < fw_count; i++) {
182 if (fw_blobs[i].p < fw_blobs[i - 1].p)
185 if (fw_blobs[i].p == fw_blobs[i - 1].p &&
186 fw_blobs[i].rev < fw_blobs[i - 1].rev)
189 pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
190 intel_platform_name(fw_blobs[i - 1].p),
192 intel_platform_name(fw_blobs[i].p),
200 static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
202 if (i915->params.enable_guc & ENABLE_GUC_MASK)
203 return i915->params.guc_firmware_path;
207 static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
209 if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
210 return i915->params.huc_firmware_path;
214 static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
216 const char *path = NULL;
218 switch (uc_fw->type) {
219 case INTEL_UC_FW_TYPE_GUC:
220 path = __override_guc_firmware_path(i915);
222 case INTEL_UC_FW_TYPE_HUC:
223 path = __override_huc_firmware_path(i915);
227 if (unlikely(path)) {
229 uc_fw->user_overridden = true;
234 * intel_uc_fw_init_early - initialize the uC object and select the firmware
235 * @uc_fw: uC firmware
238 * Initialize the state of our uC object and relevant tracking and select the
239 * firmware to fetch and load.
241 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
242 enum intel_uc_fw_type type)
244 struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
247 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
248 * before we're looked at the HW caps to see if we have uc support
250 BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
251 GEM_BUG_ON(uc_fw->status);
252 GEM_BUG_ON(uc_fw->path);
256 if (HAS_GT_UC(i915)) {
257 __uc_fw_auto_select(i915, uc_fw);
258 __uc_fw_user_override(i915, uc_fw);
261 intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
262 INTEL_UC_FIRMWARE_SELECTED :
263 INTEL_UC_FIRMWARE_DISABLED :
264 INTEL_UC_FIRMWARE_NOT_SUPPORTED);
267 static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
269 struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
270 bool user = e == -EINVAL;
272 if (i915_inject_probe_error(i915, e)) {
273 /* non-existing blob */
274 uc_fw->path = "<invalid>";
275 uc_fw->user_overridden = user;
276 } else if (i915_inject_probe_error(i915, e)) {
277 /* require next major version */
278 uc_fw->major_ver_wanted += 1;
279 uc_fw->minor_ver_wanted = 0;
280 uc_fw->user_overridden = user;
281 } else if (i915_inject_probe_error(i915, e)) {
282 /* require next minor version */
283 uc_fw->minor_ver_wanted += 1;
284 uc_fw->user_overridden = user;
285 } else if (uc_fw->major_ver_wanted &&
286 i915_inject_probe_error(i915, e)) {
287 /* require prev major version */
288 uc_fw->major_ver_wanted -= 1;
289 uc_fw->minor_ver_wanted = 0;
290 uc_fw->user_overridden = user;
291 } else if (uc_fw->minor_ver_wanted &&
292 i915_inject_probe_error(i915, e)) {
293 /* require prev minor version - hey, this should work! */
294 uc_fw->minor_ver_wanted -= 1;
295 uc_fw->user_overridden = user;
296 } else if (user && i915_inject_probe_error(i915, e)) {
297 /* officially unsupported platform */
298 uc_fw->major_ver_wanted = 0;
299 uc_fw->minor_ver_wanted = 0;
300 uc_fw->user_overridden = true;
305 * intel_uc_fw_fetch - fetch uC firmware
306 * @uc_fw: uC firmware
308 * Fetch uC firmware into GEM obj.
310 * Return: 0 on success, a negative errno code on failure.
312 int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
314 struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
315 struct device *dev = i915->drm.dev;
316 struct drm_i915_gem_object *obj;
317 const struct firmware *fw = NULL;
318 struct uc_css_header *css;
322 GEM_BUG_ON(!i915->wopcm.size);
323 GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
325 err = i915_inject_probe_error(i915, -ENXIO);
329 __force_fw_fetch_failures(uc_fw, -EINVAL);
330 __force_fw_fetch_failures(uc_fw, -ESTALE);
332 err = request_firmware(&fw, uc_fw->path, dev);
336 /* Check the size of the blob before examining buffer contents */
337 if (unlikely(fw->size < sizeof(struct uc_css_header))) {
338 drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
339 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
340 fw->size, sizeof(struct uc_css_header));
345 css = (struct uc_css_header *)fw->data;
347 /* Check integrity of size values inside CSS header */
348 size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
349 css->exponent_size_dw) * sizeof(u32);
350 if (unlikely(size != sizeof(struct uc_css_header))) {
352 "%s firmware %s: unexpected header size: %zu != %zu\n",
353 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
354 fw->size, sizeof(struct uc_css_header));
359 /* uCode size must calculated from other sizes */
360 uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
363 uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
365 /* At least, it should have header, uCode and RSA. Size of all three. */
366 size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
367 if (unlikely(fw->size < size)) {
368 drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
369 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
375 /* Sanity check whether this fw is not larger than whole WOPCM memory */
376 size = __intel_uc_fw_get_upload_size(uc_fw);
377 if (unlikely(size >= i915->wopcm.size)) {
378 drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
379 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
380 size, (size_t)i915->wopcm.size);
385 /* Get version numbers from the CSS header */
386 uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
388 uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
391 if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
392 uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
393 drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
394 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
395 uc_fw->major_ver_found, uc_fw->minor_ver_found,
396 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
397 if (!intel_uc_fw_is_overridden(uc_fw)) {
403 if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
404 uc_fw->private_data_size = css->private_data_size;
406 if (HAS_LMEM(i915)) {
407 obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
409 obj->flags |= I915_BO_ALLOC_PM_EARLY;
411 obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
420 uc_fw->size = fw->size;
421 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
423 release_firmware(fw);
427 intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
428 INTEL_UC_FIRMWARE_MISSING :
429 INTEL_UC_FIRMWARE_ERROR);
431 drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
432 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
433 drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
434 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
436 release_firmware(fw); /* OK even if fw is NULL */
440 static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
442 struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
443 struct drm_mm_node *node = &ggtt->uc_fw;
445 GEM_BUG_ON(!drm_mm_node_allocated(node));
446 GEM_BUG_ON(upper_32_bits(node->start));
447 GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
449 return lower_32_bits(node->start);
452 static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
454 struct drm_i915_gem_object *obj = uc_fw->obj;
455 struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
456 struct i915_vma_resource *dummy = &uc_fw->dummy;
459 dummy->start = uc_fw_ggtt_offset(uc_fw);
460 dummy->node_size = obj->base.size;
461 dummy->bi.pages = obj->mm.pages;
463 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
464 GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);
466 /* uc_fw->obj cache domains were not controlled across suspend */
467 if (i915_gem_object_has_struct_page(obj))
468 drm_clflush_sg(dummy->bi.pages);
470 if (i915_gem_object_is_lmem(obj))
473 ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
476 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
478 struct drm_i915_gem_object *obj = uc_fw->obj;
479 struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
480 u64 start = uc_fw_ggtt_offset(uc_fw);
482 ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
485 static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
487 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
488 struct intel_uncore *uncore = gt->uncore;
492 ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
496 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
498 /* Set the source address for the uCode */
499 offset = uc_fw_ggtt_offset(uc_fw);
500 GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
501 intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
502 intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
504 /* Set the DMA destination */
505 intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
506 intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
509 * Set the transfer size. The header plus uCode will be copied to WOPCM
510 * via DMA, excluding any other components
512 intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
513 sizeof(struct uc_css_header) + uc_fw->ucode_size);
516 intel_uncore_write_fw(uncore, DMA_CTRL,
517 _MASKED_BIT_ENABLE(dma_flags | START_DMA));
519 /* Wait for DMA to finish */
520 ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
522 drm_err(>->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
523 intel_uc_fw_type_repr(uc_fw->type),
524 intel_uncore_read_fw(uncore, DMA_CTRL));
526 /* Disable the bits once DMA is over */
527 intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
529 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
535 * intel_uc_fw_upload - load uC firmware using custom loader
536 * @uc_fw: uC firmware
537 * @dst_offset: destination offset
538 * @dma_flags: flags for flags for dma ctrl
540 * Loads uC firmware and updates internal flags.
542 * Return: 0 on success, non-zero on failure.
544 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
546 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
549 /* make sure the status was cleared the last time we reset the uc */
550 GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
552 err = i915_inject_probe_error(gt->i915, -ENOEXEC);
556 if (!intel_uc_fw_is_loadable(uc_fw))
559 /* Call custom loader */
560 uc_fw_bind_ggtt(uc_fw);
561 err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
562 uc_fw_unbind_ggtt(uc_fw);
566 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
570 i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
571 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
573 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
577 static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
580 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
581 * while it reads it from the 64 RSA registers if it is smaller.
582 * The HuC RSA is always read from memory.
584 return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
587 static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
589 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
590 struct i915_vma *vma;
595 err = i915_inject_probe_error(gt->i915, -ENXIO);
599 if (!uc_fw_need_rsa_in_memory(uc_fw))
603 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
604 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
605 * authentication from memory, as the RSA offset now falls within the
606 * GuC inaccessible range. We resort to perma-pinning an additional vma
607 * within the accessible range that only contains the RSA signature.
608 * The GuC HW can use this extra pinning to perform the authentication
609 * since its GGTT offset will be GuC accessible.
611 GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
612 vma = intel_guc_allocate_vma(>->uc.guc, PAGE_SIZE);
616 vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
617 i915_coherent_map_type(gt->i915, vma->obj, true));
619 i915_vma_unpin_and_release(&vma, 0);
620 err = PTR_ERR(vaddr);
624 copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
625 i915_gem_object_unpin_map(vma->obj);
627 if (copied < uc_fw->rsa_size) {
632 uc_fw->rsa_data = vma;
637 i915_vma_unpin_and_release(&vma, 0);
641 static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
643 i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
646 int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
650 /* this should happen before the load! */
651 GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
653 if (!intel_uc_fw_is_available(uc_fw))
656 err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
658 DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
659 intel_uc_fw_type_repr(uc_fw->type), err);
663 err = uc_fw_rsa_data_create(uc_fw);
665 DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
666 intel_uc_fw_type_repr(uc_fw->type), err);
673 i915_gem_object_unpin_pages(uc_fw->obj);
675 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
679 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
681 uc_fw_rsa_data_destroy(uc_fw);
683 if (i915_gem_object_has_pinned_pages(uc_fw->obj))
684 i915_gem_object_unpin_pages(uc_fw->obj);
686 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
690 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
691 * @uc_fw: uC firmware
693 * Cleans up uC firmware by releasing the firmware GEM obj.
695 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
697 if (!intel_uc_fw_is_available(uc_fw))
700 i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
702 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
706 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
708 * @uc_fw: uC firmware
710 * @max_len: max number of bytes to copy
712 * Return: number of copied bytes.
714 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
716 struct intel_memory_region *mr = uc_fw->obj->mm.region;
717 u32 size = min_t(u32, uc_fw->rsa_size, max_len);
718 u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
719 struct sgt_iter iter;
723 /* Called during reset handling, must be atomic [no fs_reclaim] */
724 GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
726 idx = offset >> PAGE_SHIFT;
727 offset = offset_in_page(offset);
728 if (i915_gem_object_has_struct_page(uc_fw->obj)) {
731 for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
732 u32 len = min_t(u32, size, PAGE_SIZE - offset);
740 vaddr = kmap_atomic(page);
741 memcpy(dst, vaddr + offset, len);
742 kunmap_atomic(vaddr);
754 for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
755 u32 len = min_t(u32, size, PAGE_SIZE - offset);
763 vaddr = io_mapping_map_atomic_wc(&mr->iomap,
764 addr - mr->region.start);
765 memcpy_fromio(dst, vaddr + offset, len);
766 io_mapping_unmap_atomic(vaddr);
781 * intel_uc_fw_dump - dump information about uC firmware
782 * @uc_fw: uC firmware
783 * @p: the &drm_printer
785 * Pretty printer for uC firmware.
787 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
789 drm_printf(p, "%s firmware: %s\n",
790 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
791 drm_printf(p, "\tstatus: %s\n",
792 intel_uc_fw_status_repr(uc_fw->status));
793 drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
794 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
795 uc_fw->major_ver_found, uc_fw->minor_ver_found);
796 drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
797 drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);