// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>

#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_pm.h"
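
/*
 * Firmware address layout: the runtime image must start no lower than
 * FW_RUNTIME_MIN_ADDR and no higher than FW_RUNTIME_MAX_ADDR, so that the
 * FW_SHARED_MEM_SIZE shared region placed behind it (see
 * ivpu_fw_update_global_range()) still fits below FW_GLOBAL_MEM_END.
 */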
#define FW_GLOBAL_MEM_START	(2ull * SZ_1G)
#define FW_GLOBAL_MEM_END	(3ull * SZ_1G)
#define FW_SHARED_MEM_SIZE	SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
#define FW_SHARED_MEM_ALIGNMENT	SZ_128K /* VPU MTRR limitation */
#define FW_RUNTIME_MAX_SIZE	SZ_512M
#define FW_SHAVE_NN_MAX_SIZE	SZ_2M
#define FW_RUNTIME_MIN_ADDR	(FW_GLOBAL_MEM_START)
#define FW_RUNTIME_MAX_ADDR	(FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
#define FW_VERSION_HEADER_SIZE	SZ_4K
#define FW_FILE_IMAGE_OFFSET	(VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)

#define WATCHDOG_MSS_REDIRECT	32
#define WATCHDOG_NCE_REDIRECT	33
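
/* L2$ config value for an address: the address expressed in 2 GB units (bit 31 and up) */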
#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)

#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \
	ivpu_fw_check_api(vdev, fw_hdr, #name, \
			  VPU_##name##_API_VER_INDEX, \
			  VPU_##name##_API_VER_MAJOR, \
			  VPU_##name##_API_VER_MINOR, min_major)

static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");

/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
static struct {
	int gen;
	const char *name;
} fw_names[] = {
	{ IVPU_HW_37XX, "vpu_37xx.bin" },
	{ IVPU_HW_37XX, "mtl_vpu.bin" },
	{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
	{ IVPU_HW_40XX, "vpu_40xx.bin" },
	{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};
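
/*
 * Request the firmware image: honor the "firmware" module parameter when it
 * is set, otherwise try the generation-specific names from fw_names[] in order.
 */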
static int ivpu_fw_request(struct ivpu_device *vdev)
{
	int ret = -ENOENT;
	int i;

	if (ivpu_firmware) {
		ret = request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
		if (!ret)
			vdev->fw->name = ivpu_firmware;
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
		if (fw_names[i].gen != ivpu_hw_gen(vdev))
			continue;

		ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
		if (!ret) {
			vdev->fw->name = fw_names[i].name;
			return 0;
		}
	}

	ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
	return ret;
}
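
/*
 * Check one API version reported in the firmware header against the version
 * the driver was built with: a major version below min_major is rejected,
 * a differing major version is only warned about.
 */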
static int
ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
		  const char *str, int index, u16 expected_major, u16 expected_minor,
		  u16 min_major)
{
	u16 major = (u16)(fw_hdr->api_version[index] >> 16);
	u16 minor = (u16)(fw_hdr->api_version[index]);

	if (major < min_major) {
		ivpu_err(vdev, "Incompatible FW %s API version: %d.%d, required %d.0 or later\n",
			 str, major, minor, min_major);
		return -EINVAL;
	}

	if (major != expected_major) {
		ivpu_warn(vdev, "Major FW %s API version different: %d.%d (expected %d.%d)\n",
			  str, major, minor, expected_major, expected_minor);
	}

	ivpu_dbg(vdev, FW_BOOT, "FW %s API version: %d.%d (expected %d.%d)\n",
		 str, major, minor, expected_major, expected_minor);

	return 0;
}
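
/*
 * Validate the firmware header and cache the runtime/image geometry, entry
 * point and tracing defaults in vdev->fw. Every address and size is checked
 * against the FW_* limits above before it is trusted.
 */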
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
	u64 runtime_addr, image_load_addr, runtime_size, image_size;

	if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
		ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
		return -EINVAL;
	}

	if (fw_hdr->header_version != VPU_FW_HEADER_VERSION) {
		ivpu_err(vdev, "Invalid firmware header version: %u\n", fw_hdr->header_version);
		return -EINVAL;
	}

	runtime_addr = fw_hdr->boot_params_load_address;
	runtime_size = fw_hdr->runtime_size;
	image_load_addr = fw_hdr->image_load_address;
	image_size = fw_hdr->image_size;

	if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
		ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
		return -EINVAL;
	}

	if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
		ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
		return -EINVAL;
	}

	if (FW_FILE_IMAGE_OFFSET + image_size > fw->file->size) {
		ivpu_err(vdev, "Invalid image size: %llu\n", image_size);
		return -EINVAL;
	}

	if (image_load_addr < runtime_addr ||
	    image_load_addr + image_size > runtime_addr + runtime_size) {
		ivpu_err(vdev, "Invalid firmware load address 0x%llx and size %llu\n",
			 image_load_addr, image_size);
		return -EINVAL;
	}

	if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
		ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
		return -EINVAL;
	}

	if (fw_hdr->entry_point < image_load_addr ||
	    fw_hdr->entry_point >= image_load_addr + image_size) {
		ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
		return -EINVAL;
	}

	ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
		 fw_hdr->header_version, fw_hdr->image_format);

	ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
		  (const char *)fw_hdr + VPU_FW_HEADER_SIZE);

	if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
		return -EINVAL;
	if (IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3))
		return -EINVAL;

	fw->runtime_addr = runtime_addr;
	fw->runtime_size = runtime_size;
	fw->image_load_offset = image_load_addr - runtime_addr;
	fw->image_size = image_size;
	fw->shave_nn_size = PAGE_ALIGN(fw_hdr->shave_nn_fw_size);

	fw->cold_boot_entry_point = fw_hdr->entry_point;
	fw->entry_point = fw->cold_boot_entry_point;

	fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
	fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
	fw->trace_hw_component_mask = -1;

	ivpu_dbg(vdev, FW_BOOT, "Size: file %zu image %u runtime %u shavenn %u\n",
		 fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
	ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
		 fw->runtime_addr, image_load_addr, fw->entry_point);

	return 0;
}

static void ivpu_fw_release(struct ivpu_device *vdev)
{
	release_firmware(vdev->fw->file);
}
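
/*
 * Place the shared (global) region right after the firmware runtime, aligned
 * to the VPU MTRR granularity, and verify that it still fits below
 * FW_GLOBAL_MEM_END.
 */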
static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
	u64 size = FW_SHARED_MEM_SIZE;

	if (start + size > FW_GLOBAL_MEM_END) {
		ivpu_err(vdev, "No space for shared region, start %llu, size %llu\n", start, size);
		return -EINVAL;
	}

	ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
	return 0;
}
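
/*
 * Allocate the buffers needed before boot: the firmware runtime region at the
 * address requested by the FW header, the critical and verbose log buffers,
 * and the SHAVE NN region when the header asks for one.
 */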
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	int log_verb_size;
	int ret;

	ret = ivpu_fw_update_global_range(vdev);
	if (ret)
		return ret;

	fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
					 DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
	if (!fw->mem) {
		ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
		return -ENOMEM;
	}

	fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE,
						  DRM_IVPU_BO_CACHED);
	if (!fw->mem_log_crit) {
		ivpu_err(vdev, "Failed to allocate critical log buffer\n");
		ret = -ENOMEM;
		goto err_free_fw_mem;
	}

	if (ivpu_log_level <= IVPU_FW_LOG_INFO)
		log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
	else
		log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;

	fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED);
	if (!fw->mem_log_verb) {
		ivpu_err(vdev, "Failed to allocate verbose log buffer\n");
		ret = -ENOMEM;
		goto err_free_log_crit;
	}

	if (fw->shave_nn_size) {
		fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
							  fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
		if (!fw->mem_shave_nn) {
			ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
			ret = -ENOMEM;
			goto err_free_log_verb;
		}
	}

	return 0;

err_free_log_verb:
	ivpu_bo_free_internal(fw->mem_log_verb);
err_free_log_crit:
	ivpu_bo_free_internal(fw->mem_log_crit);
err_free_fw_mem:
	ivpu_bo_free_internal(fw->mem);
	return ret;
}

static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;

	if (fw->mem_shave_nn) {
		ivpu_bo_free_internal(fw->mem_shave_nn);
		fw->mem_shave_nn = NULL;
	}

	ivpu_bo_free_internal(fw->mem_log_verb);
	ivpu_bo_free_internal(fw->mem_log_crit);
	ivpu_bo_free_internal(fw->mem);

	fw->mem_log_verb = NULL;
	fw->mem_log_crit = NULL;
	fw->mem = NULL;
}
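
/*
 * Top-level firmware bring-up: request the image, parse and validate its
 * header, then allocate the boot-time buffers. The firmware is released
 * again if anything past the request step fails.
 */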
int ivpu_fw_init(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_fw_request(vdev);
	if (ret)
		return ret;

	ret = ivpu_fw_parse(vdev);
	if (ret)
		goto err_fw_release;

	ret = ivpu_fw_mem_init(vdev);
	if (ret)
		goto err_fw_release;

	return 0;

err_fw_release:
	ivpu_fw_release(vdev);
	return ret;
}

void ivpu_fw_fini(struct ivpu_device *vdev)
{
	ivpu_fw_mem_fini(vdev);
	ivpu_fw_release(vdev);
}
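
/*
 * Copy the firmware image into the runtime buffer at its load offset, zero
 * the area in front of it (and, with the clear_runtime_mem workaround, the
 * area behind it), then flush the CPU caches so the VPU sees the data.
 */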
int ivpu_fw_load(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	u64 image_end_offset = fw->image_load_offset + fw->image_size;

	memset(fw->mem->kvaddr, 0, fw->image_load_offset);
	memcpy(fw->mem->kvaddr + fw->image_load_offset,
	       fw->file->data + FW_FILE_IMAGE_OFFSET, fw->image_size);

	if (IVPU_WA(clear_runtime_mem)) {
		u8 *start = fw->mem->kvaddr + image_end_offset;
		u64 size = fw->mem->base.size - image_end_offset;

		memset(start, 0, size);
	}

	clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);

	return 0;
}

static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
	ivpu_dbg(vdev, FW_BOOT, "boot_params.magic = 0x%x\n",
		 boot_params->magic);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_id = 0x%x\n",
		 boot_params->vpu_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_count = 0x%x\n",
		 boot_params->vpu_count);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.frequency = %u\n",
		 boot_params->frequency);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.perf_clk_frequency = %u\n",
		 boot_params->perf_clk_frequency);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_start = 0x%llx\n",
		 boot_params->ipc_header_area_start);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_size = 0x%x\n",
		 boot_params->ipc_header_area_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_base = 0x%llx\n",
		 boot_params->shared_region_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_size = 0x%x\n",
		 boot_params->shared_region_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_start = 0x%llx\n",
		 boot_params->ipc_payload_area_start);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_size = 0x%x\n",
		 boot_params->ipc_payload_area_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_base = 0x%llx\n",
		 boot_params->global_aliased_pio_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_size = 0x%x\n",
		 boot_params->global_aliased_pio_size);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.autoconfig = 0x%x\n",
		 boot_params->autoconfig);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 0x%x\n",
		 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
		 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
		 boot_params->global_memory_allocator_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
		 boot_params->global_memory_allocator_size);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
		 boot_params->shave_nn_fw_base);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_mss = 0x%x\n",
		 boot_params->watchdog_irq_mss);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
		 boot_params->watchdog_irq_nce);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
		 boot_params->host_to_vpu_irq);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
		 boot_params->job_done_irq);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
		 boot_params->host_version_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.si_stepping = 0x%x\n",
		 boot_params->si_stepping);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.device_id = 0x%llx\n",
		 boot_params->device_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.feature_exclusion = 0x%llx\n",
		 boot_params->feature_exclusion);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.sku = 0x%llx\n",
		 boot_params->sku);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.min_freq_pll_ratio = 0x%x\n",
		 boot_params->min_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.pn_freq_pll_ratio = 0x%x\n",
		 boot_params->pn_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.max_freq_pll_ratio = 0x%x\n",
		 boot_params->max_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.default_trace_level = 0x%x\n",
		 boot_params->default_trace_level);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.tracing_buff_message_format_mask = 0x%llx\n",
		 boot_params->tracing_buff_message_format_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_destination_mask = 0x%x\n",
		 boot_params->trace_destination_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_hw_component_mask = 0x%llx\n",
		 boot_params->trace_hw_component_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.boot_type = 0x%x\n",
		 boot_params->boot_type);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_base = 0x%llx\n",
		 boot_params->punit_telemetry_sram_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_size = 0x%llx\n",
		 boot_params->punit_telemetry_sram_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
		 boot_params->vpu_telemetry_enable);
}
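
/*
 * Fill in the boot parameter block consumed by the firmware. On a warm boot
 * only the saved return address is cleared; on a cold boot the full set of
 * addresses, IRQ numbers, PLL ratios and tracing options is programmed and
 * the block is flushed from the CPU caches.
 */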
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
	struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx;

	/* In case of warm boot we only have to reset the entrypoint addr */
	if (!ivpu_fw_is_cold_boot(vdev)) {
		boot_params->save_restore_ret_address = 0;
		vdev->pm->is_warmboot = true;
		clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
		return;
	}

	vdev->pm->is_warmboot = false;

	boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
	boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
	boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);

	/*
	 * Uncached region of VPU address space, covers IPC buffers, job queues
	 * and log buffers, programmable to L2$ Uncached by VPU MTRR
	 */
	boot_params->shared_region_base = vdev->hw->ranges.global.start;
	boot_params->shared_region_size = vdev->hw->ranges.global.end -
					  vdev->hw->ranges.global.start;

	boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
	boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;

	boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
	boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;

	boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
	boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);

	/* Allow configuration for L2C_PAGE_TABLE with boot param value */
	boot_params->autoconfig = 1;

	/* Enable L2 cache for first 2GB of high memory */
	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
		ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);

	if (vdev->fw->mem_shave_nn)
		boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;

	boot_params->watchdog_irq_mss = WATCHDOG_MSS_REDIRECT;
	boot_params->watchdog_irq_nce = WATCHDOG_NCE_REDIRECT;
	boot_params->si_stepping = ivpu_revision(vdev);
	boot_params->device_id = ivpu_device_id(vdev);
	boot_params->feature_exclusion = vdev->hw->tile_fuse;
	boot_params->sku = vdev->hw->sku;

	boot_params->min_freq_pll_ratio = vdev->hw->pll.min_ratio;
	boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
	boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;

	boot_params->default_trace_level = vdev->fw->trace_level;
	boot_params->tracing_buff_message_format_mask = BIT(VPU_TRACING_FORMAT_STRING);
	boot_params->trace_destination_mask = vdev->fw->trace_destination_mask;
	boot_params->trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
	boot_params->crit_tracing_buff_addr = vdev->fw->mem_log_crit->vpu_addr;
	boot_params->crit_tracing_buff_size = vdev->fw->mem_log_crit->base.size;
	boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
	boot_params->verbose_tracing_buff_size = vdev->fw->mem_log_verb->base.size;

	boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
	boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
	boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);

	clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);

	ivpu_fw_boot_params_print(vdev, boot_params);
}