// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"

#define TILE_FUSE_ENABLE_BOTH        0x0
#define TILE_SKU_BOTH_MTL            0x3630

/* Work point configuration values */
#define CONFIG_1_TILE                0x01
#define CONFIG_2_TILE                0x02
#define PLL_RATIO_5_3                0x01
#define PLL_RATIO_4_3                0x02
#define WP_CONFIG(tile, ratio)       (((tile) << 8) | (ratio))
#define WP_CONFIG_1_TILE_5_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_1_TILE_4_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_2_TILE_5_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_2_TILE_4_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_0_TILE_PLL_OFF     WP_CONFIG(0, 0)
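
/*
 * Note: WP_CONFIG() packs the tile count into bits 15:8 and the PLL ratio
 * select into bits 7:0 of the workpoint CONFIG payload sent by
 * ivpu_pll_cmd_send() below.
 */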

#define PLL_REF_CLK_FREQ             (50 * 1000000)
#define PLL_SIMULATION_FREQ          (10 * 1000000)
#define PLL_DEFAULT_EPP_VALUE        0x80

#define TIM_SAFE_ENABLE              0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE     0xffffffff

#define TIMEOUT_US                   (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US               (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US              (500 * USEC_PER_MSEC)

#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
                        (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
                        (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
                        (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
                        (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
                        (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
                        (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
                        (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
                        (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
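
/*
 * The ICB enable/status/clear banks are programmed as a single 64-bit value,
 * with bank 1 occupying the upper 32 bits (see the REGV_WR64() calls below).
 */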
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)

#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
                           (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
                           (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
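
/*
 * A bit set in LOCAL_INT_MASK masks the corresponding interrupt, so enabling
 * uses the complement of BUTTRESS_IRQ_MASK and disabling masks every source.
 */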
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)

#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
                                     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
                                     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
                                     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
                                     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
                                     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
                                     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))

static char *ivpu_platform_to_str(u32 platform)
{
        switch (platform) {
        case IVPU_PLATFORM_SILICON:
                return "IVPU_PLATFORM_SILICON";
        case IVPU_PLATFORM_SIMICS:
                return "IVPU_PLATFORM_SIMICS";
        case IVPU_PLATFORM_FPGA:
                return "IVPU_PLATFORM_FPGA";
        default:
                return "Invalid platform";
        }
}

static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
        u32 gen_ctrl = REGV_RD32(VPU_37XX_HOST_SS_GEN_CTRL);
        u32 platform = REG_GET_FLD(VPU_37XX_HOST_SS_GEN_CTRL, PS, gen_ctrl);

        if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
                vdev->platform = platform;
        else
                vdev->platform = IVPU_PLATFORM_SILICON;

        ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
                 ivpu_platform_to_str(vdev->platform), vdev->platform);
}

static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
        vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
        vdev->wa.clear_runtime_mem = false;
        vdev->wa.d3hot_after_power_off = true;

        /* Old MTL revisions clear interrupt status by writing 0, see irqb handler */
        if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
                vdev->wa.interrupt_clear_with_0 = true;
}

static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
{
        if (ivpu_is_simics(vdev) || ivpu_is_fpga(vdev)) {
                vdev->timeout.boot = 100000;
                vdev->timeout.jsm = 50000;
                vdev->timeout.tdr = 2000000;
                vdev->timeout.reschedule_suspend = 1000;
        } else {
                vdev->timeout.boot = 1000;
                vdev->timeout.jsm = 500;
                vdev->timeout.tdr = 2000;
                vdev->timeout.reschedule_suspend = 10;
        }
}

static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
        return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}

/* Send KMD initiated workpoint change */
static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
                             u16 target_ratio, u16 config)
{
        int ret;
        u32 val;

        ret = ivpu_pll_wait_for_cmd_send(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
                return ret;
        }

        val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
        val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
        val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
        REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);

        val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
        val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
        val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
        REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);

        val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
        val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
        REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);

        val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
        val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
        REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);

        ret = ivpu_pll_wait_for_cmd_send(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);

        return ret;
}

static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
{
        u32 exp_val = enable ? 0x1 : 0x0;

        if (IVPU_WA(punit_disabled))
                return 0;

        return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}

static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
{
        if (IVPU_WA(punit_disabled))
                return 0;

        return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}

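/*
 * The usable ratio range comes from the FMIN/FMAX fuses; ivpu_pll_min_ratio
 * and ivpu_pll_max_ratio are driver module parameters (pll_min_ratio /
 * pll_max_ratio) that are clamped to the fused limits below.
 */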
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
        u32 fmin_fuse, fmax_fuse;

        fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
        fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
        fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);

        fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
        fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);

        hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
        hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
        hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}

static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
{
        return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}

static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u16 target_ratio;
        u16 config;
        int ret;

        if (IVPU_WA(punit_disabled)) {
                ivpu_dbg(vdev, PM, "Skipping PLL request on %s\n",
                         ivpu_platform_to_str(vdev->platform));
                return 0;
        }

        if (enable) {
                target_ratio = hw->pll.pn_ratio;
                config = hw->config;
        } else {
                target_ratio = 0;
                config = 0;
        }

        ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
                 config, target_ratio);

        ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
        if (ret) {
                ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
                return ret;
        }

        ret = ivpu_pll_wait_for_lock(vdev, enable);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for PLL lock\n");
                return ret;
        }

        if (enable) {
                ret = ivpu_pll_wait_for_status_ready(vdev);
                if (ret) {
                        ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
                        return ret;
                }

                ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
                if (ret) {
                        ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
                        return ret;
                }
        }

        return 0;
}

static int ivpu_pll_enable(struct ivpu_device *vdev)
{
        return ivpu_pll_drive(vdev, true);
}

static int ivpu_pll_disable(struct ivpu_device *vdev)
{
        return ivpu_pll_drive(vdev, false);
}

static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
        u32 val = 0;

        val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
        val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
        val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);

        REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}

static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);

        if (enable) {
                val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
                val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
                val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
        } else {
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
        }

        REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}

static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);

        if (enable) {
                val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
                val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
                val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
        } else {
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
        }

        REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}

static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

        if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);

        if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);

        if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);

        if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
            !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QACCEPTN);

        if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
            !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QDENY);

        if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
            !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
{
        ivpu_boot_host_ss_rst_clr_assert(vdev);

        return ivpu_boot_noc_qreqn_check(vdev, 0x0);
}

static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
        REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}

static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
        if (enable)
                val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
        else
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
        REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);

        ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

        return ret;
}

static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
{
        return ivpu_boot_host_ss_axi_drive(vdev, true);
}

static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
        if (enable) {
                val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
                val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
        } else {
                val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
                val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
        }
        REGV_WR32(MTL_VPU_TOP_NOC_QREQN, val);

        ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

        return ret;
}

static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
{
        return ivpu_boot_host_ss_top_noc_drive(vdev, true);
}

static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

        if (enable)
                val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
        else
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);

        REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}

static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);

        if (enable)
                val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
        else
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);

        REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}

static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
        /* The FPGA model (UPF) is not power aware, so skip Power Island polling */
        if (ivpu_is_fpga(vdev))
                return 0;

        return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
                             exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}

static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);

        if (enable)
                val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
        else
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);

        REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}

static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);

        if (enable)
                val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
        else
                val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);

        REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}

static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{
        int ret;

        ivpu_boot_pwr_island_trickle_drive(vdev, true);
        ivpu_boot_pwr_island_drive(vdev, true);

        ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for power island status\n");
                return ret;
        }

        ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qrenqn check %d\n", ret);
                return ret;
        }

        ivpu_boot_host_ss_clk_drive(vdev, true);
        ivpu_boot_pwr_island_isolation_drive(vdev, false);
        ivpu_boot_host_ss_rst_drive(vdev, true);
        ivpu_boot_dpu_active_drive(vdev, true);

        return ret;
}

static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
{
        ivpu_boot_dpu_active_drive(vdev, false);
        ivpu_boot_pwr_island_isolation_drive(vdev, true);
        ivpu_boot_pwr_island_trickle_drive(vdev, false);
        ivpu_boot_pwr_island_drive(vdev, false);

        return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
}

static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

        val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
        val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
        val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

        REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}

static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);

        val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
        val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
        val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
        val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

        REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}

static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
{
        u32 val;

        val = REGV_RD32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
        val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);

        val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
        REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

        val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
        REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

        val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
        REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

        /* The entry point is passed as a 512-byte-aligned address (bits 31:9) */
        val = vdev->fw->entry_point >> 9;
        REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

        val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
        REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

        ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
                 vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
}

static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
                return ret;
        }

        val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
        if (enable)
                val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
        else
                val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
        REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);

        ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret)
                ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);

        return ret;
}

static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;

        hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
        hw->sku = TILE_SKU_BOTH_MTL;
        hw->config = WP_CONFIG_2_TILE_4_3_RATIO;

        ivpu_pll_init_frequency_ratios(vdev);

        ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
        ivpu_hw_init_range(&hw->ranges.user,   0xc0000000, 255 * SZ_1M);
        ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
        ivpu_hw_init_range(&hw->ranges.dma,   0x200000000, SZ_8G);

        ivpu_hw_read_platform(vdev);
        ivpu_hw_wa_init(vdev);
        ivpu_hw_timeouts_init(vdev);

        return 0;
}

static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
        int ret = 0;

        if (ivpu_boot_pwr_domain_disable(vdev)) {
                ivpu_err(vdev, "Failed to disable power domain\n");
                ret = -EIO;
        }

        if (ivpu_pll_disable(vdev)) {
                ivpu_err(vdev, "Failed to disable PLL\n");
                ret = -EIO;
        }

        return ret;
}

static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_boot_d0i3_drive(vdev, true);
        if (ret)
                ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);

        udelay(5); /* VPU requires 5 us to complete the transition */

        return ret;
}

static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_boot_d0i3_drive(vdev, false);
        if (ret)
                ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);

        return ret;
}

static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_hw_37xx_d0i3_disable(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

        ret = ivpu_pll_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_host_ss_configure(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
                return ret;
        }

        /*
         * The control circuitry for the vpu_idle indication logic powers up active.
         * To avoid an unnecessary low power mode signal from the LRT during bring-up,
         * the KMD disables this circuitry prior to bringing up the Main Power island.
         */
        ivpu_boot_vpu_idle_gen_disable(vdev);

        ret = ivpu_boot_pwr_domain_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_host_ss_axi_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_host_ss_top_noc_enable(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

        return ret;
}

static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
{
        ivpu_boot_no_snoop_enable(vdev);
        ivpu_boot_tbu_mmu_enable(vdev);
        ivpu_boot_soc_cpu_boot(vdev);

        return 0;
}

static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
{
        u32 val;

        if (IVPU_WA(punit_disabled))
                return true;

        val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
        return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
               REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}

static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
        int ret = 0;

        if (!ivpu_hw_37xx_is_idle(vdev))
                ivpu_warn(vdev, "VPU not idle during power down\n");

        if (ivpu_hw_37xx_reset(vdev)) {
                ivpu_err(vdev, "Failed to reset VPU\n");
                ret = -EIO;
        }

        if (ivpu_hw_37xx_d0i3_enable(vdev)) {
                ivpu_err(vdev, "Failed to enter D0I3\n");
                ret = -EIO;
        }

        return ret;
}

static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
{
        u32 val;

        /* Enable writing and set non-zero WDT value */
        REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
        REGV_WR32(MTL_VPU_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

        /* Enable writing and disable watchdog timer */
        REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
        REGV_WR32(MTL_VPU_CPU_SS_TIM_WDOG_EN, 0);

        /* Now clear the timeout interrupt */
        val = REGV_RD32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG);
        val = REG_CLR_FLD(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
        REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
}

static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
{
        u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
        u32 cpu_clock;

        if ((config & 0xff) == PLL_RATIO_4_3)
                cpu_clock = pll_clock * 2 / 4;
        else
                cpu_clock = pll_clock * 2 / 5;

        return cpu_clock;
}

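/*
 * Worked example: with the 50 MHz reference clock, a (hypothetical) ratio of
 * 0x20 (32) gives a 1.6 GHz PLL clock; a 4/3-ratio config then yields
 * 1.6 GHz * 2 / 4 = 800 MHz, while any other config yields
 * 1.6 GHz * 2 / 5 = 640 MHz.
 */
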
/* Register indirect accesses */
static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
        u32 pll_curr_ratio;

        pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
        pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;

        if (!ivpu_is_silicon(vdev))
                return PLL_SIMULATION_FREQ;

        return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
}

static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
        return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}

static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
        return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}

static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
        return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}

static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
        u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
        u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);

        REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}

static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
        return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}

static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
        u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);

        return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}

static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
        REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}

static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
        REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}

static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
        REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
        REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
        REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
        REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}

static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
        REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
        REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
        REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
        REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}

static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
        ivpu_err_ratelimited(vdev, "WDT NCE irq\n");

        ivpu_pm_schedule_recovery(vdev);
}

static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
        ivpu_err_ratelimited(vdev, "WDT MSS irq\n");

        ivpu_hw_wdt_disable(vdev);
        ivpu_pm_schedule_recovery(vdev);
}

static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
        ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");

        ivpu_pm_schedule_recovery(vdev);
}

/* Handler for IRQs from VPU core (irqV) */
static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
{
        u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;

        REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
                ivpu_mmu_irq_evtq_handler(vdev);

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
                ivpu_ipc_irq_handler(vdev);

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
                ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
                ivpu_mmu_irq_gerr_handler(vdev);

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
                ivpu_hw_37xx_irq_wdt_mss_handler(vdev);

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
                ivpu_hw_37xx_irq_wdt_nce_handler(vdev);

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
                ivpu_hw_37xx_irq_noc_firewall_handler(vdev);

        return status;
}

/* Handler for IRQs from Buttress core (irqB) */
static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
        u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
        bool schedule_recovery = false;

        if (status == 0)
                return 0;

        if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
                ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
                         REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));

        if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
                ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
                REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
                u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);

                ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
                         ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
                         REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
                         REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
                REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        /* This must be done after interrupts are cleared at the source. */
        if (IVPU_WA(interrupt_clear_with_0))
                /*
                 * Writing 1 triggers an interrupt, so we can't perform read-update-write.
                 * Clear local interrupt status by writing 0 to all bits.
                 */
                REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
        else
                REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);

        if (schedule_recovery)
                ivpu_pm_schedule_recovery(vdev);

        return status;
}

static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
        struct ivpu_device *vdev = ptr;
        u32 ret_irqv, ret_irqb;

        /* Mask global interrupts while both sources are serviced */
        REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);

        ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
        ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);

        /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
        REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);

        return IRQ_RETVAL(ret_irqb | ret_irqv);
}

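/*
 * Note: IRQ_RETVAL() maps a non-zero combined status to IRQ_HANDLED and zero
 * to IRQ_NONE, so IRQ_HANDLED is reported only when one of the two handlers
 * actually serviced a pending interrupt.
 */
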
static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
        u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
        u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;

        if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
                ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
                ivpu_err(vdev, "WDT MSS timeout detected\n");

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
                ivpu_err(vdev, "WDT NCE timeout detected\n");

        if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
                ivpu_err(vdev, "NOC Firewall irq detected\n");

        if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
                ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));

        if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
                u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);

                ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
                         ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
                         REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
                         REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
        }
}

const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
        .info_init = ivpu_hw_37xx_info_init,
        .power_up = ivpu_hw_37xx_power_up,
        .is_idle = ivpu_hw_37xx_is_idle,
        .power_down = ivpu_hw_37xx_power_down,
        .reset = ivpu_hw_37xx_reset,
        .boot_fw = ivpu_hw_37xx_boot_fw,
        .wdt_disable = ivpu_hw_37xx_wdt_disable,
        .diagnose_failure = ivpu_hw_37xx_diagnose_failure,
        .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
        .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
        .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
        .reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
        .reg_db_set = ivpu_hw_37xx_reg_db_set,
        .reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
        .reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
        .reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
        .irq_clear = ivpu_hw_37xx_irq_clear,
        .irq_enable = ivpu_hw_37xx_irq_enable,
        .irq_disable = ivpu_hw_37xx_irq_disable,
        .irq_handler = ivpu_hw_37xx_irq_handler,
};