// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw_mtl_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"

#define TILE_FUSE_ENABLE_BOTH        0x0
#define TILE_FUSE_ENABLE_UPPER       0x1
#define TILE_FUSE_ENABLE_LOWER       0x2

#define TILE_SKU_BOTH_MTL            0x3630
#define TILE_SKU_LOWER_MTL           0x3631
#define TILE_SKU_UPPER_MTL           0x3632

/* Work point configuration values */
#define WP_CONFIG_1_TILE_5_3_RATIO   0x0101
#define WP_CONFIG_1_TILE_4_3_RATIO   0x0102
#define WP_CONFIG_2_TILE_5_3_RATIO   0x0201
#define WP_CONFIG_2_TILE_4_3_RATIO   0x0202
#define WP_CONFIG_0_TILE_PLL_OFF     0x0000

#define PLL_REF_CLK_FREQ             (50 * 1000000)
#define PLL_SIMULATION_FREQ          (10 * 1000000)
#define PLL_RATIO_TO_FREQ(x)         ((x) * PLL_REF_CLK_FREQ)
#define PLL_DEFAULT_EPP_VALUE        0x80
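
/*
 * PLL frequencies are expressed as multiples of the 50 MHz reference clock,
 * e.g. PLL_RATIO_TO_FREQ(32) = 32 * 50 MHz = 1.6 GHz. EPP is presumably an
 * energy/performance preference hint consumed by the PUNIT; this driver
 * always sends the default value.
 */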

#define TIM_SAFE_ENABLE              0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE     0xffffffff

#define TIMEOUT_US                   (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US               (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US              (500 * USEC_PER_MSEC)

#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
                        (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
                        (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
                        (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
                        (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
                        (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
                        (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
                        (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
                        (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
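
/*
 * The ICB *_0 and *_1 status/enable/clear registers appear to be adjacent
 * 32-bit registers, which is why the two masks can be combined into a single
 * 64-bit value and written with one REGV_WR64() to the *_0 offset (see
 * ivpu_hw_mtl_irq_clear() and ivpu_hw_mtl_irq_enable() below).
 */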

#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
                           (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
                           (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))

#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
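
/*
 * MTL_BUTTRESS_LOCAL_INT_MASK is an active-low mask: a 0 bit unmasks the
 * corresponding source. Hence ~BUTTRESS_IRQ_MASK enables exactly the three
 * sources above, and all-ones masks everything (see irq_enable/irq_disable).
 */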

#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
                                     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
                                     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
                                     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
                                     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
                                     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
                                     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))

static char *ivpu_platform_to_str(u32 platform)
{
        switch (platform) {
        case IVPU_PLATFORM_SILICON:
                return "IVPU_PLATFORM_SILICON";
        case IVPU_PLATFORM_SIMICS:
                return "IVPU_PLATFORM_SIMICS";
        case IVPU_PLATFORM_FPGA:
                return "IVPU_PLATFORM_FPGA";
        default:
                return "Invalid platform";
        }
}

static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
        u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL);
        u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl);

        if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
                vdev->platform = platform;
        else
                vdev->platform = IVPU_PLATFORM_SILICON;

        ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
                 ivpu_platform_to_str(vdev->platform), vdev->platform);
}

static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
        vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
        vdev->wa.clear_runtime_mem = false;
}

static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
{
        if (ivpu_is_simics(vdev) || ivpu_is_fpga(vdev)) {
                vdev->timeout.boot = 100000;
                vdev->timeout.jsm = 50000;
                vdev->timeout.tdr = 2000000;
                vdev->timeout.reschedule_suspend = 1000;
        } else {
                vdev->timeout.boot = 1000;
                vdev->timeout.jsm = 500;
                vdev->timeout.tdr = 2000;
                vdev->timeout.reschedule_suspend = 10;
        }
}

static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
        return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}

/* Send KMD initiated workpoint change */
static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
                             u16 target_ratio, u16 config)
{
        int ret;
        u32 val;

        ret = ivpu_pll_wait_for_cmd_send(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
                return ret;
        }

        val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0);
        val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
        val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
        REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val);

        val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1);
        val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
        val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
        REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val);

        val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2);
        val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
        REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val);

        val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD);
        val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val);
        REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val);

        ret = ivpu_pll_wait_for_cmd_send(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);

        return ret;
}

static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
{
        u32 exp_val = enable ? 0x1 : 0x0;

        if (IVPU_WA(punit_disabled))
                return 0;

        return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}

static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
{
        if (IVPU_WA(punit_disabled))
                return 0;

        return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}

static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
        u32 fmin_fuse, fmax_fuse;

        fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE);
        fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
        fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);

        fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE);
        fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);

        hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
        hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
        hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
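
/*
 * The clamping order above matters: the requested ratios (ivpu_pll_min_ratio
 * and ivpu_pll_max_ratio come from outside this file) are first bounded by
 * the fused limits, then the P(n) ratio is bounded by the resulting
 * [min, max] range, so the three values stay mutually consistent whatever
 * was requested.
 */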

static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u16 target_ratio;
        u16 config;
        int ret;

        if (IVPU_WA(punit_disabled)) {
                ivpu_dbg(vdev, PM, "Skipping PLL request on %s\n",
                         ivpu_platform_to_str(vdev->platform));
                return 0;
        }

        if (enable) {
                target_ratio = hw->pll.pn_ratio;
                config = hw->config;
        } else {
                target_ratio = 0;
                config = 0;
        }

        ivpu_dbg(vdev, PM, "PLL workpoint request: %d Hz\n", PLL_RATIO_TO_FREQ(target_ratio));

        ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
        if (ret) {
                ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
                return ret;
        }

        ret = ivpu_pll_wait_for_lock(vdev, enable);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for PLL lock\n");
                return ret;
        }

        if (enable) {
                ret = ivpu_pll_wait_for_status_ready(vdev);
                if (ret) {
                        ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
                        return ret;
                }
        }

        return 0;
}

static int ivpu_pll_enable(struct ivpu_device *vdev)
{
        return ivpu_pll_drive(vdev, true);
}

static int ivpu_pll_disable(struct ivpu_device *vdev)
{
        return ivpu_pll_drive(vdev, false);
}

static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_CLR);

        val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
        val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
        val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val);

        REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val);
}

static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET);

        if (enable) {
                val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
                val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
                val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
        } else {
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
        }

        REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val);
}

static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET);

        if (enable) {
                val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
                val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
                val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
        } else {
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
        }

        REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val);
}

static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);

        if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN);

        if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY);

        if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
                return -EIO;

        return 0;
}
static int ivpu_boot_top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);

        if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
            !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QACCEPTN);

        if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
            !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QDENY);

        if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
            !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
                return -EIO;

        return 0;
}

static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
{
        ivpu_boot_host_ss_rst_clr_assert(vdev);

        return ivpu_boot_noc_qreqn_check(vdev, 0x0);
}

static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
        REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}

static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
        if (enable)
                val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
        else
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
        REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val);

        ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

        return ret;
}

static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
{
        return ivpu_boot_host_ss_axi_drive(vdev, true);
}

static int ivpu_boot_host_ss_axi_disable(struct ivpu_device *vdev)
{
        return ivpu_boot_host_ss_axi_drive(vdev, false);
}

static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
        if (enable) {
                val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
                val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
        } else {
                val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
                val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
        }
        REGV_WR32(MTL_VPU_TOP_NOC_QREQN, val);

        ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

        return ret;
}

static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
{
        return ivpu_boot_host_ss_top_noc_drive(vdev, true);
}

static int ivpu_boot_host_ss_top_noc_disable(struct ivpu_device *vdev)
{
        return ivpu_boot_host_ss_top_noc_drive(vdev, false);
}

static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

        if (enable)
                val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
        else
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);

        REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}

static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);

        if (enable)
                val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
        else
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);

        REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
        /* The FPGA model (UPF) is not power aware, skip Power Island polling */
        if (ivpu_is_fpga(vdev))
                return 0;

        return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
                             exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}

static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);

        if (enable)
                val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
        else
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);

        REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
}

static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);

        if (enable)
                val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
        else
                val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);

        REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
}

static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
{
        ivpu_boot_dpu_active_drive(vdev, false);
        ivpu_boot_pwr_island_isolation_drive(vdev, true);
        ivpu_boot_pwr_island_trickle_drive(vdev, false);
        ivpu_boot_pwr_island_drive(vdev, false);

        return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
}

static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{
        int ret;

        ivpu_boot_pwr_island_trickle_drive(vdev, true);
        ivpu_boot_pwr_island_drive(vdev, true);

        ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for power island status\n");
                return ret;
        }
        ret = ivpu_boot_top_noc_qreqn_check(vdev, 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
                return ret;
        }

        ivpu_boot_host_ss_clk_drive(vdev, true);
        ivpu_boot_pwr_island_isolation_drive(vdev, false);
        ivpu_boot_host_ss_rst_drive(vdev, true);
        ivpu_boot_dpu_active_drive(vdev, true);

        return ret;
}

static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES);

        val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
        val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
        val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

        REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val);
}

static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
        u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV);

        if (ivpu_is_fpga(vdev)) {
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
        } else {
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_AWMMUSSIDV, val);
                val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_ARMMUSSIDV, val);
        }

        REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val);
}

static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
{
        u32 val;

        val = REGV_RD32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
        val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);

        val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
        REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

        val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
        REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

        val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
        REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

        val = vdev->fw->entry_point >> 9;
        REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);

        val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
        REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);

        ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
                 vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
}
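
/*
 * Note on the ">> 9" above: LOADING_ADDRESS_LO evidently stores the firmware
 * entry point in 512-byte units, i.e. the entry point must be 512-byte
 * aligned so its low bits are free to carry control fields such as DONE.
 */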

static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
                return ret;
        }

        val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL);
        if (enable)
                val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
        else
                val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
        REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val);

        ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret)
                ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);

        return ret;
}
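
/*
 * D0i3 is the VPU's low-power idle state. The INPROGRESS bit appears to gate
 * back-to-back requests: the driver polls it before toggling I3 so that any
 * previous transition has settled, and again afterwards so the new one has
 * completed before control returns to the caller.
 */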

static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u32 tile_fuse;

        tile_fuse = REGB_RD32(MTL_BUTTRESS_TILE_FUSE);
        if (!REG_TEST_FLD(MTL_BUTTRESS_TILE_FUSE, VALID, tile_fuse))
                ivpu_warn(vdev, "Tile Fuse: Invalid (0x%x)\n", tile_fuse);

        hw->tile_fuse = REG_GET_FLD(MTL_BUTTRESS_TILE_FUSE, SKU, tile_fuse);
        switch (hw->tile_fuse) {
        case TILE_FUSE_ENABLE_LOWER:
                hw->sku = TILE_SKU_LOWER_MTL;
                hw->config = WP_CONFIG_1_TILE_5_3_RATIO;
                ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Lower\n");
                break;
        case TILE_FUSE_ENABLE_UPPER:
                hw->sku = TILE_SKU_UPPER_MTL;
                hw->config = WP_CONFIG_1_TILE_4_3_RATIO;
                ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Upper\n");
                break;
        case TILE_FUSE_ENABLE_BOTH:
                hw->sku = TILE_SKU_BOTH_MTL;
                hw->config = WP_CONFIG_2_TILE_5_3_RATIO;
                ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Both\n");
                break;
        default:
                hw->config = WP_CONFIG_0_TILE_PLL_OFF;
                ivpu_dbg(vdev, MISC, "Tile Fuse: Disable\n");
                break;
        }

        ivpu_pll_init_frequency_ratios(vdev);

        ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
        ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
        ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
        ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
        hw->ranges.global_aliased_pio = hw->ranges.user_low;

        return 0;
}
static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
{
        int ret;
        u32 val;

        if (IVPU_WA(punit_disabled))
                return 0;

        ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
                return ret;
        }

        val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET);
        val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
        REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val);

        ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
        if (ret)
                ivpu_err(vdev, "Timed out waiting for RESET completion\n");

        return ret;
}

static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_boot_d0i3_drive(vdev, true);
        if (ret)
                ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);

        udelay(5); /* VPU requires 5 us to complete the transition */

        return ret;
}

static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_boot_d0i3_drive(vdev, false);
        if (ret)
                ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);

        return ret;
}

static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
{
        int ret;

        ivpu_hw_read_platform(vdev);
        ivpu_hw_wa_init(vdev);
        ivpu_hw_timeouts_init(vdev);

        ret = ivpu_hw_mtl_reset(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);

        ret = ivpu_hw_mtl_d0i3_disable(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

        ret = ivpu_pll_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_host_ss_configure(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
                return ret;
        }
        /*
         * The control circuitry for the vpu_idle indication logic powers up
         * active. To avoid an unnecessary low power mode signal from the LRT
         * during bring-up, the KMD disables the circuitry prior to bringing
         * up the Main Power island.
         */
        ivpu_boot_vpu_idle_gen_disable(vdev);

        ret = ivpu_boot_pwr_domain_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_host_ss_axi_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_host_ss_top_noc_enable(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

        return ret;
}
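
/*
 * Bring-up order implemented above: IP reset -> D0i3 exit -> PLL workpoint ->
 * host subsystem reset/qreqn configuration -> idle-gen disable -> Main Power
 * island -> host AXI -> TOP NOC. Power-down (further below) largely mirrors
 * this sequence in reverse.
 */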

static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
{
        ivpu_boot_no_snoop_enable(vdev);
        ivpu_boot_tbu_mmu_enable(vdev);
        ivpu_boot_soc_cpu_boot(vdev);

        return 0;
}

static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
{
        u32 val;

        if (IVPU_WA(punit_disabled))
                return true;

        val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
        return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
               REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
}

static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
{
        int ret = 0;

        /* FPGA requires manual clearing of IP_Reset bit by enabling quiescent state */
        if (ivpu_is_fpga(vdev)) {
                if (ivpu_boot_host_ss_top_noc_disable(vdev)) {
                        ivpu_err(vdev, "Failed to disable TOP NOC\n");
                        ret = -EIO;
                }

                if (ivpu_boot_host_ss_axi_disable(vdev)) {
                        ivpu_err(vdev, "Failed to disable AXI\n");
                        ret = -EIO;
                }
        }

        if (ivpu_boot_pwr_domain_disable(vdev)) {
                ivpu_err(vdev, "Failed to disable power domain\n");
                ret = -EIO;
        }

        if (ivpu_pll_disable(vdev)) {
                ivpu_err(vdev, "Failed to disable PLL\n");
                ret = -EIO;
        }

        if (ivpu_hw_mtl_d0i3_enable(vdev))
                ivpu_warn(vdev, "Failed to enable D0I3\n");

        return ret;
}

static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
{
        u32 val;

        /* Enable writing and set non-zero WDT value */
        REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
        REGV_WR32(MTL_VPU_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

        /* Enable writing and disable watchdog timer */
        REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
        REGV_WR32(MTL_VPU_CPU_SS_TIM_WDOG_EN, 0);

        /* Now clear the timeout interrupt */
        val = REGV_RD32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG);
        val = REG_CLR_FLD(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
        REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
}

/* Register indirect accesses */
static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
{
        u32 pll_curr_ratio;

        pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
        pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;

        if (!ivpu_is_silicon(vdev))
                return PLL_SIMULATION_FREQ;

        return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}

static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
        return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
}

static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
{
        return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
}

static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
        return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE);
}

static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
        u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
        u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);

        REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
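
/*
 * The doorbell registers form an array: REGV_WR32I() presumably writes to
 * offset(DOORBELL_0) + db_id * reg_stride, so ringing doorbell N is a single
 * 32-bit write of the SET bit to the N-th register in the bank.
 */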

static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
        return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM);
}

static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
        u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT);

        return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}

static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
        REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}

static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev)
{
        REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}

static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev)
{
        REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
        REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
        REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
        REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}

static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
{
        REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
        REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
        REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
        REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}

static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
        ivpu_err_ratelimited(vdev, "WDT NCE irq\n");

        ivpu_pm_schedule_recovery(vdev);
}

static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
        ivpu_err_ratelimited(vdev, "WDT MSS irq\n");

        ivpu_hw_wdt_disable(vdev);
        ivpu_pm_schedule_recovery(vdev);
}

static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
        ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");

        ivpu_pm_schedule_recovery(vdev);
}

/* Handler for IRQs from VPU core (irqV) */
static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq)
{
        u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;

        REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status);

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
                ivpu_mmu_irq_evtq_handler(vdev);

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
                ivpu_ipc_irq_handler(vdev);

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
                ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
                ivpu_mmu_irq_gerr_handler(vdev);

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
                ivpu_hw_mtl_irq_wdt_mss_handler(vdev);

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
                ivpu_hw_mtl_irq_wdt_nce_handler(vdev);

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
                ivpu_hw_mtl_irq_noc_firewall_handler(vdev);

        return status;
}

/* Handler for IRQs from Buttress core (irqB) */
static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
{
        u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
        bool schedule_recovery = false;

        if (status == 0)
                return 0;

        /* Disable global interrupt before handling local buttress interrupts */
        REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);

        if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
                ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));

        if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
                ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
                REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
                u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);

                ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
                         ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
                         REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
                         REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
                REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        /*
         * Clear local interrupt status by writing 0 to all bits.
         * This must be done after interrupts are cleared at the source.
         * Writing 1 triggers an interrupt, so we can't perform read update write.
         */
        REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);

        /* Re-enable global interrupt */
        REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);

        if (schedule_recovery)
                ivpu_pm_schedule_recovery(vdev);

        return status;
}

static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
{
        struct ivpu_device *vdev = ptr;
        u32 ret_irqv, ret_irqb;

        ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
        ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);

        return IRQ_RETVAL(ret_irqb | ret_irqv);
}
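
/*
 * The top-level handler checks both interrupt sources unconditionally and
 * relies on IRQ_RETVAL() to report IRQ_HANDLED when either source had a
 * pending bit (and IRQ_NONE otherwise), which keeps spurious-interrupt
 * accounting correct.
 */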

static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
{
        u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
        u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;

        if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
                ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
                ivpu_err(vdev, "WDT MSS timeout detected\n");

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
                ivpu_err(vdev, "WDT NCE timeout detected\n");

        if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
                ivpu_err(vdev, "NOC Firewall irq detected\n");

        if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
                ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));

        if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
                u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);

                ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
                         ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
                         REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
                         REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
        }
}

const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
        .info_init = ivpu_hw_mtl_info_init,
        .power_up = ivpu_hw_mtl_power_up,
        .is_idle = ivpu_hw_mtl_is_idle,
        .power_down = ivpu_hw_mtl_power_down,
        .boot_fw = ivpu_hw_mtl_boot_fw,
        .wdt_disable = ivpu_hw_mtl_wdt_disable,
        .diagnose_failure = ivpu_hw_mtl_diagnose_failure,
        .reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
        .reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
        .reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
        .reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
        .reg_db_set = ivpu_hw_mtl_reg_db_set,
        .reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
        .reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
        .reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
        .irq_clear = ivpu_hw_mtl_irq_clear,
        .irq_enable = ivpu_hw_mtl_irq_enable,
        .irq_disable = ivpu_hw_mtl_irq_disable,
        .irq_handler = ivpu_hw_mtl_irq_handler,
};