drivers/accel/ivpu/ivpu_mmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/circ_buf.h>
#include <linux/highmem.h>

#include "ivpu_drv.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

#define IVPU_MMU_IDR0_REF               0x080f3e0f
#define IVPU_MMU_IDR0_REF_SIMICS        0x080f3e1f
#define IVPU_MMU_IDR1_REF               0x0e739d18
#define IVPU_MMU_IDR3_REF               0x0000003c
#define IVPU_MMU_IDR5_REF               0x00040070
#define IVPU_MMU_IDR5_REF_SIMICS        0x00000075
#define IVPU_MMU_IDR5_REF_FPGA          0x00800075

#define IVPU_MMU_CDTAB_ENT_SIZE         64
#define IVPU_MMU_CDTAB_ENT_COUNT_LOG2   8 /* 256 entries */
#define IVPU_MMU_CDTAB_ENT_COUNT        ((u32)1 << IVPU_MMU_CDTAB_ENT_COUNT_LOG2)

#define IVPU_MMU_STREAM_ID0             0
#define IVPU_MMU_STREAM_ID3             3

#define IVPU_MMU_STRTAB_ENT_SIZE        64
#define IVPU_MMU_STRTAB_ENT_COUNT       4
#define IVPU_MMU_STRTAB_CFG_LOG2SIZE    2
#define IVPU_MMU_STRTAB_CFG             IVPU_MMU_STRTAB_CFG_LOG2SIZE

#define IVPU_MMU_Q_COUNT_LOG2           4 /* 16 entries */
#define IVPU_MMU_Q_COUNT                ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
#define IVPU_MMU_Q_WRAP_BIT             (IVPU_MMU_Q_COUNT << 1)
#define IVPU_MMU_Q_WRAP_MASK            (IVPU_MMU_Q_WRAP_BIT - 1)
#define IVPU_MMU_Q_IDX_MASK             (IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val)             ((val) & IVPU_MMU_Q_IDX_MASK)

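/*
 * The command and event queues below share the ring geometry defined above:
 * producer and consumer values advance modulo IVPU_MMU_Q_WRAP_BIT (twice the
 * entry count), so the bit above the index acts as a wrap flag, and
 * IVPU_MMU_Q_IDX() extracts the ring slot index from either value.
 */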
#define IVPU_MMU_CMDQ_CMD_SIZE          16
#define IVPU_MMU_CMDQ_SIZE              (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)

#define IVPU_MMU_EVTQ_CMD_SIZE          32
#define IVPU_MMU_EVTQ_SIZE              (IVPU_MMU_Q_COUNT * IVPU_MMU_EVTQ_CMD_SIZE)

#define IVPU_MMU_CMD_OPCODE             GENMASK(7, 0)

#define IVPU_MMU_CMD_SYNC_0_CS          GENMASK(13, 12)
#define IVPU_MMU_CMD_SYNC_0_MSH         GENMASK(23, 22)
#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR    GENMASK(27, 24)
#define IVPU_MMU_CMD_SYNC_0_MSI_DATA    GENMASK(63, 32)

#define IVPU_MMU_CMD_CFGI_0_SSEC        BIT(10)
#define IVPU_MMU_CMD_CFGI_0_SSV         BIT(11)
#define IVPU_MMU_CMD_CFGI_0_SSID        GENMASK(31, 12)
#define IVPU_MMU_CMD_CFGI_0_SID         GENMASK(63, 32)
#define IVPU_MMU_CMD_CFGI_1_RANGE       GENMASK(4, 0)

#define IVPU_MMU_CMD_TLBI_0_ASID        GENMASK(63, 48)
#define IVPU_MMU_CMD_TLBI_0_VMID        GENMASK(47, 32)

#define CMD_PREFETCH_CFG                0x1
#define CMD_CFGI_STE                    0x3
#define CMD_CFGI_ALL                    0x4
#define CMD_CFGI_CD                     0x5
#define CMD_CFGI_CD_ALL                 0x6
#define CMD_TLBI_NH_ASID                0x11
#define CMD_TLBI_EL2_ALL                0x20
#define CMD_TLBI_NSNH_ALL               0x30
#define CMD_SYNC                        0x46

#define IVPU_MMU_EVT_F_UUT              0x01
#define IVPU_MMU_EVT_C_BAD_STREAMID     0x02
#define IVPU_MMU_EVT_F_STE_FETCH        0x03
#define IVPU_MMU_EVT_C_BAD_STE          0x04
#define IVPU_MMU_EVT_F_BAD_ATS_TREQ     0x05
#define IVPU_MMU_EVT_F_STREAM_DISABLED  0x06
#define IVPU_MMU_EVT_F_TRANSL_FORBIDDEN 0x07
#define IVPU_MMU_EVT_C_BAD_SUBSTREAMID  0x08
#define IVPU_MMU_EVT_F_CD_FETCH         0x09
#define IVPU_MMU_EVT_C_BAD_CD           0x0a
#define IVPU_MMU_EVT_F_WALK_EABT        0x0b
#define IVPU_MMU_EVT_F_TRANSLATION      0x10
#define IVPU_MMU_EVT_F_ADDR_SIZE        0x11
#define IVPU_MMU_EVT_F_ACCESS           0x12
#define IVPU_MMU_EVT_F_PERMISSION       0x13
#define IVPU_MMU_EVT_F_TLB_CONFLICT     0x20
#define IVPU_MMU_EVT_F_CFG_CONFLICT     0x21
#define IVPU_MMU_EVT_E_PAGE_REQUEST     0x24
#define IVPU_MMU_EVT_F_VMS_FETCH        0x25

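/*
 * The command opcodes and event codes above follow the Arm SMMUv3 command and
 * event record encodings; only the subset used by this driver is listed.
 */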
#define IVPU_MMU_EVT_OP_MASK            GENMASK_ULL(7, 0)
#define IVPU_MMU_EVT_SSID_MASK          GENMASK_ULL(31, 12)

#define IVPU_MMU_Q_BASE_RWA             BIT(62)
#define IVPU_MMU_Q_BASE_ADDR_MASK       GENMASK_ULL(51, 5)
#define IVPU_MMU_STRTAB_BASE_RA         BIT(62)
#define IVPU_MMU_STRTAB_BASE_ADDR_MASK  GENMASK_ULL(51, 6)

#define IVPU_MMU_IRQ_EVTQ_EN            BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN          BIT(0)

#define IVPU_MMU_CR0_ATSCHK             BIT(4)
#define IVPU_MMU_CR0_CMDQEN             BIT(3)
#define IVPU_MMU_CR0_EVTQEN             BIT(2)
#define IVPU_MMU_CR0_PRIQEN             BIT(1)
#define IVPU_MMU_CR0_SMMUEN             BIT(0)

#define IVPU_MMU_CR1_TABLE_SH           GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC           GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC           GENMASK(7, 6)
#define IVPU_MMU_CR1_QUEUE_SH           GENMASK(5, 4)
#define IVPU_MMU_CR1_QUEUE_OC           GENMASK(3, 2)
#define IVPU_MMU_CR1_QUEUE_IC           GENMASK(1, 0)
#define IVPU_MMU_CACHE_NC               0
#define IVPU_MMU_CACHE_WB               1
#define IVPU_MMU_CACHE_WT               2
#define IVPU_MMU_SH_NSH                 0
#define IVPU_MMU_SH_OSH                 2
#define IVPU_MMU_SH_ISH                 3

#define IVPU_MMU_CMDQ_OP                GENMASK_ULL(7, 0)

#define IVPU_MMU_CD_0_TCR_T0SZ          GENMASK_ULL(5, 0)
#define IVPU_MMU_CD_0_TCR_TG0           GENMASK_ULL(7, 6)
#define IVPU_MMU_CD_0_TCR_IRGN0         GENMASK_ULL(9, 8)
#define IVPU_MMU_CD_0_TCR_ORGN0         GENMASK_ULL(11, 10)
#define IVPU_MMU_CD_0_TCR_SH0           GENMASK_ULL(13, 12)
#define IVPU_MMU_CD_0_TCR_EPD0          BIT_ULL(14)
#define IVPU_MMU_CD_0_TCR_EPD1          BIT_ULL(30)
#define IVPU_MMU_CD_0_ENDI              BIT(15)
#define IVPU_MMU_CD_0_V                 BIT(31)
#define IVPU_MMU_CD_0_TCR_IPS           GENMASK_ULL(34, 32)
#define IVPU_MMU_CD_0_TCR_TBI0          BIT_ULL(38)
#define IVPU_MMU_CD_0_AA64              BIT(41)
#define IVPU_MMU_CD_0_S                 BIT(44)
#define IVPU_MMU_CD_0_R                 BIT(45)
#define IVPU_MMU_CD_0_A                 BIT(46)
#define IVPU_MMU_CD_0_ASET              BIT(47)
#define IVPU_MMU_CD_0_ASID              GENMASK_ULL(63, 48)

#define IVPU_MMU_T0SZ_48BIT             16
#define IVPU_MMU_T0SZ_38BIT             26

#define IVPU_MMU_IPS_48BIT              5
#define IVPU_MMU_IPS_44BIT              4
#define IVPU_MMU_IPS_42BIT              3
#define IVPU_MMU_IPS_40BIT              2
#define IVPU_MMU_IPS_36BIT              1
#define IVPU_MMU_IPS_32BIT              0

#define IVPU_MMU_CD_1_TTB0_MASK         GENMASK_ULL(51, 4)

#define IVPU_MMU_STE_0_S1CDMAX          GENMASK_ULL(63, 59)
#define IVPU_MMU_STE_0_S1FMT            GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_0_S1FMT_LINEAR     0
#define IVPU_MMU_STE_DWORDS             8
#define IVPU_MMU_STE_0_CFG_S1_TRANS     5
#define IVPU_MMU_STE_0_CFG              GENMASK_ULL(3, 1)
#define IVPU_MMU_STE_0_S1CTXPTR_MASK    GENMASK_ULL(51, 6)
#define IVPU_MMU_STE_0_V                BIT(0)

#define IVPU_MMU_STE_1_STRW_NSEL1       0ul
#define IVPU_MMU_STE_1_CONT             GENMASK_ULL(16, 13)
#define IVPU_MMU_STE_1_STRW             GENMASK_ULL(31, 30)
#define IVPU_MMU_STE_1_PRIVCFG          GENMASK_ULL(49, 48)
#define IVPU_MMU_STE_1_PRIVCFG_UNPRIV   2ul
#define IVPU_MMU_STE_1_INSTCFG          GENMASK_ULL(51, 50)
#define IVPU_MMU_STE_1_INSTCFG_DATA     2ul
#define IVPU_MMU_STE_1_MEV              BIT(19)
#define IVPU_MMU_STE_1_S1STALLD         BIT(27)
#define IVPU_MMU_STE_1_S1C_CACHE_NC     0ul
#define IVPU_MMU_STE_1_S1C_CACHE_WBRA   1ul
#define IVPU_MMU_STE_1_S1C_CACHE_WT     2ul
#define IVPU_MMU_STE_1_S1C_CACHE_WB     3ul
#define IVPU_MMU_STE_1_S1CIR            GENMASK_ULL(3, 2)
#define IVPU_MMU_STE_1_S1COR            GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_1_S1CSH            GENMASK_ULL(7, 6)
#define IVPU_MMU_STE_1_S1DSS            GENMASK_ULL(1, 0)
#define IVPU_MMU_STE_1_S1DSS_TERMINATE  0x0

#define IVPU_MMU_REG_TIMEOUT_US         (10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US       (100 * USEC_PER_MSEC)

#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
                                  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))

static char *ivpu_mmu_event_to_str(u32 cmd)
{
        switch (cmd) {
        case IVPU_MMU_EVT_F_UUT:
                return "Unsupported Upstream Transaction";
        case IVPU_MMU_EVT_C_BAD_STREAMID:
                return "Transaction StreamID out of range";
        case IVPU_MMU_EVT_F_STE_FETCH:
                return "Fetch of STE caused external abort";
        case IVPU_MMU_EVT_C_BAD_STE:
                return "Used STE invalid";
        case IVPU_MMU_EVT_F_BAD_ATS_TREQ:
                return "Address Request disallowed for a StreamID";
        case IVPU_MMU_EVT_F_STREAM_DISABLED:
                return "Transaction marks non-substream disabled";
        case IVPU_MMU_EVT_F_TRANSL_FORBIDDEN:
                return "MMU bypass is disallowed for this StreamID";
        case IVPU_MMU_EVT_C_BAD_SUBSTREAMID:
                return "Invalid SubstreamID";
        case IVPU_MMU_EVT_F_CD_FETCH:
                return "Fetch of CD caused external abort";
        case IVPU_MMU_EVT_C_BAD_CD:
                return "Fetched CD invalid";
        case IVPU_MMU_EVT_F_WALK_EABT:
                return "An external abort occurred fetching a TLB";
        case IVPU_MMU_EVT_F_TRANSLATION:
                return "Translation fault";
        case IVPU_MMU_EVT_F_ADDR_SIZE:
                return "Output address caused address size fault";
        case IVPU_MMU_EVT_F_ACCESS:
                return "Access flag fault";
        case IVPU_MMU_EVT_F_PERMISSION:
                return "Permission fault occurred on page access";
        case IVPU_MMU_EVT_F_TLB_CONFLICT:
                return "A TLB conflict";
        case IVPU_MMU_EVT_F_CFG_CONFLICT:
                return "A configuration cache conflict";
        case IVPU_MMU_EVT_E_PAGE_REQUEST:
                return "Page request hint from a client device";
        case IVPU_MMU_EVT_F_VMS_FETCH:
                return "Fetch of VMS caused external abort";
        default:
                return "Unknown event";
        }
}

static void ivpu_mmu_config_check(struct ivpu_device *vdev)
{
        u32 val_ref;
        u32 val;

        if (ivpu_is_simics(vdev))
                val_ref = IVPU_MMU_IDR0_REF_SIMICS;
        else
                val_ref = IVPU_MMU_IDR0_REF;

        val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
        if (val != val_ref)
                ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);

        val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
        if (val != IVPU_MMU_IDR1_REF)
                ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);

        val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
        if (val != IVPU_MMU_IDR3_REF)
                ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);

        if (ivpu_is_simics(vdev))
                val_ref = IVPU_MMU_IDR5_REF_SIMICS;
        else if (ivpu_is_fpga(vdev))
                val_ref = IVPU_MMU_IDR5_REF_FPGA;
        else
                val_ref = IVPU_MMU_IDR5_REF;

        val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
        if (val != val_ref)
                ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}

static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
        size_t size = IVPU_MMU_CDTAB_ENT_COUNT * IVPU_MMU_CDTAB_ENT_SIZE;

        cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL);
        if (!cdtab->base)
                return -ENOMEM;

        ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);

        return 0;
}

static int ivpu_mmu_strtab_alloc(struct ivpu_device *vdev)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        struct ivpu_mmu_strtab *strtab = &mmu->strtab;
        size_t size = IVPU_MMU_STRTAB_ENT_COUNT * IVPU_MMU_STRTAB_ENT_SIZE;

        strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL);
        if (!strtab->base)
                return -ENOMEM;

        strtab->base_cfg = IVPU_MMU_STRTAB_CFG;
        strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA;
        strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK;

        ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n",
                 &strtab->dma, &strtab->dma_q, size);

        return 0;
}

static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        struct ivpu_mmu_queue *q = &mmu->cmdq;

        q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
        if (!q->base)
                return -ENOMEM;

        q->dma_q = IVPU_MMU_Q_BASE_RWA;
        q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
        q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

        ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
                 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);

        return 0;
}

static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        struct ivpu_mmu_queue *q = &mmu->evtq;

        q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
        if (!q->base)
                return -ENOMEM;

        q->dma_q = IVPU_MMU_Q_BASE_RWA;
        q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
        q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

        ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n",
                 &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE);

        return 0;
}

static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_mmu_cdtab_alloc(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
                return ret;
        }

        ret = ivpu_mmu_strtab_alloc(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
                return ret;
        }

        ret = ivpu_mmu_cmdq_alloc(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
                return ret;
        }

        ret = ivpu_mmu_evtq_alloc(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);

        return ret;
}

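/*
 * MMU registers that have a companion acknowledge register are written by
 * updating the base register and polling the ACK register (4 bytes above it)
 * until it reflects the new value.
 */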
static int ivpu_mmu_reg_write(struct ivpu_device *vdev, u32 reg, u32 val)
{
        u32 reg_ack = reg + 4; /* ACK register is 4B after base register */
        u32 val_ack;
        int ret;

        REGV_WR32(reg, val);

        ret = REGV_POLL(reg_ack, val_ack, (val == val_ack), IVPU_MMU_REG_TIMEOUT_US);
        if (ret)
                ivpu_err(vdev, "Failed to write register 0x%x\n", reg);

        return ret;
}

static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
{
        u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
        int ret;

        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
        if (ret)
                return ret;

        return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
}

static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
        struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;

        return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
                         IVPU_MMU_QUEUE_TIMEOUT_US);
}

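/*
 * Stage a 16-byte (two 64-bit word) command in the command queue ring.
 * The producer index is only advanced locally here; the hardware sees the
 * new commands once CMDQ_PROD is written in ivpu_mmu_cmdq_sync().
 */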
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
        struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
        u64 *queue_buffer = q->base;
        int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));

        if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
                ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
                return -EBUSY;
        }

        queue_buffer[idx] = data0;
        queue_buffer[idx + 1] = data1;
        q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;

        ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);

        return 0;
}

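/*
 * Queue a CMD_SYNC, flush the command ring from the CPU cache, publish the
 * producer index to the hardware and wait until the consumer index catches
 * up, which confirms that all previously queued commands have been consumed.
 */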
static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
{
        struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
        u64 val;
        int ret;

        val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC) |
              FIELD_PREP(IVPU_MMU_CMD_SYNC_0_CS, 0x2) |
              FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) |
              FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf);

        ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
        if (ret)
                return ret;

        clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
        REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);

        ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
        if (ret)
                ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);

        return ret;
}

static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
{
        u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL);
        u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f);

        return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1);
}

static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
{
        u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) |
                  FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid);

        return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
}

static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev)
{
        u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NSNH_ALL);

        return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0);
}

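/*
 * Bring the MMU into a known state: clear both rings, program the table and
 * queue attributes in CR1, set up the stream table and command queue, flush
 * cached configuration and TLB entries, then enable the event queue, ATS
 * check, interrupts and finally the SMMU itself.
 */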
static int ivpu_mmu_reset(struct ivpu_device *vdev)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        u32 val;
        int ret;

        memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
        clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
        mmu->cmdq.prod = 0;
        mmu->cmdq.cons = 0;

        memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
        clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
        mmu->evtq.prod = 0;
        mmu->evtq.cons = 0;

        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
        if (ret)
                return ret;

        val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
              FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
              FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
              FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
              FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
              FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
        REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);

        REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
        REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);

        REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
        REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
        REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);

        val = IVPU_MMU_CR0_CMDQEN;
        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
        if (ret)
                return ret;

        ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
        if (ret)
                return ret;

        ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
        if (ret)
                return ret;

        ret = ivpu_mmu_cmdq_sync(vdev);
        if (ret)
                return ret;

        REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
        REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
        REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);

        val |= IVPU_MMU_CR0_EVTQEN;
        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
        if (ret)
                return ret;

        val |= IVPU_MMU_CR0_ATSCHK;
        ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
        if (ret)
                return ret;

        ret = ivpu_mmu_irqs_setup(vdev);
        if (ret)
                return ret;

        val |= IVPU_MMU_CR0_SMMUEN;
        return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
}

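/*
 * Point a stream table entry at the linear context descriptor table.
 * STE word 1 is written before word 0 so that the valid bit in word 0 only
 * becomes observable once the rest of the entry is in place.
 */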
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        struct ivpu_mmu_strtab *strtab = &mmu->strtab;
        struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
        u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
        u64 str[2];

        str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
                 FIELD_PREP(IVPU_MMU_STE_0_S1CDMAX, IVPU_MMU_CDTAB_ENT_COUNT_LOG2) |
                 FIELD_PREP(IVPU_MMU_STE_0_S1FMT, IVPU_MMU_STE_0_S1FMT_LINEAR) |
                 IVPU_MMU_STE_0_V |
                 (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);

        str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE) |
                 FIELD_PREP(IVPU_MMU_STE_1_S1CIR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
                 FIELD_PREP(IVPU_MMU_STE_1_S1COR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
                 FIELD_PREP(IVPU_MMU_STE_1_S1CSH, IVPU_MMU_SH_NSH) |
                 FIELD_PREP(IVPU_MMU_STE_1_PRIVCFG, IVPU_MMU_STE_1_PRIVCFG_UNPRIV) |
                 FIELD_PREP(IVPU_MMU_STE_1_INSTCFG, IVPU_MMU_STE_1_INSTCFG_DATA) |
                 FIELD_PREP(IVPU_MMU_STE_1_STRW, IVPU_MMU_STE_1_STRW_NSEL1) |
                 FIELD_PREP(IVPU_MMU_STE_1_CONT, IVPU_MMU_STRTAB_CFG_LOG2SIZE) |
                 IVPU_MMU_STE_1_MEV |
                 IVPU_MMU_STE_1_S1STALLD;

        WRITE_ONCE(entry[1], str[1]);
        WRITE_ONCE(entry[0], str[0]);

        clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);

        ivpu_dbg(vdev, MMU, "STRTAB write entry (SID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
}

static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
{
        ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0);
        ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3);

        return 0;
}

int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        int ret = 0;

        mutex_lock(&mmu->lock);
        if (!mmu->on)
                goto unlock;

        ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
        if (ret)
                goto unlock;

        ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
        mutex_unlock(&mmu->lock);
        return ret;
}

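/*
 * Write (or clear, when cd_dma is 0) the context descriptor for a given SSID.
 * Words 1-3 are stored before word 0, which holds the valid bit, and the
 * entry is flushed from the CPU cache before configuration invalidation is
 * issued through the command queue.
 */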
static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
        u64 *entry;
        u64 cd[4];
        int ret = 0;

        if (ssid >= IVPU_MMU_CDTAB_ENT_COUNT)
                return -EINVAL;

        entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);

        if (cd_dma != 0) {
                cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
                        FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
                        FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
                        FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
                        FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
                        FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
                        FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
                        IVPU_MMU_CD_0_TCR_EPD1 |
                        IVPU_MMU_CD_0_AA64 |
                        IVPU_MMU_CD_0_R |
                        IVPU_MMU_CD_0_ASET |
                        IVPU_MMU_CD_0_V;
                cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
                cd[2] = 0;
                cd[3] = 0x0000000000007444;

                /* For global context generate memory fault on VPU */
                if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
                        cd[0] |= IVPU_MMU_CD_0_A;
        } else {
                memset(cd, 0, sizeof(cd));
        }

        WRITE_ONCE(entry[1], cd[1]);
        WRITE_ONCE(entry[2], cd[2]);
        WRITE_ONCE(entry[3], cd[3]);
        WRITE_ONCE(entry[0], cd[0]);

        clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);

        ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
                 cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);

        mutex_lock(&mmu->lock);
        if (!mmu->on)
                goto unlock;

        ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
        if (ret)
                goto unlock;

        ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
        mutex_unlock(&mmu->lock);
        return ret;
}

static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
        if (ret)
                ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);

        return ret;
}

static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
{
        int ret;

        if (ssid == 0) {
                ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
                return -EINVAL;
        }

        ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
        if (ret)
                ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);

        return ret;
}

int ivpu_mmu_init(struct ivpu_device *vdev)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        int ret;

        ivpu_dbg(vdev, MMU, "Init..\n");

        drmm_mutex_init(&vdev->drm, &mmu->lock);
        ivpu_mmu_config_check(vdev);

        ret = ivpu_mmu_structs_alloc(vdev);
        if (ret)
                return ret;

        ret = ivpu_mmu_strtab_init(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
                return ret;
        }

        ret = ivpu_mmu_cd_add_gbl(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to add global CD: %d\n", ret);
                return ret;
        }

        ret = ivpu_mmu_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable MMU: %d\n", ret);
                return ret;
        }

        ivpu_dbg(vdev, MMU, "Init done\n");

        return 0;
}

int ivpu_mmu_enable(struct ivpu_device *vdev)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;
        int ret;

        mutex_lock(&mmu->lock);

        mmu->on = true;

        ret = ivpu_mmu_reset(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
                goto err;
        }

        ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
        if (ret)
                goto err;

        ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
        if (ret)
                goto err;

        ret = ivpu_mmu_cmdq_sync(vdev);
        if (ret)
                goto err;

        mutex_unlock(&mmu->lock);

        return 0;
err:
        mmu->on = false;
        mutex_unlock(&mmu->lock);
        return ret;
}

void ivpu_mmu_disable(struct ivpu_device *vdev)
{
        struct ivpu_mmu_info *mmu = vdev->mmu;

        mutex_lock(&mmu->lock);
        mmu->on = false;
        mutex_unlock(&mmu->lock);
}

static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
{
        u32 ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
        u32 op = FIELD_GET(IVPU_MMU_EVT_OP_MASK, event[0]);
        u64 fetch_addr = ((u64)event[7]) << 32 | event[6];
        u64 in_addr = ((u64)event[5]) << 32 | event[4];
        u32 sid = event[1];

        ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
                 op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr);
}

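/*
 * Pop a single entry from the event queue: refresh the producer index from
 * the hardware, return NULL if the ring is empty, otherwise advance and
 * publish the consumer index and return a pointer to the event record.
 */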
static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
{
        struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
        u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
        u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);

        evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
        if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
                return NULL;

        clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);

        evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
        REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);

        return evt;
}

void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
        bool schedule_recovery = false;
        u32 *event;
        u32 ssid;

        ivpu_dbg(vdev, IRQ, "MMU event queue\n");

        while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
                ivpu_mmu_dump_event(vdev, event);

                ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
                if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
                        schedule_recovery = true;
                else
                        ivpu_mmu_user_context_mark_invalid(vdev, ssid);
        }

        if (schedule_recovery)
                ivpu_pm_schedule_recovery(vdev);
}

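/*
 * Bits that differ between GERROR and GERRORN indicate currently active
 * global error conditions; they are logged and then acknowledged by writing
 * the GERROR value back to GERRORN.
 */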
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
{
        u32 gerror_val, gerrorn_val, active;

        ivpu_dbg(vdev, IRQ, "MMU error\n");

        gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
        gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);

        active = gerror_val ^ gerrorn_val;
        if (!(active & IVPU_MMU_GERROR_ERR_MASK))
                return;

        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
                ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");

        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
                ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");

        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
                ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");

        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
                ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");

        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
                ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");

        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
                ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");

        if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
                ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");

        REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
}

int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
        return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
}

void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
{
        ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
}