/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)
#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
#define GBPA_UPDATE			(1 << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54
#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL
/* Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
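/*
 * Worked example (illustrative, not from the original source): for the
 * TG0 field, ARM_SMMU_TCR2CD(tcr, TG0) expands to
 *
 *	(((tcr) >> 14 & 0x3UL) << 6)
 *
 * i.e. the two-bit TTBR0 granule-size field moves from its AArch64 TCR
 * position (bit 14) to its context-descriptor position (bit 6).
 */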
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100
#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US	1000000 /* 1s! */

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000
/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_HISILICON_HI161X
#define ACPI_IORT_SMMU_HISILICON_HI161X		0x1
#endif

#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX		0x2
#endif
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};
/* Opaque encapsulation of one SMMU command */
struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};
struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};
/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	/*
	 * An STE is "assigned" if the master emitting the corresponding SID
	 * is attached to a domain. The behaviour of an unassigned STE is
	 * determined by the disable_bypass parameter, whereas an assigned
	 * STE behaves according to s1_cfg/s2_cfg, which themselves are
	 * configured according to the domain type.
	 */
	bool				assigned;
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};
/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY	(1 << 1)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;
	int				combined_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};
/* SMMU private data for each master */
struct arm_smmu_master_data {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
	{ 0, NULL},
};
static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
						 struct arm_smmu_device *smmu)
{
	if ((offset > SZ_64K) &&
	    (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY))
		offset -= SZ_64K;

	return smmu->base + offset;
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}
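/*
 * Illustrative sketch, not part of the original driver: the prod/cons
 * values above carry three things at once -- Q_IDX() is the ring index,
 * Q_WRP() is a wrap bit that toggles on every pass over the ring (so
 * full and empty can be told apart when the indices match), and Q_OVF()
 * is the sticky overflow flag in bit 31. The hypothetical helper below
 * only demonstrates that stepping past the last slot flips the wrap bit.
 */
static bool __maybe_unused queue_example_inc_wraps(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	/* True exactly when q->prod sat on the last index of the ring */
	return Q_WRP(q, prod) != Q_WRP(q, q->prod);
}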
/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
	ktime_t timeout;
	unsigned int delay = 1;

	/* Wait longer if it's a queue drain */
	timeout = ktime_add_us(ktime_get(), drain ?
					    ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US :
					    ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(delay);
			delay *= 2;
		}
	}

	return 0;
}
static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -ENOSPC;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass/fault (init)
	 * 2. Bypass/fault -> translation/bypass (attach)
	 * 3. Translation/bypass -> bypass/fault (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing STE_0 value, as we're going to rewrite it */
	val = STRTAB_STE_0_V;

	/* Bypass/fault */
	if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
		if (!ste->assigned && disable_bypass)
			val |= STRTAB_STE_0_CFG_ABORT;
		else
			val |= STRTAB_STE_0_CFG_BYPASS;

		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
			 << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
			<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = { .assigned = false };

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}
static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);
		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * OK.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}
static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;

	arm_smmu_evtq_thread(irq, dev);
	if (smmu->features & ARM_SMMU_FEAT_PRI)
		arm_smmu_priq_thread(irq, dev);

	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
{
	arm_smmu_gerror_handler(irq, dev);
	arm_smmu_cmdq_sync_handler(irq, dev);
	return IRQ_WAKE_THREAD;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}
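/*
 * Worked example (illustrative): invalidating iova 0x10000 with
 * size 0x4000 and granule 0x1000 issues four TLBI commands, one per
 * 4K page (0x10000, 0x11000, 0x12000, 0x13000); the matching CMD_SYNC
 * is only emitted later, via the separate ->tlb_sync callback.
 */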
static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	return &smmu_domain->domain;
}
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}
static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		return 0;
	}

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (ret < 0)
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}
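/*
 * Worked example (illustrative): with STRTAB_SPLIT == 8, SID 0x1234
 * walks to L1 descriptor 0x12 (the upper SID bits) and then to STE 0x34
 * within that descriptor's 256-entry L2 table, i.e.
 * step = &l1_desc[0x12].l2ptr[0x34 * STRTAB_STE_DWORDS].
 */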
static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
	int i;
	struct arm_smmu_master_data *master = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = master->smmu;

	for (i = 0; i < fwspec->num_ids; ++i) {
		u32 sid = fwspec->ids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
	}
}

static void arm_smmu_detach_dev(struct device *dev)
{
	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

	master->ste.assigned = false;
	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_data *master;
	struct arm_smmu_strtab_ent *ste;

	if (!dev->iommu_fwspec)
		return -ENOENT;

	master = dev->iommu_fwspec->iommu_priv;
	smmu = master->smmu;
	ste = &master->ste;

	/* Already attached to a different domain? */
	if (ste->assigned)
		arm_smmu_detach_dev(dev);

	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	ste->assigned = true;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
		ste->s1_cfg = NULL;
		ste->s2_cfg = NULL;
	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	return ops->map(ops, iova, paddr, size, prot);
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return 0;

	return ops->unmap(ops, iova, size);
}

static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;

	if (smmu)
		__arm_smmu_tlb_sync(smmu);
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	return ops->iova_to_phys(ops, iova);
}
static struct platform_driver arm_smmu_driver;

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_data *master;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return -ENODEV;
	/*
	 * We _can_ actually withstand dodgy bus code re-calling add_device()
	 * without an intervening remove_device()/of_xlate() sequence, but
	 * we're not going to do so quietly...
	 */
	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
		master = fwspec->iommu_priv;
		smmu = master->smmu;
	} else {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
		if (!smmu)
			return -ENODEV;
		master = kzalloc(sizeof(*master), GFP_KERNEL);
		if (!master)
			return -ENOMEM;

		master->smmu = smmu;
		fwspec->iommu_priv = master;
	}

	/* Check the SIDs are in range of the SMMU and our stream table */
	for (i = 0; i < fwspec->num_ids; i++) {
		u32 sid = fwspec->ids[i];

		if (!arm_smmu_sid_in_range(smmu, sid))
			return -ERANGE;

		/* Ensure l2 strtab is initialised */
		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
			ret = arm_smmu_init_l2_strtab(smmu, sid);
			if (ret)
				return ret;
		}
	}

	group = iommu_group_get_for_dev(dev);
	if (!IS_ERR(group)) {
		iommu_group_put(group);
		iommu_device_link(&smmu->iommu, dev);
	}

	return PTR_ERR_OR_ZERO(group);
}
static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_data *master;
	struct arm_smmu_device *smmu;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	master = fwspec->iommu_priv;
	smmu = master->smmu;
	if (master && master->ste.assigned)
		arm_smmu_detach_dev(dev);
	iommu_group_remove_device(dev);
	iommu_device_unlink(&smmu->iommu, dev);
	kfree(master);
	iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * We don't support devices sharing stream IDs other than PCI RID
	 * aliases, since the necessary ID-to-device lookup becomes rather
	 * impractical given a potential sparse 32-bit stream ID space.
	 */
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.flush_iotlb_all	= arm_smmu_iotlb_sync,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= arm_smmu_page1_fixup(prod_off, smmu);
	q->cons_reg	= arm_smmu_page1_fixup(cons_off, smmu);
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}
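/*
 * Worked example (illustrative): a 256-entry command queue
 * (max_n_shift == 8) whose DMA base is 0x8_0000_0000 programs
 * q_base = Q_BASE_RWA | 0x8_0000_0000 | 8, i.e. the read-allocate hint
 * in bit 62, the base address in bits [51:5] and log2(size) in bits [4:0].
 */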
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/* Calculate the L1 size, capped to the SIDSIZE. */
	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}
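/*
 * Worked example (illustrative): with STRTAB_L1_SZ_SHIFT == 20 and
 * 1-dword L1 descriptors, the L1 holds at most 1 << (20 - 3) == 128k
 * entries. Each entry spans 1 << STRTAB_SPLIT == 256 STEs, so the two
 * levels cover 17 + 8 == 25 bits of SID; for sid_bits == 32 the
 * "only covers %u/%u bits" warning above fires for the remaining bits.
 */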
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
/* GBPA is "special" */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	reg &= ~clr;
	reg |= set;
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
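/*
 * Illustrative usage sketch (hedged -- a hypothetical call, not a new
 * API): while SMMUEN is clear, GBPA decides what happens to incoming
 * transactions, so e.g.
 *
 *	arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
 *
 * would make them abort rather than bypass. The update protocol is:
 * wait for GBPA.Update to clear, write the new value with GBPA_UPDATE
 * set, then wait for the hardware to clear GBPA_UPDATE again.
 */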
static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}
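/*
 * Worked example (illustrative): an MSI doorbell at physical address
 * 0x1234_5678_9000 is masked down to bits [51:2] and written to cfg[0],
 * the 32-bit payload from msg->data goes to cfg[1], and cfg[2] selects
 * Device-nGnRE attributes so the SMMU emits the MSI write with Device
 * memory semantics.
 */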
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}

static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
{
	int irq, ret;

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
		}
	}
}

static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	irq = smmu->combined_irq;
	if (irq) {
		/*
		 * The Cavium ThunderX2 implementation doesn't support unique
		 * irq lines. Use a single irq line for all the SMMUv3
		 * interrupts.
		 */
		ret = devm_request_threaded_irq(smmu->dev, irq,
					arm_smmu_combined_irq_handler,
					arm_smmu_combined_irq_thread,
					IRQF_ONESHOT,
					"arm-smmu-v3-combined-irq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable combined irq\n");
	} else {
		arm_smmu_setup_unique_irqs(smmu);
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}
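
/*
 * Full device bring-up, in the order the architecture expects: disable the
 * SMMU, program table/queue attributes and base registers, enable the
 * command queue first (so configuration-cache and TLB invalidation can be
 * issued before translation starts), bring up the event and PRI queues,
 * and only then set SMMUEN -- or, when we are left in bypass, clear
 * GBPA_ABORT so that incoming transactions bypass translation instead of
 * aborting.
 */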

static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (random crap) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
	writel_relaxed(smmu->evtq.q.cons,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
		writel_relaxed(smmu->priq.q.cons,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret) {
			dev_err(smmu->dev, "GBPA not responding to update\n");
			return ret;
		}
	}

	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
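
/*
 * Interrogate the hardware ID registers: IDR0 for the feature set, IDR1
 * for queue, SID and SSID sizes, and IDR5 for the supported page sizes and
 * output address size. Anything the driver cannot work with is reported
 * and -ENXIO returned.
 */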

static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_STALL:
		/* Fallthrough */
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/*
	 * If the SMMU supports fewer bits than would fill a single L2 stream
	 * table, use a linear table instead.
	 */
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			 "unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}
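
/*
 * On ACPI systems, the IORT node's model field identifies the particular
 * implementation so that its quirks can be applied: Cavium CN99xx parts
 * implement only page 0 of the register space, and HiSilicon Hi161x parts
 * need prefetch commands suppressed.
 */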

#ifdef CONFIG_ACPI
static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
{
	switch (model) {
	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
		break;
	case ACPI_IORT_SMMU_HISILICON_HI161X:
		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
		break;
	}

	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	acpi_smmu_get_options(iort_smmu->model, smmu);

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}
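
/*
 * Implementations flagged ARM_SMMU_OPT_PAGE0_REGS_ONLY expose the page 1
 * registers within page 0 (see arm_smmu_page1_fixup()), so only 64K of
 * MMIO space is expected instead of the architected 128K.
 */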

static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
{
	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
		return SZ_64K;
	else
		return SZ_128K;
}
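
/*
 * Probe flow: gather firmware configuration (DT or ACPI/IORT), map the
 * registers, discover hardware features, allocate the in-memory stream
 * table and queues, reset the device and register with the IOMMU core. If
 * firmware probing fails, the SMMU is placed in bypass mode, subject to
 * the disable_bypass module parameter.
 */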

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */

	irq = platform_get_irq_byname(pdev, "combined");
	if (irq > 0) {
		smmu->combined_irq = irq;
	} else {
		irq = platform_get_irq_byname(pdev, "eventq");
		if (irq > 0)
			smmu->evtq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "priq");
		if (irq > 0)
			smmu->priq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "cmdq-sync");
		if (irq > 0)
			smmu->cmdq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "gerror");
		if (irq > 0)
			smmu->gerr_irq = irq;
	}

	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	ret = iommu_device_register(&smmu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}

	return 0;
}
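
/*
 * Removal and shutdown simply disable the SMMU; presumably this is mainly
 * so that a kexec'd or rebooted kernel does not inherit live translations
 * and queue state.
 */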

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);

	return 0;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	arm_smmu_device_remove(pdev);
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");