// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"

struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

struct arm_smmu_bond {
	struct iommu_sva		sva;
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
	refcount_t			refs;
};

#define sva_to_bond(handle) \
	container_of(handle, struct arm_smmu_bond, sva)

static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new ASID,
	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
	 * later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}
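
/*
 * Allocate a shared context descriptor for @mm: pin the mm and its ASID,
 * then mirror the CPU's translation regime (TTBR, TCR, MAIR) into the CD
 * so the SMMU walks the process page tables directly.
 */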
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	/* Don't free the mm until we release the ASID */
	mmgrab(mm);

	asid = arm64_mm_context_get(mm);
	if (!asid) {
		err = -ESRCH;
		goto out_drop_mm;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * MAIR value is pretty much constant and global, so we can just get it
	 * from the current CPU register
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
out_drop_mm:
	mmdrop(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}
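
/* Drop a reference to a shared CD; the last put unpins the ASID and mm. */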
static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		mmdrop(cd->mm);
		kfree(cd);
	}
}
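
/*
 * Called by the mm core when CPU TLBs are invalidated for [start, end).
 * Propagate the invalidation to the SMMU TLBs (unless BTM broadcasts CPU
 * TLB maintenance to the SMMU in hardware) and to device ATCs.
 */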
static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size;

	/*
	 * The mm_types defines vm_end as the first byte after the end address,
	 * different from IOMMU subsystem using the last address of an address
	 * range. So do a simple translation here by calculating size correctly.
	 */
	size = end - start;
	if (size == ULONG_MAX)
		size = 0;

	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
		if (!size)
			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
					      smmu_mn->cd->asid);
		else
			arm_smmu_tlb_inv_range_asid(start, size,
						    smmu_mn->cd->asid,
						    PAGE_SIZE, false,
						    smmu_domain);
	}

	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}
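
/*
 * On mm exit, stop DMA translation for this mm before its page tables are
 * freed. Runs once; "cleared" records that TLBs and ATCs were already
 * flushed, so the final notifier put can skip the invalidation.
 */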
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
	 * but disable translation.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.arch_invalidate_secondary_tlbs	= arm_smmu_mm_arch_invalidate_secondary_tlbs,
	.release			= arm_smmu_mm_release,
	.free_notifier			= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	if (ret)
		goto err_put_notifier;

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_put_notifier:
	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}

static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	/*
	 * If we went through clear(), we've already invalidated, and no
	 * new TLB entry can have been formed.
	 */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
	}

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}
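
/*
 * Bind @mm to @dev: take a reference to an existing bond if one exists,
 * otherwise create one and install the shared CD for this mm's PASID.
 * Caller must hold sva_lock.
 */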
static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	/* If bind() was already called for this {dev, mm} pair, reuse it. */
	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;
		}
	}

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_bond;
	}

	list_add(&bond->list, &master->bonds);
	return &bond->sva;

err_free_bond:
	kfree(bond);
	return ERR_PTR(ret);
}
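
/*
 * Probe-time check: SVA requires that the SMMU and the CPU agree on page
 * size, output address size and ASID width, since the SMMU will walk the
 * CPU's page tables directly.
 */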
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	int ret;
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
	if (ret)
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret) {
		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
		return ret;
	}
	return 0;
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}
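
/*
 * Tear down the {dev, mm} binding for this PASID: drop the bond reference
 * and, on the last put, release the MMU notifier and the shared CD.
 */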
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t id)
{
	struct mm_struct *mm = domain->mm;
	struct arm_smmu_bond *bond = NULL, *t;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	mutex_lock(&sva_lock);
	list_for_each_entry(t, &master->bonds, list) {
		if (t->mm == mm) {
			bond = t;
			break;
		}
	}

	if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}
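
/* Attach the SVA domain's mm to @dev under the given PASID via a bond. */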
static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t id)
{
	int ret = 0;
	struct iommu_sva *handle;
	struct mm_struct *mm = domain->mm;

	mutex_lock(&sva_lock);
	handle = __arm_smmu_sva_bind(dev, mm);
	if (IS_ERR(handle))
		ret = PTR_ERR(handle);
	mutex_unlock(&sva_lock);

	return ret;
}

static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
{
	kfree(domain);
}

static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
	.set_dev_pasid		= arm_smmu_sva_set_dev_pasid,
	.free			= arm_smmu_sva_domain_free
};

struct iommu_domain *arm_smmu_sva_domain_alloc(void)
{
	struct iommu_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;
	domain->ops = &arm_smmu_sva_domain_ops;

	return domain;
}