1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
4 * The io_pagetable is the top of the data structure that maps IOVAs to PFNs. The
5 * PFNs can be placed into an iommu_domain, or returned to the caller as a page
6 * list for access by an in-kernel user.
8 * The data structure uses the iopt_pages to optimize the storage of the PFNs
9 * between the domains and the xarray.
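 *
 * Editor's illustration (not part of the original header): conceptually an
 * iopt_area is an interval of IOVA that references a byte range of one
 * iopt_pages, e.g. IOVA [0x100000, 0x104fff] backed by bytes [0x3000, 0x7fff]
 * of that iopt_pages, while the PFNs themselves live either in an attached
 * iommu_domain or in the iopt_pages' xarray of pinned pages.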
11 #include <linux/iommufd.h>
12 #include <linux/lockdep.h>
13 #include <linux/iommu.h>
14 #include <linux/sched/mm.h>
15 #include <linux/err.h>
16 #include <linux/slab.h>
17 #include <linux/errno.h>
19 #include "io_pagetable.h"
20 #include "double_span.h"
22 struct iopt_pages_list {
23 struct iopt_pages *pages;
24 struct iopt_area *area;
25 struct list_head next;
26 unsigned long start_byte;
30 struct iopt_area *iopt_area_contig_init(struct iopt_area_contig_iter *iter,
31 struct io_pagetable *iopt,
33 unsigned long last_iova)
35 lockdep_assert_held(&iopt->iova_rwsem);
37 iter->cur_iova = iova;
38 iter->last_iova = last_iova;
39 iter->area = iopt_area_iter_first(iopt, iova, iova);
42 if (!iter->area->pages) {
49 struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter)
51 unsigned long last_iova;
55 last_iova = iopt_area_last_iova(iter->area);
56 if (iter->last_iova <= last_iova)
59 iter->cur_iova = last_iova + 1;
60 iter->area = iopt_area_iter_next(iter->area, iter->cur_iova,
64 if (iter->cur_iova != iopt_area_iova(iter->area) ||
72 static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
74 unsigned long iova_alignment,
75 unsigned long page_offset)
77 if (span->is_used || span->last_hole - span->start_hole < length - 1)
80 span->start_hole = ALIGN(span->start_hole, iova_alignment) |
82 if (span->start_hole > span->last_hole ||
83 span->last_hole - span->start_hole < length - 1)
88 static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
90 unsigned long iova_alignment,
91 unsigned long page_offset)
93 if (span->is_hole || span->last_used - span->start_used < length - 1)
96 span->start_used = ALIGN(span->start_used, iova_alignment) |
98 if (span->start_used > span->last_used ||
99 span->last_used - span->start_used < length - 1)
105 * Automatically find a block of IOVA that is not being used and not reserved.
106 * Does not return a 0 IOVA even if it is valid.
108 static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
109 unsigned long uptr, unsigned long length)
111 unsigned long page_offset = uptr % PAGE_SIZE;
112 struct interval_tree_double_span_iter used_span;
113 struct interval_tree_span_iter allowed_span;
114 unsigned long iova_alignment;
116 lockdep_assert_held(&iopt->iova_rwsem);
118 /* Protect roundup_pow_of_two() from overflow */
119 if (length == 0 || length >= ULONG_MAX / 2)
123 * Keep the alignment present in the uptr when building the IOVA; this
124 * increases the chance we can map a THP.
127 iova_alignment = roundup_pow_of_two(length);
129 iova_alignment = min_t(unsigned long,
130 roundup_pow_of_two(length),
131 1UL << __ffs64(uptr));
133 if (iova_alignment < iopt->iova_alignment)
136 interval_tree_for_each_span(&allowed_span, &iopt->allowed_itree,
137 PAGE_SIZE, ULONG_MAX - PAGE_SIZE) {
138 if (RB_EMPTY_ROOT(&iopt->allowed_itree.rb_root)) {
139 allowed_span.start_used = PAGE_SIZE;
140 allowed_span.last_used = ULONG_MAX - PAGE_SIZE;
141 allowed_span.is_hole = false;
144 if (!__alloc_iova_check_used(&allowed_span, length,
145 iova_alignment, page_offset))
148 interval_tree_for_each_double_span(
149 &used_span, &iopt->reserved_itree, &iopt->area_itree,
150 allowed_span.start_used, allowed_span.last_used) {
151 if (!__alloc_iova_check_hole(&used_span, length,
156 *iova = used_span.start_hole;
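
/*
 * Worked example added by the editor (hypothetical values): with
 * length = 0x200000 (2MiB) and uptr = 0x7f1234601000, roundup_pow_of_two(length)
 * is 0x200000 but 1UL << __ffs64(uptr) is only 0x1000, so candidate IOVAs are
 * 4KiB aligned. If the caller instead passes a 2MiB-aligned uptr such as
 * 0x7f1234600000, the minimum of the two terms is 0x200000 and the allocated
 * IOVA keeps 2MiB alignment, which lets a THP-backed buffer be mapped with a
 * large IOMMU page. The low page_offset bits (uptr % PAGE_SIZE) are OR'ed back
 * into every candidate so that the IOVA and uptr stay congruent modulo
 * PAGE_SIZE.
 */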
163 static int iopt_check_iova(struct io_pagetable *iopt, unsigned long iova,
164 unsigned long length)
168 lockdep_assert_held(&iopt->iova_rwsem);
170 if ((iova & (iopt->iova_alignment - 1)))
173 if (check_add_overflow(iova, length - 1, &last))
176 /* No reserved IOVA intersects the range */
177 if (iopt_reserved_iter_first(iopt, iova, last))
180 /* Check that there is not already a mapping in the range */
181 if (iopt_area_iter_first(iopt, iova, last))
187 * The area takes a slice of the pages from start_byte to start_byte + length
189 static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area,
190 struct iopt_pages *pages, unsigned long iova,
191 unsigned long start_byte, unsigned long length,
194 lockdep_assert_held_write(&iopt->iova_rwsem);
196 if ((iommu_prot & IOMMU_WRITE) && !pages->writable)
199 area->iommu_prot = iommu_prot;
200 area->page_offset = start_byte % PAGE_SIZE;
201 if (area->page_offset & (iopt->iova_alignment - 1))
204 area->node.start = iova;
205 if (check_add_overflow(iova, length - 1, &area->node.last))
208 area->pages_node.start = start_byte / PAGE_SIZE;
209 if (check_add_overflow(start_byte, length - 1, &area->pages_node.last))
211 area->pages_node.last = area->pages_node.last / PAGE_SIZE;
212 if (WARN_ON(area->pages_node.last >= pages->npages))
216 * The area is inserted with a NULL pages indicating it is not fully
220 interval_tree_insert(&area->node, &iopt->area_itree);
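
/*
 * Worked example added by the editor (hypothetical numbers): inserting an area
 * with start_byte = 0x3000 and length = 0x5000 on a 4KiB PAGE_SIZE system gives
 * page_offset = 0, pages_node.start = 0x3000 / 0x1000 = 3 and
 * pages_node.last = (0x3000 + 0x5000 - 1) / 0x1000 = 7, i.e. the area covers
 * pages 3..7 of its iopt_pages. A start_byte of 0x3080 records
 * page_offset = 0x80 and is rejected once iopt->iova_alignment exceeds 0x80.
 */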
224 static struct iopt_area *iopt_area_alloc(void)
226 struct iopt_area *area;
228 area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
231 RB_CLEAR_NODE(&area->node.rb);
232 RB_CLEAR_NODE(&area->pages_node.rb);
236 static int iopt_alloc_area_pages(struct io_pagetable *iopt,
237 struct list_head *pages_list,
238 unsigned long length, unsigned long *dst_iova,
239 int iommu_prot, unsigned int flags)
241 struct iopt_pages_list *elm;
245 list_for_each_entry(elm, pages_list, next) {
246 elm->area = iopt_area_alloc();
251 down_write(&iopt->iova_rwsem);
252 if ((length & (iopt->iova_alignment - 1)) || !length) {
257 if (flags & IOPT_ALLOC_IOVA) {
258 /* Use the first entry to guess the ideal IOVA alignment */
259 elm = list_first_entry(pages_list, struct iopt_pages_list,
261 rc = iopt_alloc_iova(
263 (uintptr_t)elm->pages->uptr + elm->start_byte, length);
266 if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
267 WARN_ON(iopt_check_iova(iopt, *dst_iova, length))) {
272 rc = iopt_check_iova(iopt, *dst_iova, length);
278 * Areas are created with a NULL pages so that the IOVA space is
279 * reserved and we can unlock the iova_rwsem.
282 list_for_each_entry(elm, pages_list, next) {
283 rc = iopt_insert_area(iopt, elm->area, elm->pages, iova,
284 elm->start_byte, elm->length, iommu_prot);
291 up_write(&iopt->iova_rwsem);
295 static void iopt_abort_area(struct iopt_area *area)
297 if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
298 WARN_ON(area->pages);
300 down_write(&area->iopt->iova_rwsem);
301 interval_tree_remove(&area->node, &area->iopt->area_itree);
302 up_write(&area->iopt->iova_rwsem);
307 void iopt_free_pages_list(struct list_head *pages_list)
309 struct iopt_pages_list *elm;
311 while ((elm = list_first_entry_or_null(pages_list,
312 struct iopt_pages_list, next))) {
314 iopt_abort_area(elm->area);
316 iopt_put_pages(elm->pages);
317 list_del(&elm->next);
322 static int iopt_fill_domains_pages(struct list_head *pages_list)
324 struct iopt_pages_list *undo_elm;
325 struct iopt_pages_list *elm;
328 list_for_each_entry(elm, pages_list, next) {
329 rc = iopt_area_fill_domains(elm->area, elm->pages);
336 list_for_each_entry(undo_elm, pages_list, next) {
339 iopt_area_unfill_domains(undo_elm->area, undo_elm->pages);
344 int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list,
345 unsigned long length, unsigned long *dst_iova,
346 int iommu_prot, unsigned int flags)
348 struct iopt_pages_list *elm;
351 rc = iopt_alloc_area_pages(iopt, pages_list, length, dst_iova,
356 down_read(&iopt->domains_rwsem);
357 rc = iopt_fill_domains_pages(pages_list);
359 goto out_unlock_domains;
361 down_write(&iopt->iova_rwsem);
362 list_for_each_entry(elm, pages_list, next) {
364 * area->pages must be set inside the domains_rwsem to ensure
365 * any newly added domains will get filled. Moves the reference
368 elm->area->pages = elm->pages;
372 up_write(&iopt->iova_rwsem);
374 up_read(&iopt->domains_rwsem);
379 * iopt_map_user_pages() - Map a user VA to an iova in the io page table
380 * @ictx: iommufd_ctx the iopt is part of
381 * @iopt: io_pagetable to act on
382 * @iova: If IOPT_ALLOC_IOVA is set this is unused on input and contains
383 * the chosen iova on output. Otherwise it is the iova to map to on input
384 * @uptr: User VA to map
385 * @length: Number of bytes to map
386 * @iommu_prot: Combination of IOMMU_READ/WRITE/etc bits for the mapping
387 * @flags: IOPT_ALLOC_IOVA or zero
389 * iova, uptr, and length must be aligned to iova_alignment. For domain backed
390 * page tables this will pin the pages and load them into the domain at iova.
391 * For non-domain page tables this will only setup a lazy reference and the
392 * caller must use iopt_access_pages() to touch them.
394 * iopt_unmap_iova() must be called to undo this before the io_pagetable can be
397 int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
398 unsigned long *iova, void __user *uptr,
399 unsigned long length, int iommu_prot,
402 struct iopt_pages_list elm = {};
403 LIST_HEAD(pages_list);
406 elm.pages = iopt_alloc_pages(uptr, length, iommu_prot & IOMMU_WRITE);
407 if (IS_ERR(elm.pages))
408 return PTR_ERR(elm.pages);
409 if (ictx->account_mode == IOPT_PAGES_ACCOUNT_MM &&
410 elm.pages->account_mode == IOPT_PAGES_ACCOUNT_USER)
411 elm.pages->account_mode = IOPT_PAGES_ACCOUNT_MM;
412 elm.start_byte = uptr - elm.pages->uptr;
414 list_add(&elm.next, &pages_list);
416 rc = iopt_map_pages(iopt, &pages_list, length, iova, iommu_prot, flags);
419 iopt_abort_area(elm.area);
421 iopt_put_pages(elm.pages);
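
/*
 * Usage sketch added by the editor; 'ictx', 'iopt', 'uptr' and 'length' stand
 * for whatever a hypothetical in-kernel caller already holds:
 *
 *	unsigned long iova, unmapped;
 *	int rc;
 *
 *	rc = iopt_map_user_pages(ictx, iopt, &iova, uptr, length,
 *				 IOMMU_READ | IOMMU_WRITE, IOPT_ALLOC_IOVA);
 *	if (rc)
 *		return rc;
 *	... the device can now DMA to [iova, iova + length - 1] ...
 *	rc = iopt_unmap_iova(iopt, iova, length, &unmapped);
 */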
427 int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
428 unsigned long length, struct list_head *pages_list)
430 struct iopt_area_contig_iter iter;
431 unsigned long last_iova;
432 struct iopt_area *area;
437 if (check_add_overflow(iova, length - 1, &last_iova))
440 down_read(&iopt->iova_rwsem);
441 iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
442 struct iopt_pages_list *elm;
443 unsigned long last = min(last_iova, iopt_area_last_iova(area));
445 elm = kzalloc(sizeof(*elm), GFP_KERNEL_ACCOUNT);
450 elm->start_byte = iopt_area_start_byte(area, iter.cur_iova);
451 elm->pages = area->pages;
452 elm->length = (last - iter.cur_iova) + 1;
453 kref_get(&elm->pages->kref);
454 list_add_tail(&elm->next, pages_list);
456 if (!iopt_area_contig_done(&iter)) {
460 up_read(&iopt->iova_rwsem);
463 up_read(&iopt->iova_rwsem);
464 iopt_free_pages_list(pages_list);
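
/*
 * Usage sketch added by the editor: a hypothetical in-kernel caller that wants
 * the iopt_pages covering [iova, iova + length - 1] might do:
 *
 *	LIST_HEAD(pages_list);
 *
 *	rc = iopt_get_pages(iopt, iova, length, &pages_list);
 *	if (rc)
 *		return rc;
 *	... walk the iopt_pages_list entries ...
 *	iopt_free_pages_list(&pages_list);
 *
 * Each list element holds a kref on its iopt_pages, which
 * iopt_free_pages_list() drops again.
 */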
468 static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
469 unsigned long last, unsigned long *unmapped)
471 struct iopt_area *area;
472 unsigned long unmapped_bytes = 0;
473 unsigned int tries = 0;
477 * The domains_rwsem must be held in read mode any time any area->pages
478 * is NULL. This prevents domain attach/detach from running
479 * concurrently with cleaning up the area.
482 down_read(&iopt->domains_rwsem);
483 down_write(&iopt->iova_rwsem);
484 while ((area = iopt_area_iter_first(iopt, start, last))) {
485 unsigned long area_last = iopt_area_last_iova(area);
486 unsigned long area_first = iopt_area_iova(area);
487 struct iopt_pages *pages;
489 /* Userspace should not race map/unmap operations on the same area */
492 goto out_unlock_iova;
495 if (area_first < start || area_last > last) {
497 goto out_unlock_iova;
500 if (area_first != start)
504 * num_accesses writers must hold the iova_rwsem too, so we can
505 * safely read it under the write side of the iova_rwsem
506 * without the pages->mutex.
508 if (area->num_accesses) {
509 size_t length = iopt_area_length(area);
512 area->prevent_access = true;
513 up_write(&iopt->iova_rwsem);
514 up_read(&iopt->domains_rwsem);
516 iommufd_access_notify_unmap(iopt, area_first, length);
517 /* Something is not responding to unmap requests. */
519 if (WARN_ON(tries > 100))
526 up_write(&iopt->iova_rwsem);
528 iopt_area_unfill_domains(area, pages);
529 iopt_abort_area(area);
530 iopt_put_pages(pages);
532 unmapped_bytes += area_last - area_first + 1;
534 down_write(&iopt->iova_rwsem);
540 up_write(&iopt->iova_rwsem);
541 up_read(&iopt->domains_rwsem);
543 *unmapped = unmapped_bytes;
548 * iopt_unmap_iova() - Remove a range of iova
549 * @iopt: io_pagetable to act on
550 * @iova: Starting iova to unmap
551 * @length: Number of bytes to unmap
552 * @unmapped: Return number of bytes unmapped
554 * The requested range must be a superset of existing ranges.
555 * Splitting/truncating IOVA mappings is not allowed.
557 int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
558 unsigned long length, unsigned long *unmapped)
560 unsigned long iova_last;
565 if (check_add_overflow(iova, length - 1, &iova_last))
568 return iopt_unmap_iova_range(iopt, iova, iova_last, unmapped);
571 int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped)
575 rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
576 /* If the IOVAs are empty then unmap all succeeds */
582 /* The caller must always free all the nodes in the allowed_iova rb_root. */
583 int iopt_set_allow_iova(struct io_pagetable *iopt,
584 struct rb_root_cached *allowed_iova)
586 struct iopt_allowed *allowed;
588 down_write(&iopt->iova_rwsem);
589 swap(*allowed_iova, iopt->allowed_itree);
591 for (allowed = iopt_allowed_iter_first(iopt, 0, ULONG_MAX); allowed;
592 allowed = iopt_allowed_iter_next(allowed, 0, ULONG_MAX)) {
593 if (iopt_reserved_iter_first(iopt, allowed->node.start,
594 allowed->node.last)) {
595 swap(*allowed_iova, iopt->allowed_itree);
596 up_write(&iopt->iova_rwsem);
600 up_write(&iopt->iova_rwsem);
604 int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
605 unsigned long last, void *owner)
607 struct iopt_reserved *reserved;
609 lockdep_assert_held_write(&iopt->iova_rwsem);
611 if (iopt_area_iter_first(iopt, start, last) ||
612 iopt_allowed_iter_first(iopt, start, last))
615 reserved = kzalloc(sizeof(*reserved), GFP_KERNEL_ACCOUNT);
618 reserved->node.start = start;
619 reserved->node.last = last;
620 reserved->owner = owner;
621 interval_tree_insert(&reserved->node, &iopt->reserved_itree);
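
/*
 * Usage sketch added by the editor; 'owner' is any pointer a hypothetical
 * caller will later use to find its reservations again:
 *
 *	down_write(&iopt->iova_rwsem);
 *	rc = iopt_reserve_iova(iopt, 0, PAGE_SIZE - 1, owner);
 *	up_write(&iopt->iova_rwsem);
 *	...
 *	iopt_remove_reserved_iova(iopt, owner);
 *
 * iopt_reserve_iova() refuses ranges that overlap an existing area or an
 * allowed range, and iopt_remove_reserved_iova() drops every reservation
 * tagged with the same owner.
 */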
625 static void __iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner)
627 struct iopt_reserved *reserved, *next;
629 lockdep_assert_held_write(&iopt->iova_rwsem);
631 for (reserved = iopt_reserved_iter_first(iopt, 0, ULONG_MAX); reserved;
633 next = iopt_reserved_iter_next(reserved, 0, ULONG_MAX);
635 if (reserved->owner == owner) {
636 interval_tree_remove(&reserved->node,
637 &iopt->reserved_itree);
643 void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner)
645 down_write(&iopt->iova_rwsem);
646 __iopt_remove_reserved_iova(iopt, owner);
647 up_write(&iopt->iova_rwsem);
650 void iopt_init_table(struct io_pagetable *iopt)
652 init_rwsem(&iopt->iova_rwsem);
653 init_rwsem(&iopt->domains_rwsem);
654 iopt->area_itree = RB_ROOT_CACHED;
655 iopt->allowed_itree = RB_ROOT_CACHED;
656 iopt->reserved_itree = RB_ROOT_CACHED;
657 xa_init_flags(&iopt->domains, XA_FLAGS_ACCOUNT);
658 xa_init_flags(&iopt->access_list, XA_FLAGS_ALLOC);
661 * iopts start as SW tables that can use the entire size_t IOVA space
662 * due to the use of size_t in the APIs. They have no alignment
665 iopt->iova_alignment = 1;
668 void iopt_destroy_table(struct io_pagetable *iopt)
670 struct interval_tree_node *node;
672 if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
673 iopt_remove_reserved_iova(iopt, NULL);
675 while ((node = interval_tree_iter_first(&iopt->allowed_itree, 0,
677 interval_tree_remove(node, &iopt->allowed_itree);
678 kfree(container_of(node, struct iopt_allowed, node));
681 WARN_ON(!RB_EMPTY_ROOT(&iopt->reserved_itree.rb_root));
682 WARN_ON(!xa_empty(&iopt->domains));
683 WARN_ON(!xa_empty(&iopt->access_list));
684 WARN_ON(!RB_EMPTY_ROOT(&iopt->area_itree.rb_root));
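
/*
 * Lifecycle sketch added by the editor: a table is expected to go through
 * roughly the following sequence (hypothetical owner shown):
 *
 *	iopt_init_table(iopt);
 *	... map/unmap, add/remove domains and accesses ...
 *	iopt_unmap_all(iopt, &unmapped);
 *	iopt_destroy_table(iopt);
 *
 * The WARN_ONs above fire if areas, domains or accesses are still present at
 * destroy time, so the owner must tear those down first.
 */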
688 * iopt_unfill_domain() - Unfill a domain with PFNs
689 * @iopt: io_pagetable to act on
690 * @domain: domain to unfill
692 * This is used when removing a domain from the iopt. Every area in the iopt
693 * will be unmapped from the domain. The domain must already be removed from the
696 static void iopt_unfill_domain(struct io_pagetable *iopt,
697 struct iommu_domain *domain)
699 struct iopt_area *area;
701 lockdep_assert_held(&iopt->iova_rwsem);
702 lockdep_assert_held_write(&iopt->domains_rwsem);
705 * Some other domain is holding all the pfns still, rapidly unmap this
708 if (iopt->next_domain_id != 0) {
709 /* Pick an arbitrary remaining domain to act as storage */
710 struct iommu_domain *storage_domain =
711 xa_load(&iopt->domains, 0);
713 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
714 area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
715 struct iopt_pages *pages = area->pages;
720 mutex_lock(&pages->mutex);
721 if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
722 WARN_ON(!area->storage_domain);
723 if (area->storage_domain == domain)
724 area->storage_domain = storage_domain;
725 mutex_unlock(&pages->mutex);
727 iopt_area_unmap_domain(area, domain);
732 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
733 area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
734 struct iopt_pages *pages = area->pages;
739 mutex_lock(&pages->mutex);
740 interval_tree_remove(&area->pages_node, &pages->domains_itree);
741 WARN_ON(area->storage_domain != domain);
742 area->storage_domain = NULL;
743 iopt_area_unfill_domain(area, pages, domain);
744 mutex_unlock(&pages->mutex);
749 * iopt_fill_domain() - Fill a domain with PFNs
750 * @iopt: io_pagetable to act on
751 * @domain: domain to fill
753 * Fill the domain with PFNs from every area in the iopt. On failure the domain
756 static int iopt_fill_domain(struct io_pagetable *iopt,
757 struct iommu_domain *domain)
759 struct iopt_area *end_area;
760 struct iopt_area *area;
763 lockdep_assert_held(&iopt->iova_rwsem);
764 lockdep_assert_held_write(&iopt->domains_rwsem);
766 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
767 area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
768 struct iopt_pages *pages = area->pages;
773 mutex_lock(&pages->mutex);
774 rc = iopt_area_fill_domain(area, domain);
776 mutex_unlock(&pages->mutex);
779 if (!area->storage_domain) {
780 WARN_ON(iopt->next_domain_id != 0);
781 area->storage_domain = domain;
782 interval_tree_insert(&area->pages_node,
783 &pages->domains_itree);
785 mutex_unlock(&pages->mutex);
791 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
792 area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
793 struct iopt_pages *pages = area->pages;
795 if (area == end_area)
799 mutex_lock(&pages->mutex);
800 if (iopt->next_domain_id == 0) {
801 interval_tree_remove(&area->pages_node,
802 &pages->domains_itree);
803 area->storage_domain = NULL;
805 iopt_area_unfill_domain(area, pages, domain);
806 mutex_unlock(&pages->mutex);
811 /* Check that all existing areas conform to the increased page size */
812 static int iopt_check_iova_alignment(struct io_pagetable *iopt,
813 unsigned long new_iova_alignment)
815 unsigned long align_mask = new_iova_alignment - 1;
816 struct iopt_area *area;
818 lockdep_assert_held(&iopt->iova_rwsem);
819 lockdep_assert_held(&iopt->domains_rwsem);
821 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
822 area = iopt_area_iter_next(area, 0, ULONG_MAX))
823 if ((iopt_area_iova(area) & align_mask) ||
824 (iopt_area_length(area) & align_mask) ||
825 (area->page_offset & align_mask))
828 if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) {
829 struct iommufd_access *access;
832 xa_for_each(&iopt->access_list, index, access)
833 if (WARN_ON(access->iova_alignment >
840 int iopt_table_add_domain(struct io_pagetable *iopt,
841 struct iommu_domain *domain)
843 const struct iommu_domain_geometry *geometry = &domain->geometry;
844 struct iommu_domain *iter_domain;
845 unsigned int new_iova_alignment;
849 down_write(&iopt->domains_rwsem);
850 down_write(&iopt->iova_rwsem);
852 xa_for_each(&iopt->domains, index, iter_domain) {
853 if (WARN_ON(iter_domain == domain)) {
860 * The io page size drives the iova_alignment. Internally the iopt_pages
861 * works in PAGE_SIZE units and we adjust when mapping sub-PAGE_SIZE
862 * objects into the iommu_domain.
864 * An iommu_domain must always be able to accept PAGE_SIZE to be
865 * compatible as we can't guarantee higher contiguity.
867 new_iova_alignment = max_t(unsigned long,
868 1UL << __ffs(domain->pgsize_bitmap),
869 iopt->iova_alignment);
870 if (new_iova_alignment > PAGE_SIZE) {
874 if (new_iova_alignment != iopt->iova_alignment) {
875 rc = iopt_check_iova_alignment(iopt, new_iova_alignment);
880 /* No area exists that is outside the allowed domain aperture */
881 if (geometry->aperture_start != 0) {
882 rc = iopt_reserve_iova(iopt, 0, geometry->aperture_start - 1,
887 if (geometry->aperture_end != ULONG_MAX) {
888 rc = iopt_reserve_iova(iopt, geometry->aperture_end + 1,
894 rc = xa_reserve(&iopt->domains, iopt->next_domain_id, GFP_KERNEL);
898 rc = iopt_fill_domain(iopt, domain);
902 iopt->iova_alignment = new_iova_alignment;
903 xa_store(&iopt->domains, iopt->next_domain_id, domain, GFP_KERNEL);
904 iopt->next_domain_id++;
905 up_write(&iopt->iova_rwsem);
906 up_write(&iopt->domains_rwsem);
909 xa_release(&iopt->domains, iopt->next_domain_id);
911 __iopt_remove_reserved_iova(iopt, domain);
913 up_write(&iopt->iova_rwsem);
914 up_write(&iopt->domains_rwsem);
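
/*
 * Usage sketch added by the editor: a hypothetical attach path pairs this with
 * iopt_table_remove_domain() on detach:
 *
 *	rc = iopt_table_add_domain(iopt, domain);
 *	if (rc)
 *		return rc;
 *	... the domain now receives every existing and future mapping ...
 *	iopt_table_remove_domain(iopt, domain);
 *
 * Adding a domain can fail if an existing area or access is not aligned to the
 * domain's minimum IO page size, or if an area falls outside its aperture.
 */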
918 static int iopt_calculate_iova_alignment(struct io_pagetable *iopt)
920 unsigned long new_iova_alignment;
921 struct iommufd_access *access;
922 struct iommu_domain *domain;
925 lockdep_assert_held_write(&iopt->iova_rwsem);
926 lockdep_assert_held(&iopt->domains_rwsem);
928 /* See batch_iommu_map_small() */
929 if (iopt->disable_large_pages)
930 new_iova_alignment = PAGE_SIZE;
932 new_iova_alignment = 1;
934 xa_for_each(&iopt->domains, index, domain)
935 new_iova_alignment = max_t(unsigned long,
936 1UL << __ffs(domain->pgsize_bitmap),
938 xa_for_each(&iopt->access_list, index, access)
939 new_iova_alignment = max_t(unsigned long,
940 access->iova_alignment,
943 if (new_iova_alignment > iopt->iova_alignment) {
946 rc = iopt_check_iova_alignment(iopt, new_iova_alignment);
950 iopt->iova_alignment = new_iova_alignment;
954 void iopt_table_remove_domain(struct io_pagetable *iopt,
955 struct iommu_domain *domain)
957 struct iommu_domain *iter_domain = NULL;
960 down_write(&iopt->domains_rwsem);
961 down_write(&iopt->iova_rwsem);
963 xa_for_each(&iopt->domains, index, iter_domain)
964 if (iter_domain == domain)
966 if (WARN_ON(iter_domain != domain) || index >= iopt->next_domain_id)
970 * Compress the xarray to keep it linear by swapping the entry to erase
971 * with the tail entry and shrinking the tail.
973 iopt->next_domain_id--;
974 iter_domain = xa_erase(&iopt->domains, iopt->next_domain_id);
975 if (index != iopt->next_domain_id)
976 xa_store(&iopt->domains, index, iter_domain, GFP_KERNEL);
978 iopt_unfill_domain(iopt, domain);
979 __iopt_remove_reserved_iova(iopt, domain);
981 WARN_ON(iopt_calculate_iova_alignment(iopt));
983 up_write(&iopt->iova_rwsem);
984 up_write(&iopt->domains_rwsem);
988 * iopt_area_split - Split an area into two parts at iova
989 * @area: The area to split
990 * @iova: Becomes the last iova of the first new area
992 * This splits an area into two. It is part of the VFIO compatibility to allow
993 * poking a hole in the mapping. The two areas continue to point at the same
994 * iopt_pages, just with different starting bytes.
996 static int iopt_area_split(struct iopt_area *area, unsigned long iova)
998 unsigned long alignment = area->iopt->iova_alignment;
999 unsigned long last_iova = iopt_area_last_iova(area);
1000 unsigned long start_iova = iopt_area_iova(area);
1001 unsigned long new_start = iova + 1;
1002 struct io_pagetable *iopt = area->iopt;
1003 struct iopt_pages *pages = area->pages;
1004 struct iopt_area *lhs;
1005 struct iopt_area *rhs;
1008 lockdep_assert_held_write(&iopt->iova_rwsem);
1010 if (iova == start_iova || iova == last_iova)
1013 if (!pages || area->prevent_access)
1016 if (new_start & (alignment - 1) ||
1017 iopt_area_start_byte(area, new_start) & (alignment - 1))
1020 lhs = iopt_area_alloc();
1024 rhs = iopt_area_alloc();
1030 mutex_lock(&pages->mutex);
1032 * Splitting is not permitted if an access exists; we don't track enough
1033 * information to split existing accesses.
1035 if (area->num_accesses) {
1041 * Splitting is not permitted if a domain could have been mapped with
1044 if (area->storage_domain && !iopt->disable_large_pages) {
1049 interval_tree_remove(&area->node, &iopt->area_itree);
1050 rc = iopt_insert_area(iopt, lhs, area->pages, start_iova,
1051 iopt_area_start_byte(area, start_iova),
1052 (new_start - 1) - start_iova + 1,
1057 rc = iopt_insert_area(iopt, rhs, area->pages, new_start,
1058 iopt_area_start_byte(area, new_start),
1059 last_iova - new_start + 1, area->iommu_prot);
1061 goto err_remove_lhs;
1063 lhs->storage_domain = area->storage_domain;
1064 lhs->pages = area->pages;
1065 rhs->storage_domain = area->storage_domain;
1066 rhs->pages = area->pages;
1067 kref_get(&rhs->pages->kref);
1069 mutex_unlock(&pages->mutex);
1072 * No change to domains or accesses because the pages hasn't been
1078 interval_tree_remove(&lhs->node, &iopt->area_itree);
1080 interval_tree_insert(&area->node, &iopt->area_itree);
1082 mutex_unlock(&pages->mutex);
1089 int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas,
1095 down_write(&iopt->iova_rwsem);
1096 for (i = 0; i < num_iovas; i++) {
1097 struct iopt_area *area;
1099 area = iopt_area_iter_first(iopt, iovas[i], iovas[i]);
1102 rc = iopt_area_split(area, iovas[i]);
1106 up_write(&iopt->iova_rwsem);
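
/*
 * Usage sketch added by the editor: the VFIO compatibility path punches a hole
 * [unmap_iova, unmap_last] out of a larger mapping by cutting at both edges and
 * then unmapping the interior (hypothetical caller, assuming unmap_iova != 0):
 *
 *	unsigned long iovas[] = { unmap_iova - 1, unmap_last };
 *
 *	rc = iopt_cut_iova(iopt, iovas, ARRAY_SIZE(iovas));
 *	if (rc)
 *		return rc;
 *	rc = iopt_unmap_iova(iopt, unmap_iova, unmap_last - unmap_iova + 1,
 *			     &unmapped);
 */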
1110 void iopt_enable_large_pages(struct io_pagetable *iopt)
1114 down_write(&iopt->domains_rwsem);
1115 down_write(&iopt->iova_rwsem);
1116 WRITE_ONCE(iopt->disable_large_pages, false);
1117 rc = iopt_calculate_iova_alignment(iopt);
1119 up_write(&iopt->iova_rwsem);
1120 up_write(&iopt->domains_rwsem);
1123 int iopt_disable_large_pages(struct io_pagetable *iopt)
1127 down_write(&iopt->domains_rwsem);
1128 down_write(&iopt->iova_rwsem);
1129 if (iopt->disable_large_pages)
1132 /* Won't do it if domains already have pages mapped in them */
1133 if (!xa_empty(&iopt->domains) &&
1134 !RB_EMPTY_ROOT(&iopt->area_itree.rb_root)) {
1139 WRITE_ONCE(iopt->disable_large_pages, true);
1140 rc = iopt_calculate_iova_alignment(iopt);
1142 WRITE_ONCE(iopt->disable_large_pages, false);
1144 up_write(&iopt->iova_rwsem);
1145 up_write(&iopt->domains_rwsem);
1149 int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
1153 down_write(&iopt->domains_rwsem);
1154 down_write(&iopt->iova_rwsem);
1155 rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access,
1156 xa_limit_16b, GFP_KERNEL_ACCOUNT);
1160 rc = iopt_calculate_iova_alignment(iopt);
1162 xa_erase(&iopt->access_list, access->iopt_access_list_id);
1167 up_write(&iopt->iova_rwsem);
1168 up_write(&iopt->domains_rwsem);
1172 void iopt_remove_access(struct io_pagetable *iopt,
1173 struct iommufd_access *access,
1174 u32 iopt_access_list_id)
1176 down_write(&iopt->domains_rwsem);
1177 down_write(&iopt->iova_rwsem);
1178 WARN_ON(xa_erase(&iopt->access_list, iopt_access_list_id) != access);
1179 WARN_ON(iopt_calculate_iova_alignment(iopt));
1180 up_write(&iopt->iova_rwsem);
1181 up_write(&iopt->domains_rwsem);
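
/*
 * Usage sketch added by the editor: iopt_add_access() stores the allocated ID
 * in access->iopt_access_list_id and the teardown path passes it back in
 * (hypothetical caller shown):
 *
 *	rc = iopt_add_access(iopt, access);
 *	if (rc)
 *		return rc;
 *	...
 *	iopt_remove_access(iopt, access, access->iopt_access_list_id);
 */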
1184 /* Narrow the usable IOVA space by adding a device's reserved regions to the reserved_itree. */
1185 int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
1187 phys_addr_t *sw_msi_start)
1189 struct iommu_resv_region *resv;
1190 LIST_HEAD(resv_regions);
1191 unsigned int num_hw_msi = 0;
1192 unsigned int num_sw_msi = 0;
1195 if (iommufd_should_fail())
1198 down_write(&iopt->iova_rwsem);
1199 /* FIXME: drivers allocate memory but there is no failure propagated */
1200 iommu_get_resv_regions(dev, &resv_regions);
1202 list_for_each_entry(resv, &resv_regions, list) {
1203 if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
1206 if (sw_msi_start && resv->type == IOMMU_RESV_MSI)
1208 if (sw_msi_start && resv->type == IOMMU_RESV_SW_MSI) {
1209 *sw_msi_start = resv->start;
1213 rc = iopt_reserve_iova(iopt, resv->start,
1214 resv->length - 1 + resv->start, dev);
1219 /* Drivers must offer sane combinations of regions */
1220 if (WARN_ON(num_sw_msi && num_hw_msi) || WARN_ON(num_sw_msi > 1)) {
1229 __iopt_remove_reserved_iova(iopt, dev);
1231 iommu_put_resv_regions(dev, &resv_regions);
1232 up_write(&iopt->iova_rwsem);
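
/*
 * Usage sketch added by the editor: a hypothetical device attach path would
 * call this before creating mappings and undo it on detach by owner, the owner
 * being the struct device itself:
 *
 *	phys_addr_t sw_msi_start = PHYS_ADDR_MAX;
 *
 *	rc = iopt_table_enforce_dev_resv_regions(iopt, dev, &sw_msi_start);
 *	if (rc)
 *		return rc;
 *	...
 *	iopt_remove_reserved_iova(iopt, dev);
 */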