// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/interval_tree.h>
#include <linux/iommufd.h>
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "io_pagetable.h"

void iommufd_ioas_destroy(struct iommufd_object *obj)
{
	struct iommufd_ioas *ioas = container_of(obj, struct iommufd_ioas, obj);
	int rc;

	rc = iopt_unmap_all(&ioas->iopt, NULL);
	WARN_ON(rc && rc != -ENOENT);
	iopt_destroy_table(&ioas->iopt);
	mutex_destroy(&ioas->mutex);
}

struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx)
{
	struct iommufd_ioas *ioas;

	ioas = iommufd_object_alloc(ictx, ioas, IOMMUFD_OBJ_IOAS);
	if (IS_ERR(ioas))
		return ioas;

	iopt_init_table(&ioas->iopt);
	INIT_LIST_HEAD(&ioas->hwpt_list);
	mutex_init(&ioas->mutex);
	return ioas;
}

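/*
 * IOAS creation ioctl handler. Note the ordering: the new object id is copied
 * back to userspace before the object is finalized, and the object is aborted
 * and destroyed if that copy fails, so a failed ioctl never leaves a
 * partially visible IOAS behind.
 */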
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_alloc *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc;

	if (cmd->flags)
		return -EOPNOTSUPP;

	ioas = iommufd_ioas_alloc(ucmd->ictx);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	cmd->out_ioas_id = ioas->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_table;
	iommufd_object_finalize(ucmd->ictx, &ioas->obj);
	return 0;

out_table:
	iommufd_object_abort_and_destroy(ucmd->ictx, &ioas->obj);
	return rc;
}

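/*
 * Report the IOVA ranges usable in this IOAS by walking the holes between
 * reserved regions. Userspace passes the capacity of its output array in
 * num_iovas; on return num_iovas holds the number of ranges that exist, and
 * -EMSGSIZE is returned if the array was too small to hold them all.
 * out_iova_alignment reports the IOAS's current IOVA alignment requirement.
 */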
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)
{
	struct iommu_iova_range __user *ranges;
	struct iommu_ioas_iova_ranges *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	struct interval_tree_span_iter span;
	u32 max_iovas;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	down_read(&ioas->iopt.iova_rwsem);
	max_iovas = cmd->num_iovas;
	ranges = u64_to_user_ptr(cmd->allowed_iovas);
	cmd->num_iovas = 0;
	cmd->out_iova_alignment = ioas->iopt.iova_alignment;
	interval_tree_for_each_span(&span, &ioas->iopt.reserved_itree, 0,
				    ULONG_MAX) {
		if (!span.is_hole)
			continue;
		if (cmd->num_iovas < max_iovas) {
			struct iommu_iova_range elm = {
				.start = span.start_hole,
				.last = span.last_hole,
			};

			if (copy_to_user(&ranges[cmd->num_iovas], &elm,
					 sizeof(elm))) {
				rc = -EFAULT;
				goto out_put;
			}
		}
		cmd->num_iovas++;
	}
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_put;
	if (cmd->num_iovas > max_iovas)
		rc = -EMSGSIZE;
out_put:
	up_read(&ioas->iopt.iova_rwsem);
	iommufd_put_object(&ioas->obj);
	return rc;
}

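/*
 * Copy the user supplied range list into a temporary interval tree. Ranges
 * that are inverted/empty or that overlap an already-loaded range are
 * rejected; each accepted range becomes one iopt_allowed node.
 */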
static int iommufd_ioas_load_iovas(struct rb_root_cached *itree,
				   struct iommu_iova_range __user *ranges,
				   u32 num)
{
	u32 i;

	for (i = 0; i != num; i++) {
		struct iommu_iova_range range;
		struct iopt_allowed *allowed;

		if (copy_from_user(&range, ranges + i, sizeof(range)))
			return -EFAULT;

		if (range.start >= range.last)
			return -EINVAL;

		if (interval_tree_iter_first(itree, range.start, range.last))
			return -EINVAL;

		allowed = kzalloc(sizeof(*allowed), GFP_KERNEL_ACCOUNT);
		if (!allowed)
			return -ENOMEM;
		allowed->node.start = range.start;
		allowed->node.last = range.last;

		interval_tree_insert(&allowed->node, itree);
	}
	return 0;
}

int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_allow_iovas *cmd = ucmd->cmd;
	struct rb_root_cached allowed_iova = RB_ROOT_CACHED;
	struct interval_tree_node *node;
	struct iommufd_ioas *ioas;
	struct io_pagetable *iopt;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	iopt = &ioas->iopt;

	rc = iommufd_ioas_load_iovas(&allowed_iova,
				     u64_to_user_ptr(cmd->allowed_iovas),
				     cmd->num_iovas);
	if (rc)
		goto out_free;

	/*
	 * We want the allowed tree update to be atomic, so we have to keep the
	 * original nodes around, and keep track of the new nodes as we allocate
	 * memory for them. The simplest solution is to have a new/old tree and
	 * then swap new for old. On success we free the old tree, on failure we
	 * free the new tree.
	 */
	rc = iopt_set_allow_iova(iopt, &allowed_iova);
out_free:
	while ((node = interval_tree_iter_first(&allowed_iova, 0, ULONG_MAX))) {
		interval_tree_remove(node, &allowed_iova);
		kfree(container_of(node, struct iopt_allowed, node));
	}
	iommufd_put_object(&ioas->obj);
	return rc;
}

static int conv_iommu_prot(u32 map_flags)
{
	/*
	 * We provide no manual cache coherency ioctls to userspace and most
	 * architectures make the CPU ops for cache flushing privileged.
	 * Therefore we require the underlying IOMMU to support CPU coherent
	 * operation. Support for IOMMU_CACHE is enforced by the
	 * IOMMU_CAP_CACHE_COHERENCY test during bind.
	 */
	int iommu_prot = IOMMU_CACHE;

	if (map_flags & IOMMU_IOAS_MAP_WRITEABLE)
		iommu_prot |= IOMMU_WRITE;
	if (map_flags & IOMMU_IOAS_MAP_READABLE)
		iommu_prot |= IOMMU_READ;
	return iommu_prot;
}

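/*
 * Map a userspace VA range into the IOAS. As an illustrative sketch only
 * (not part of this file), and assuming the IOMMU_IOAS_MAP ioctl number and
 * struct layout exposed by include/uapi/linux/iommufd.h, userspace drives
 * this handler roughly as follows:
 *
 *	struct iommu_ioas_map cmd = {
 *		.size = sizeof(cmd),
 *		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
 *		.ioas_id = ioas_id,
 *		.user_va = (uintptr_t)buffer,
 *		.length = buffer_len,
 *	};
 *	ioctl(iommufd, IOMMU_IOAS_MAP, &cmd);
 *
 * Without IOMMU_IOAS_MAP_FIXED_IOVA the kernel allocates the IOVA and writes
 * it back into cmd.iova; with the flag set, cmd.iova is used as-is.
 */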
int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_map *cmd = ucmd->cmd;
	unsigned long iova = cmd->iova;
	struct iommufd_ioas *ioas;
	unsigned int flags = 0;
	int rc;

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;
	if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
		return -EOVERFLOW;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	rc = iopt_map_user_pages(ucmd->ictx, &ioas->iopt, &iova,
				 u64_to_user_ptr(cmd->user_va), cmd->length,
				 conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put;

	cmd->iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put:
	iommufd_put_object(&ioas->obj);
	return rc;
}

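/*
 * Copy mappings from one IOAS to another. The source pages are gathered into
 * a local pages_list and the source IOAS reference is dropped before the
 * destination IOAS is acquired, so the two objects are never held at the
 * same time.
 */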
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_copy *cmd = ucmd->cmd;
	struct iommufd_ioas *src_ioas;
	struct iommufd_ioas *dst_ioas;
	unsigned int flags = 0;
	LIST_HEAD(pages_list);
	unsigned long iova;
	int rc;

	iommufd_test_syz_conv_iova_id(ucmd, cmd->src_ioas_id, &cmd->src_iova,
				      &cmd->flags);

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)))
		return -EOPNOTSUPP;
	if (cmd->length >= ULONG_MAX || cmd->src_iova >= ULONG_MAX ||
	    cmd->dst_iova >= ULONG_MAX)
		return -EOVERFLOW;

	src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id);
	if (IS_ERR(src_ioas))
		return PTR_ERR(src_ioas);
	rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length,
			    &pages_list);
	iommufd_put_object(&src_ioas->obj);
	if (rc)
		return rc;

	dst_ioas = iommufd_get_ioas(ucmd->ictx, cmd->dst_ioas_id);
	if (IS_ERR(dst_ioas)) {
		rc = PTR_ERR(dst_ioas);
		goto out_pages;
	}

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	iova = cmd->dst_iova;
	rc = iopt_map_pages(&dst_ioas->iopt, &pages_list, cmd->length, &iova,
			    conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put_dst;

	cmd->dst_iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put_dst:
	iommufd_put_object(&dst_ioas->obj);
out_pages:
	iopt_free_pages_list(&pages_list);
	return rc;
}

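/*
 * Unmap a range of IOVA. The special case of iova == 0 and length == U64_MAX
 * means "unmap everything in the IOAS". On success cmd->length is updated to
 * the number of bytes actually unmapped.
 */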
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_unmap *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	unsigned long unmapped = 0;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	if (cmd->iova == 0 && cmd->length == U64_MAX) {
		rc = iopt_unmap_all(&ioas->iopt, &unmapped);
		if (rc)
			goto out_put;
	} else {
		if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) {
			rc = -EOVERFLOW;
			goto out_put;
		}
		rc = iopt_unmap_iova(&ioas->iopt, cmd->iova, cmd->length,
				     &unmapped);
		if (rc)
			goto out_put;
	}

	cmd->length = unmapped;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_put:
	iommufd_put_object(&ioas->obj);
	return rc;
}

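/*
 * Global option selecting how pinned pages are accounted: val64 == 0 selects
 * per-user rlimit accounting, val64 == 1 selects mm-based accounting.
 * Changing it requires CAP_SYS_RESOURCE and is only permitted while the
 * context has no objects.
 */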
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx)
{
	if (cmd->object_id)
		return -EOPNOTSUPP;

	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = ictx->account_mode == IOPT_PAGES_ACCOUNT_MM;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		int rc = 0;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		xa_lock(&ictx->objects);
		if (!xa_empty(&ictx->objects)) {
			rc = -EBUSY;
		} else {
			if (cmd->val64 == 0)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_USER;
			else if (cmd->val64 == 1)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_MM;
			else
				rc = -EINVAL;
		}
		xa_unlock(&ictx->objects);
		return rc;
	}
	return -EOPNOTSUPP;
}

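/*
 * IOMMU_OPTION_HUGE_PAGES: val64 == 0 disables large IOPTE sizes for this
 * IOAS, val64 == 1 re-enables them. OP_GET reports 1 when large pages are
 * currently allowed.
 */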
static int iommufd_ioas_option_huge_pages(struct iommu_option *cmd,
					  struct iommufd_ioas *ioas)
{
	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = !ioas->iopt.disable_large_pages;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		if (cmd->val64 == 0)
			return iopt_disable_large_pages(&ioas->iopt);
		if (cmd->val64 == 1) {
			iopt_enable_large_pages(&ioas->iopt);
			return 0;
		}
		return -EINVAL;
	}
	return -EOPNOTSUPP;
}

int iommufd_ioas_option(struct iommufd_ucmd *ucmd)
{
	struct iommu_option *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->object_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	switch (cmd->option_id) {
	case IOMMU_OPTION_HUGE_PAGES:
		rc = iommufd_ioas_option_huge_pages(cmd, ioas);
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	iommufd_put_object(&ioas->obj);
	return rc;
}