/*
 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

void uverbs_uobject_get(struct ib_uobject *uobject)
{
	kref_get(&uobject->ref);
}

static void uverbs_uobject_free(struct kref *ref)
{
	struct ib_uobject *uobj =
		container_of(ref, struct ib_uobject, ref);

	/* RCU-delayed free is only needed for IDR objects, see uverbs_idr_class */
	if (uobj->uapi_object->type_class->needs_kfree_rcu)
		kfree_rcu(uobj, rcu);
	else
		kfree(uobj);
}

void uverbs_uobject_put(struct ib_uobject *uobject)
{
	kref_put(&uobject->ref, uverbs_uobject_free);
}

static int uverbs_try_lock_object(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	/*
	 * When a shared access is required, we use a positive counter. Each
	 * shared access request checks that the value != -1 and increments it.
	 * Exclusive access is required for operations like write or destroy.
	 * In exclusive access mode, we check that the counter is zero (nobody
	 * claimed this object) and we set it to -1. Releasing a shared access
	 * lock is done simply by decreasing the counter. As for exclusive
	 * access locks, since only a single one of them is allowed
	 * concurrently, setting the counter to zero is enough for releasing
	 * this lock.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;
	case UVERBS_LOOKUP_WRITE:
		/* lock is exclusive */
		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
	case UVERBS_LOOKUP_DESTROY:
		return 0;
	}
	return 0;
}

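/*
 * A rough sketch of the usecnt states implemented above (informal, for
 * reference only):
 *
 *	usecnt == 0	unlocked: READ moves it to 1..N, WRITE swaps it to -1
 *	usecnt  > 0	shared by N readers: READ increments, WRITE gets -EBUSY
 *	usecnt == -1	exclusively locked: READ and WRITE both get -EBUSY
 *
 * rdma_lookup_put_uobject() reverses the transition: atomic_dec() for READ
 * and atomic_set(0) for WRITE. DESTROY lookups take no lock at this level.
 */
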
static void assert_uverbs_usecnt(struct ib_uobject *uobj,
				 enum rdma_lookup_mode mode)
{
#ifdef CONFIG_LOCKDEP
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
		break;
	case UVERBS_LOOKUP_WRITE:
		WARN_ON(atomic_read(&uobj->usecnt) != -1);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}
#endif
}

/*
 * This must be called with the hw_destroy_rwsem locked for read or write,
 * also the uobject itself must be locked for write.
 *
 * Upon return the HW object is guaranteed to be destroyed.
 *
 * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
 * however the type's alloc_commit function cannot have been called and the
 * uobject cannot be on the uobjects list.
 *
 * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
 * rdma_lookup_get_uobject) and the object is left in a state where the caller
 * needs to call rdma_lookup_put_uobject.
 *
 * For all other destroy modes this function internally unlocks the uobject
 * and consumes the kref on the uobj.
 */
static int uverbs_destroy_uobject(struct ib_uobject *uobj,
				  enum rdma_remove_reason reason)
{
	struct ib_uverbs_file *ufile = uobj->ufile;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);
	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);

	if (uobj->object) {
		ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason);
		if (ret) {
			if (ib_is_destroy_retryable(ret, reason, uobj))
				return ret;

			/* Nothing to be done, dangle the memory and move on */
			WARN(true,
			     "ib_uverbs: failed to remove uobject id %d, driver err=%d",
			     uobj->id, ret);
		}

		uobj->object = NULL;
	}

	if (reason == RDMA_REMOVE_ABORT) {
		WARN_ON(!list_empty(&uobj->list));
		WARN_ON(!uobj->context);
		uobj->uapi_object->type_class->alloc_abort(uobj);
	}

	uobj->context = NULL;

	/*
	 * For DESTROY the usecnt is held write locked, the caller is expected
	 * to unlock it and put the object when done with it. Only DESTROY
	 * can remove the IDR handle.
	 */
	if (reason != RDMA_REMOVE_DESTROY)
		atomic_set(&uobj->usecnt, 0);
	else
		uobj->uapi_object->type_class->remove_handle(uobj);

	if (!list_empty(&uobj->list)) {
		spin_lock_irqsave(&ufile->uobjects_lock, flags);
		list_del_init(&uobj->list);
		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);

		/*
		 * Pairs with the get in rdma_alloc_commit_uobject(), could
		 * destroy uobj.
		 */
		uverbs_uobject_put(uobj);
	}

	/*
	 * When aborting the stack kref remains owned by the core code, and is
	 * not transferred into the type. Pairs with the get in alloc_uobj.
	 */
	if (reason == RDMA_REMOVE_ABORT)
		uverbs_uobject_put(uobj);

	return 0;
}

/*
 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
 * sequence. It should only be used from command callbacks. On success the
 * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
 * version requires the caller to have already obtained a
 * LOOKUP_DESTROY uobject kref.
 */
int uobj_destroy(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;
	int ret;

	down_read(&ufile->hw_destroy_rwsem);

	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
	if (ret)
		goto out_unlock;

	ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY);
	if (ret) {
		atomic_set(&uobj->usecnt, 0);
		goto out_unlock;
	}

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
	return ret;
}

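/*
 * Minimal usage sketch (hypothetical command handler, not a real in-tree
 * caller): with a kref obtained via a LOOKUP_DESTROY lookup,
 *
 *	ret = uobj_destroy(uobj);
 *	if (!ret)
 *		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
 *
 * __uobj_get_destroy() and __uobj_perform_destroy() below wrap exactly this
 * pairing for the common cases.
 */
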
/*
 * uobj_get_destroy destroys the HW object and returns a handle to the uobj
 * with a NULL object pointer. The caller must pair this with
 * rdma_lookup_put_uobject(UVERBS_LOOKUP_WRITE).
 */
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
				      u32 id,
				      const struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;
	int ret;

	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
				       UVERBS_LOOKUP_DESTROY);
	if (IS_ERR(uobj))
		return uobj;

	ret = uobj_destroy(uobj);
	if (ret) {
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		return ERR_PTR(ret);
	}

	return uobj;
}

/*
 * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success
 * (negative errno on failure). For use by callers that do not need the uobj.
 */
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
			   const struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;

	uobj = __uobj_get_destroy(obj, id, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
	return 0;
}

/* alloc_uobj must be undone by uverbs_destroy_uobject() */
static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
				     const struct uverbs_api_object *obj)
{
	struct ib_uobject *uobj;
	struct ib_ucontext *ucontext;

	ucontext = ib_uverbs_get_ucontext_file(ufile);
	if (IS_ERR(ucontext))
		return ERR_CAST(ucontext);

	uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
	if (!uobj)
		return ERR_PTR(-ENOMEM);
	/*
	 * user_handle should be filled by the handler,
	 * the object is added to the list in the commit stage.
	 */
	uobj->ufile = ufile;
	uobj->context = ucontext;
	INIT_LIST_HEAD(&uobj->list);
	uobj->uapi_object = obj;
	/*
	 * Allocated objects start out as write locked to deny any other
	 * syscalls from accessing them until they are committed. See
	 * rdma_alloc_commit_uobject
	 */
	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);

	return uobj;
}

static int idr_add_uobj(struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&uobj->ufile->idr_lock);

	/*
	 * We start with allocating an idr pointing to NULL. This represents an
	 * object which isn't initialized yet. We'll replace it later on with
	 * the real object once we commit.
	 */
	ret = idr_alloc(&uobj->ufile->idr, NULL, 0,
			min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&uobj->ufile->idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

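/*
 * Informal sketch of the IDR handle lifecycle (illustrative only):
 * idr_add_uobj() reserves the id with a NULL pointer so concurrent lookups
 * report -ENOENT, alloc_commit_idr_uobject() publishes the object with
 * idr_replace(), and remove_handle_idr_uobject() / alloc_abort_idr_uobject()
 * release the id again.
 */
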
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *
lookup_get_idr_uobject(const struct uverbs_api_object *obj,
		       struct ib_uverbs_file *ufile, s64 id,
		       enum rdma_lookup_mode mode)
{
	struct ib_uobject *uobj;
	unsigned long idrno = id;

	if (id < 0 || id > ULONG_MAX)
		return ERR_PTR(-EINVAL);

	rcu_read_lock();
	/* object won't be released as we're protected in rcu */
	uobj = idr_find(&ufile->idr, idrno);
	if (!uobj) {
		uobj = ERR_PTR(-ENOENT);
		goto free;
	}

	/*
	 * The idr_find is guaranteed to return a pointer to something that
	 * isn't freed yet, or NULL, as the free after idr_remove goes through
	 * kfree_rcu(). However the object may still have been released and
	 * kfree() could be called at any time.
	 */
	if (!kref_get_unless_zero(&uobj->ref))
		uobj = ERR_PTR(-ENOENT);

free:
	rcu_read_unlock();
	return uobj;
}

static struct ib_uobject *
lookup_get_fd_uobject(const struct uverbs_api_object *obj,
		      struct ib_uverbs_file *ufile, s64 id,
		      enum rdma_lookup_mode mode)
{
	const struct uverbs_obj_fd_type *fd_type;
	struct file *f;
	struct ib_uobject *uobject;
	int fdno = id;

	if (fdno != id)
		return ERR_PTR(-EINVAL);

	if (mode != UVERBS_LOOKUP_READ)
		return ERR_PTR(-EOPNOTSUPP);

	if (!obj->type_attrs)
		return ERR_PTR(-EIO);
	fd_type =
		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);

	f = fget(fdno);
	if (!f)
		return ERR_PTR(-EBADF);

	uobject = f->private_data;
	/*
	 * fget(id) ensures we are not currently running uverbs_close_fd,
	 * and the caller is expected to ensure that uverbs_close_fd is never
	 * done while a call to lookup is possible.
	 */
	if (f->f_op != fd_type->fops) {
		fput(f);
		return ERR_PTR(-EBADF);
	}

	uverbs_uobject_get(uobject);
	return uobject;
}

struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
					   struct ib_uverbs_file *ufile, s64 id,
					   enum rdma_lookup_mode mode)
{
	struct ib_uobject *uobj;
	int ret;

	if (IS_ERR(obj) && PTR_ERR(obj) == -ENOMSG) {
		/* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
		uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;
	} else {
		if (IS_ERR(obj))
			return ERR_PTR(-EINVAL);

		uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;

		if (uobj->uapi_object != obj) {
			ret = -EINVAL;
			goto free;
		}
	}

	/*
	 * If we have been disassociated block every command except for
	 * DESTROY based commands.
	 */
	if (mode != UVERBS_LOOKUP_DESTROY &&
	    !srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu)) {
		ret = -EIO;
		goto free;
	}

	ret = uverbs_try_lock_object(uobj, mode);
	if (ret)
		goto free;

	return uobj;
free:
	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

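/*
 * Minimal usage sketch, assuming a hypothetical command handler (names are
 * illustrative, not from this file):
 *
 *	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
 *				       UVERBS_LOOKUP_READ);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	... use uobj->object under the shared lock ...
 *	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
 *
 * Handlers normally reach this through the uobj_get_*()/uobj_put_*() style
 * wrappers rather than calling it directly.
 */
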
static struct ib_uobject *
alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
			struct ib_uverbs_file *ufile)
{
	int ret;
	struct ib_uobject *uobj;

	uobj = alloc_uobj(ufile, obj);
	if (IS_ERR(uobj))
		return uobj;

	ret = idr_add_uobj(uobj);
	if (ret)
		goto uobj_put;

	ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
				   RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		goto remove;

	return uobj;

remove:
	spin_lock(&ufile->idr_lock);
	idr_remove(&ufile->idr, uobj->id);
	spin_unlock(&ufile->idr_lock);
uobj_put:
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
		       struct ib_uverbs_file *ufile)
{
	int new_fd;
	struct ib_uobject *uobj;

	new_fd = get_unused_fd_flags(O_CLOEXEC);
	if (new_fd < 0)
		return ERR_PTR(new_fd);

	uobj = alloc_uobj(ufile, obj);
	if (IS_ERR(uobj)) {
		put_unused_fd(new_fd);
		return uobj;
	}

	uobj->id = new_fd;
	uobj->ufile = ufile;

	return uobj;
}

struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
					    struct ib_uverbs_file *ufile)
{
	struct ib_uobject *ret;

	if (IS_ERR(obj))
		return ERR_PTR(-EINVAL);

	/*
	 * The hw_destroy_rwsem is held across the entire object creation and
	 * released during rdma_alloc_commit_uobject or
	 * rdma_alloc_abort_uobject
	 */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		return ERR_PTR(-EIO);

	ret = obj->type_class->alloc_begin(obj, ufile);
	if (IS_ERR(ret)) {
		up_read(&ufile->hw_destroy_rwsem);
		return ret;
	}
	return ret;
}

static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);

	spin_lock(&uobj->ufile->idr_lock);
	idr_remove(&uobj->ufile->idr, uobj->id);
	spin_unlock(&uobj->ufile->idr_lock);
}

static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
					       enum rdma_remove_reason why)
{
	const struct uverbs_obj_idr_type *idr_type =
		container_of(uobj->uapi_object->type_attrs,
			     struct uverbs_obj_idr_type, type);
	int ret = idr_type->destroy_object(uobj, why);

	/*
	 * We can only fail gracefully if the user requested to destroy the
	 * object or when a retry may be called upon an error.
	 * In the rest of the cases, just remove whatever you can.
	 */
	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	if (why == RDMA_REMOVE_ABORT)
		return 0;

	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);

	return 0;
}

static void remove_handle_idr_uobject(struct ib_uobject *uobj)
{
	spin_lock(&uobj->ufile->idr_lock);
	idr_remove(&uobj->ufile->idr, uobj->id);
	spin_unlock(&uobj->ufile->idr_lock);
	/* Matches the kref in alloc_commit_idr_uobject */
	uverbs_uobject_put(uobj);
}

static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
	put_unused_fd(uobj->id);
}

static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
					      enum rdma_remove_reason why)
{
	const struct uverbs_obj_fd_type *fd_type = container_of(
		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
	int ret = fd_type->context_closed(uobj, why);

	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	return 0;
}

static void remove_handle_fd_uobject(struct ib_uobject *uobj)
{
}

static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;

	spin_lock(&ufile->idr_lock);
	/*
	 * We already allocated this IDR with a NULL object, so
	 * this shouldn't fail.
	 *
	 * NOTE: Once we set the IDR we lose ownership of our kref on uobj.
	 * It will be put by remove_handle_idr_uobject()
	 */
	WARN_ON(idr_replace(&ufile->idr, uobj, uobj->id));
	spin_unlock(&ufile->idr_lock);

	return 0;
}

static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
	const struct uverbs_obj_fd_type *fd_type = container_of(
		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
	int fd = uobj->id;
	struct file *filp;

	/*
	 * The kref for uobj is moved into filp->private_data and put in
	 * uverbs_close_fd(). Once alloc_commit() succeeds uverbs_close_fd()
	 * must be guaranteed to be called from the provided fops release
	 * callback.
	 */
	filp = anon_inode_getfile(fd_type->name,
				  fd_type->fops,
				  uobj,
				  fd_type->flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	uobj->object = filp;

	/* Matching put will be done in uverbs_close_fd() */
	kref_get(&uobj->ufile->ref);

	/* This shouldn't be used anymore. Use the file object instead */
	uobj->id = 0;

	/*
	 * NOTE: Once we install the file we lose ownership of our kref on
	 * uobj. It will be put by uverbs_close_fd()
	 */
	fd_install(fd, filp);

	return 0;
}

/*
 * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
 * caller can no longer assume uobj is valid. If this function fails it
 * destroys the uobject, including the attached HW object.
 */
int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;
	int ret;

	/* alloc_commit consumes the uobj kref */
	ret = uobj->uapi_object->type_class->alloc_commit(uobj);
	if (ret) {
		uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
		up_read(&ufile->hw_destroy_rwsem);
		return ret;
	}

	/* kref is held so long as the uobj is on the uobj list. */
	uverbs_uobject_get(uobj);
	spin_lock_irq(&ufile->uobjects_lock);
	list_add(&uobj->list, &ufile->uobjects);
	spin_unlock_irq(&ufile->uobjects_lock);

	/* matches atomic_set(-1) in alloc_uobj */
	atomic_set(&uobj->usecnt, 0);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);

	return 0;
}

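/*
 * Rough sketch of the whole allocation flow as seen from a hypothetical
 * command handler (illustrative only; driver_create_hw_object() is not a
 * real function):
 *
 *	uobj = rdma_alloc_begin_uobject(obj, attrs->ufile);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	hw = driver_create_hw_object(...);
 *	if (IS_ERR(hw)) {
 *		rdma_alloc_abort_uobject(uobj);
 *		return PTR_ERR(hw);
 *	}
 *	uobj->object = hw;
 *	return rdma_alloc_commit_uobject(uobj);
 *
 * Both commit and abort drop the hw_destroy_rwsem taken in
 * rdma_alloc_begin_uobject() and consume the handler's kref on uobj.
 */
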
/*
 * This consumes the kref for uobj. It is up to the caller to unwind the HW
 * object and anything else connected to uobj before calling this.
 */
void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;

	uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);
}

static void lookup_put_idr_uobject(struct ib_uobject *uobj,
				   enum rdma_lookup_mode mode)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	struct file *filp = uobj->object;

	WARN_ON(mode != UVERBS_LOOKUP_READ);
	/* This indirectly calls uverbs_close_fd and frees the object */
	fput(filp);
}

void rdma_lookup_put_uobject(struct ib_uobject *uobj,
			     enum rdma_lookup_mode mode)
{
	assert_uverbs_usecnt(uobj, mode);
	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	/*
	 * In order to unlock an object, either decrease its usecnt for
	 * read access or zero it in case of exclusive access. See
	 * uverbs_try_lock_object for locking schema information.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		atomic_dec(&uobj->usecnt);
		break;
	case UVERBS_LOOKUP_WRITE:
		atomic_set(&uobj->usecnt, 0);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}

	/* Pairs with the kref obtained by type->lookup_get */
	uverbs_uobject_put(uobj);
}

void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	spin_lock_init(&ufile->idr_lock);
	idr_init(&ufile->idr);
}

void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	struct ib_uobject *entry;
	int id;

	/*
	 * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
	 * there are no HW objects left, however the IDR is still populated
	 * with anything that has not been cleaned up by userspace. Since the
	 * kref on ufile is 0, nothing is allowed to call lookup_get.
	 *
	 * This is an optimized equivalent to remove_handle_idr_uobject
	 */
	idr_for_each_entry(&ufile->idr, entry, id) {
		WARN_ON(entry->object);
		uverbs_uobject_put(entry);
	}

	idr_destroy(&ufile->idr);
}

const struct uverbs_obj_type_class uverbs_idr_class = {
	.alloc_begin = alloc_begin_idr_uobject,
	.lookup_get = lookup_get_idr_uobject,
	.alloc_commit = alloc_commit_idr_uobject,
	.alloc_abort = alloc_abort_idr_uobject,
	.lookup_put = lookup_put_idr_uobject,
	.destroy_hw = destroy_hw_idr_uobject,
	.remove_handle = remove_handle_idr_uobject,
	/*
	 * When we destroy an object, we first just lock it for WRITE and
	 * actually DESTROY it in the finalize stage. So, the problematic
	 * scenario is when we just started the finalize stage of the
	 * destruction (nothing was executed yet). Now, the other thread
	 * fetched the object for READ access, but it didn't lock it yet.
	 * The DESTROY thread continues and starts destroying the object.
	 * When the other thread continues - without the RCU, it would
	 * access freed memory. However, the rcu_read_lock delays the free
	 * until the rcu_read_lock of the READ operation quits. Since the
	 * exclusive lock of the object is still taken by the DESTROY flow, the
	 * READ operation will get -EBUSY and it'll just bail out.
	 */
	.needs_kfree_rcu = true,
};
EXPORT_SYMBOL(uverbs_idr_class);

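/*
 * Informal timeline of the race the needs_kfree_rcu comment above guards
 * against (illustrative only): a DESTROY flow write-locks the uobject,
 * removes the IDR handle and drops the last kref, scheduling the memory for
 * kfree_rcu(). A concurrent READ flow that already did idr_find() under
 * rcu_read_lock() may still dereference the uobject, but either
 * kref_get_unless_zero() fails (-ENOENT) or uverbs_try_lock_object() sees
 * usecnt == -1 (-EBUSY); the actual kfree() only happens after the reader's
 * RCU read-side critical section ends.
 */
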
void uverbs_close_fd(struct file *f)
{
	struct ib_uobject *uobj = f->private_data;
	struct ib_uverbs_file *ufile = uobj->ufile;

	if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
		/*
		 * lookup_get_fd_uobject holds the kref on the struct file any
		 * time a FD uobj is locked, which prevents this release
		 * method from being invoked. Meaning we can always get the
		 * write lock here, or we have a kernel bug.
		 */
		WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
		uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE);
		up_read(&ufile->hw_destroy_rwsem);
	}

	/* Matches the get in alloc_commit_fd_uobject */
	kref_put(&ufile->ref, ib_uverbs_release_file);

	/* Pairs with filp->private_data in alloc_commit_fd_uobject */
	uverbs_uobject_put(uobj);
}

/*
 * Drop the ucontext off the ufile and completely disconnect it from the
 * ib_device
 */
static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
				   enum rdma_remove_reason reason)
{
	struct ib_ucontext *ucontext = ufile->ucontext;
	struct ib_device *ib_dev = ucontext->device;
	int ret;

	/*
	 * If we are closing the FD then the user mmap VMAs must have
	 * already been destroyed as they hold on to the filep, otherwise
	 * they need to be zap'd.
	 */
	if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
		uverbs_user_mmap_disassociate(ufile);
		if (ib_dev->ops.disassociate_ucontext)
			ib_dev->ops.disassociate_ucontext(ucontext);
	}

	ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
			   RDMACG_RESOURCE_HCA_HANDLE);

	rdma_restrack_del(&ucontext->res);

	/*
	 * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
	 * the error return.
	 */
	ret = ib_dev->ops.dealloc_ucontext(ucontext);
	WARN_ON(ret);

	ufile->ucontext = NULL;
}

static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
				  enum rdma_remove_reason reason)
{
	struct ib_uobject *obj, *next_obj;
	int ret = -EINVAL;

	/*
	 * This shouldn't run while executing other commands on this
	 * context. Thus, the only thing we should take care of is
	 * releasing a FD while traversing this list. The FD could be
	 * closed and released from the _release fop of this FD.
	 * In order to mitigate this, we add a lock.
	 * We take and release the lock per traversal in order to let
	 * other threads (which might still use the FDs) chance to run.
	 */
	list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
		/*
		 * If we hit this WARN_ON, that means we are
		 * racing with a lookup_get.
		 */
		WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
		if (!uverbs_destroy_uobject(obj, reason))
			ret = 0;
		else
			atomic_set(&obj->usecnt, 0);
	}
	return ret;
}

/*
 * Destroy the ucontext and every uobject associated with it. If called with
 * reason != RDMA_REMOVE_CLOSE this will not return until the destruction has
 * been completed and ufile->ucontext is NULL.
 *
 * This is internally locked and can be called in parallel from multiple
 * contexts.
 */
void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
			     enum rdma_remove_reason reason)
{
	if (reason == RDMA_REMOVE_CLOSE) {
		/*
		 * During destruction we might trigger something that
		 * synchronously calls release on any file descriptor. For
		 * this reason all paths that come from file_operations
		 * release must use try_lock. They can progress knowing that
		 * there is an ongoing uverbs_destroy_ufile_hw that will clean
		 * up the driver resources.
		 */
		if (!mutex_trylock(&ufile->ucontext_lock))
			return;

	} else {
		mutex_lock(&ufile->ucontext_lock);
	}

	down_write(&ufile->hw_destroy_rwsem);

	/*
	 * If a ucontext was never created then we can't have any uobjects to
	 * cleanup, nothing to do.
	 */
	if (!ufile->ucontext)
		goto done;

	ufile->ucontext->closing = true;
	ufile->ucontext->cleanup_retryable = true;
	while (!list_empty(&ufile->uobjects))
		if (__uverbs_cleanup_ufile(ufile, reason)) {
			/*
			 * No entry was cleaned-up successfully during this
			 * iteration
			 */
			break;
		}

	ufile->ucontext->cleanup_retryable = false;
	if (!list_empty(&ufile->uobjects))
		__uverbs_cleanup_ufile(ufile, reason);

	ufile_destroy_ucontext(ufile, reason);

done:
	up_write(&ufile->hw_destroy_rwsem);
	mutex_unlock(&ufile->ucontext_lock);
}

const struct uverbs_obj_type_class uverbs_fd_class = {
	.alloc_begin = alloc_begin_fd_uobject,
	.lookup_get = lookup_get_fd_uobject,
	.alloc_commit = alloc_commit_fd_uobject,
	.alloc_abort = alloc_abort_fd_uobject,
	.lookup_put = lookup_put_fd_uobject,
	.destroy_hw = destroy_hw_fd_uobject,
	.remove_handle = remove_handle_fd_uobject,
	.needs_kfree_rcu = false,
};
EXPORT_SYMBOL(uverbs_fd_class);

struct ib_uobject *
uverbs_get_uobject_from_file(u16 object_id,
			     struct ib_uverbs_file *ufile,
			     enum uverbs_obj_access access, s64 id)
{
	const struct uverbs_api_object *obj =
		uapi_get_object(ufile->device->uapi, object_id);

	switch (access) {
	case UVERBS_ACCESS_READ:
		return rdma_lookup_get_uobject(obj, ufile, id,
					       UVERBS_LOOKUP_READ);
	case UVERBS_ACCESS_DESTROY:
		/* Actual destruction is done inside uverbs_handle_method */
		return rdma_lookup_get_uobject(obj, ufile, id,
					       UVERBS_LOOKUP_DESTROY);
	case UVERBS_ACCESS_WRITE:
		return rdma_lookup_get_uobject(obj, ufile, id,
					       UVERBS_LOOKUP_WRITE);
	case UVERBS_ACCESS_NEW:
		return rdma_alloc_begin_uobject(obj, ufile);
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}

int uverbs_finalize_object(struct ib_uobject *uobj,
			   enum uverbs_obj_access access,
			   bool commit)
{
	int ret = 0;

	/*
	 * refcounts should be handled at the object level and not at the
	 * uobject level. Refcounts of the objects themselves are done in
	 * handlers.
	 */

	switch (access) {
	case UVERBS_ACCESS_READ:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
		break;
	case UVERBS_ACCESS_WRITE:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
		break;
	case UVERBS_ACCESS_DESTROY:
		if (uobj)
			rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		break;
	case UVERBS_ACCESS_NEW:
		if (commit)
			ret = rdma_alloc_commit_uobject(uobj);
		else
			rdma_alloc_abort_uobject(uobj);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}