kref_put(&uobject->ref, uverbs_uobject_free);
}
-static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
+static int uverbs_try_lock_object(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
/*
 * When a shared access is required, we use a positive counter. Each
 * shared access request checks that the counter is not -1 and
 * increments it. Exclusive access (write or destroy) instead requires
 * the counter to be zero and sets it to -1. Releasing a shared access
 * lock simply decrements the counter; since only one exclusive lock
 * can be held concurrently, setting the counter to zero is enough for
 * releasing this lock.
 */
- if (!exclusive)
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
-EBUSY : 0;
-
- /* lock is either WRITE or DESTROY - should be exclusive */
- return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
+ case UVERBS_LOOKUP_WRITE:
+ /* lock is either WRITE or DESTROY - should be exclusive */
+ return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
+ }
+ return 0;
}
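For reference, the counter scheme described in the comment can be modelled outside the kernel: 0 means unlocked, a positive value counts concurrent readers, and -1 marks a single exclusive holder. A minimal sketch using C11 atomics (hypothetical helper names, not the kernel atomic_t API):

#include <stdatomic.h>
#include <errno.h>

/* usecnt: 0 = free, >0 = number of readers, -1 = exclusively held */

static int try_lock_read(atomic_int *usecnt)
{
	int cur = atomic_load(usecnt);

	/* Increment only while the object is not exclusively held (-1). */
	while (cur != -1) {
		if (atomic_compare_exchange_weak(usecnt, &cur, cur + 1))
			return 0;
	}
	return -EBUSY;
}

static int try_lock_write(atomic_int *usecnt)
{
	int expected = 0;

	/* Succeed only if nobody holds the object at all. */
	return atomic_compare_exchange_strong(usecnt, &expected, -1) ?
		0 : -EBUSY;
}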
-static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
+static void assert_uverbs_usecnt(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
#ifdef CONFIG_LOCKDEP
- if (exclusive)
- WARN_ON(atomic_read(&uobj->usecnt) != -1);
- else
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
WARN_ON(atomic_read(&uobj->usecnt) <= 0);
+ break;
+ case UVERBS_LOOKUP_WRITE:
+ WARN_ON(atomic_read(&uobj->usecnt) != -1);
+ break;
+ }
#endif
}
unsigned long flags;
int ret;
- assert_uverbs_usecnt(uobj, true);
+ assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
if (uobj->object) {
ret = uobj->type->type_class->remove_commit(uobj, reason);
struct ib_uobject *uobj;
int ret;
- uobj = rdma_lookup_get_uobject(type, ufile, id, true);
+ uobj = rdma_lookup_get_uobject(type, ufile, id, UVERBS_LOOKUP_WRITE);
if (IS_ERR(uobj))
return uobj;
ret = rdma_explicit_destroy(uobj);
if (ret) {
- rdma_lookup_put_uobject(uobj, true);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
return ERR_PTR(ret);
}
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- rdma_lookup_put_uobject(uobj, true);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
return success_res;
}
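The destroy-by-handle helpers above follow a fixed pattern: take the exclusive lookup, attempt the explicit destroy, and release the WRITE lock both on the failure path and, for callers that do not need the object afterwards, after a successful destroy. A hedged sketch of a caller doing the same by hand (the function name and error flow are illustrative only):

/* Hedged sketch of the destroy-by-handle pattern shown above. */
static int example_destroy_by_id(const struct uverbs_obj_type *type,
				 struct ib_uverbs_file *ufile, s64 id)
{
	struct ib_uobject *uobj;
	int ret;

	/* Exclusive lookup: nobody else may hold the object. */
	uobj = rdma_lookup_get_uobject(type, ufile, id, UVERBS_LOOKUP_WRITE);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = rdma_explicit_destroy(uobj);

	/* Drop the exclusive lock and the lookup reference in either case;
	 * a successful destroy has already torn down the HW object. */
	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
	return ret;
}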
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *
lookup_get_idr_uobject(const struct uverbs_obj_type *type,
- struct ib_uverbs_file *ufile, s64 id, bool exclusive)
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
{
struct ib_uobject *uobj;
unsigned long idrno = id;
return uobj;
}
-static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
- struct ib_uverbs_file *ufile,
- s64 id, bool exclusive)
+static struct ib_uobject *
+lookup_get_fd_uobject(const struct uverbs_obj_type *type,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
{
struct file *f;
struct ib_uobject *uobject;
if (fdno != id)
return ERR_PTR(-EINVAL);
- if (exclusive)
+ if (mode != UVERBS_LOOKUP_READ)
return ERR_PTR(-EOPNOTSUPP);
f = fget(fdno);
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
struct ib_uverbs_file *ufile, s64 id,
- bool exclusive)
+ enum rdma_lookup_mode mode)
{
struct ib_uobject *uobj;
int ret;
- uobj = type->type_class->lookup_get(type, ufile, id, exclusive);
+ uobj = type->type_class->lookup_get(type, ufile, id, mode);
if (IS_ERR(uobj))
return uobj;
goto free;
}
- ret = uverbs_try_lock_object(uobj, exclusive);
+ ret = uverbs_try_lock_object(uobj, mode);
if (ret)
goto free;
return uobj;
free:
- uobj->type->type_class->lookup_put(uobj, exclusive);
+ uobj->type->type_class->lookup_put(uobj, mode);
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
}
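A lookup must always be paired with a put of the same mode. A hedged sketch of the read-side pairing (the function name and the use of uobj->object are illustrative):

static int example_read_user_object(const struct uverbs_obj_type *type,
				    struct ib_uverbs_file *ufile, s64 id)
{
	struct ib_uobject *uobj;

	/* Shared lookup: takes a kref and increments usecnt. */
	uobj = rdma_lookup_get_uobject(type, ufile, id, UVERBS_LOOKUP_READ);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* ... inspect uobj->object under the shared lock ... */

	/* Must match the mode used at lookup time. */
	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
	return 0;
}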
uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
}
-static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
+static void lookup_put_idr_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
}
-static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
+static void lookup_put_fd_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
struct file *filp = uobj->object;
- WARN_ON(exclusive);
+ WARN_ON(mode != UVERBS_LOOKUP_READ);
/* This indirectly calls uverbs_close_fd and frees the object */
fput(filp);
}
-void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
+void rdma_lookup_put_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
- assert_uverbs_usecnt(uobj, exclusive);
- uobj->type->type_class->lookup_put(uobj, exclusive);
+ assert_uverbs_usecnt(uobj, mode);
+ uobj->type->type_class->lookup_put(uobj, mode);
/*
 * In order to unlock an object, either decrease its usecnt for
 * read access or zero it in case of exclusive access. See
 * uverbs_try_lock_object for details of the locking scheme.
 */
- if (!exclusive)
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
atomic_dec(&uobj->usecnt);
- else
+ break;
+ case UVERBS_LOOKUP_WRITE:
atomic_set(&uobj->usecnt, 0);
+ break;
+ }
/* Pairs with the kref obtained by type->lookup_get */
uverbs_uobject_put(uobj);
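The unlock halves mirror the try_lock sketch given earlier: a reader just decrements the counter, while the single exclusive holder can reset it to zero directly. In the same hypothetical C11-atomics model (not kernel code):

static void unlock_read(atomic_int *usecnt)
{
	/* One reader fewer; the counter reaches 0 when the last one leaves. */
	atomic_fetch_sub(usecnt, 1);
}

static void unlock_write(atomic_int *usecnt)
{
	/* Only one exclusive holder can exist, so marking the counter
	 * free (0) is sufficient. */
	atomic_store(usecnt, 0);
}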
* method from being invoked. Meaning we can always get the
* write lock here, or we have a kernel bug.
*/
- WARN_ON(uverbs_try_lock_object(uobj, true));
+ WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE);
up_read(&ufile->hw_destroy_rwsem);
}
* if we hit this WARN_ON, that means we are
* racing with a lookup_get.
*/
- WARN_ON(uverbs_try_lock_object(obj, true));
+ WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
if (!uverbs_destroy_uobject(obj, reason))
ret = 0;
}
{
switch (access) {
case UVERBS_ACCESS_READ:
- return rdma_lookup_get_uobject(type_attrs, ufile, id, false);
+ return rdma_lookup_get_uobject(type_attrs, ufile, id,
+ UVERBS_LOOKUP_READ);
case UVERBS_ACCESS_DESTROY:
case UVERBS_ACCESS_WRITE:
- return rdma_lookup_get_uobject(type_attrs, ufile, id, true);
+ return rdma_lookup_get_uobject(type_attrs, ufile, id,
+ UVERBS_LOOKUP_WRITE);
case UVERBS_ACCESS_NEW:
return rdma_alloc_begin_uobject(type_attrs, ufile);
default:
switch (access) {
case UVERBS_ACCESS_READ:
- rdma_lookup_put_uobject(uobj, false);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
break;
case UVERBS_ACCESS_WRITE:
- rdma_lookup_put_uobject(uobj, true);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
break;
case UVERBS_ACCESS_DESTROY:
- rdma_lookup_put_uobject(uobj, true);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
break;
case UVERBS_ACCESS_NEW:
if (commit)
#define uobj_get_read(_type, _id, _ufile) \
rdma_lookup_get_uobject(uobj_get_type(_type), _ufile, \
- _uobj_check_id(_id), false)
+ _uobj_check_id(_id), UVERBS_LOOKUP_READ)
#define ufd_get_read(_type, _fdnum, _ufile) \
rdma_lookup_get_uobject(uobj_get_type(_type), _ufile, \
- (_fdnum)*typecheck(s32, _fdnum), false)
+ (_fdnum)*typecheck(s32, _fdnum), \
+ UVERBS_LOOKUP_READ)
static inline void *_uobj_get_obj_read(struct ib_uobject *uobj)
{
#define uobj_get_write(_type, _id, _ufile) \
rdma_lookup_get_uobject(uobj_get_type(_type), _ufile, \
- _uobj_check_id(_id), true)
+ _uobj_check_id(_id), UVERBS_LOOKUP_WRITE)
int __uobj_perform_destroy(const struct uverbs_obj_type *type, u32 id,
struct ib_uverbs_file *ufile, int success_res);
static inline void uobj_put_destroy(struct ib_uobject *uobj)
{
- rdma_lookup_put_uobject(uobj, true);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
}
static inline void uobj_put_read(struct ib_uobject *uobj)
{
- rdma_lookup_put_uobject(uobj, false);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
}
#define uobj_put_obj_read(_obj) \
static inline void uobj_put_write(struct ib_uobject *uobj)
{
- rdma_lookup_put_uobject(uobj, true);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
}
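These wrappers exist so a caller states the access mode once, at get time, and then uses the matching put. A hedged usage sketch (UVERBS_OBJECT_PD, the handler name and its arguments are illustrative and not part of this patch):

/* Hedged sketch: read-locked use of a PD uobject inside a handler. */
static int example_handler(struct ib_uverbs_file *ufile, u32 pd_handle)
{
	struct ib_uobject *uobj;

	uobj = uobj_get_read(UVERBS_OBJECT_PD, pd_handle, ufile);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* ... use uobj->object as a struct ib_pd * under the read lock ... */

	uobj_put_read(uobj);
	return 0;
}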
static inline int __must_check uobj_alloc_commit(struct ib_uobject *uobj,