@@ ... @@ struct amdgpu_mn
    struct hlist_node node;

    /* objects protected by lock */
-   struct mutex lock;
+   struct rw_semaphore lock;
    struct rb_root objects;
};
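The structural change is the lock type alone: a struct rw_semaphore distinguishes readers from writers, so the invalidation callbacks below can walk the interval tree concurrently while registration, unregistration and teardown keep exclusive access. A minimal sketch of the pattern, with hypothetical obj_tree names rather than the amdgpu code:

    #include <linux/rbtree.h>
    #include <linux/rwsem.h>

    struct obj_tree {
            struct rw_semaphore lock;   /* protects objects */
            struct rb_root objects;
    };

    static void obj_tree_init(struct obj_tree *t)
    {
            init_rwsem(&t->lock);       /* rwsem counterpart of mutex_init() */
            t->objects = RB_ROOT;
    }

    static void obj_tree_read(struct obj_tree *t)
    {
            down_read(&t->lock);        /* shared: many readers at once */
            /* ... look up nodes in t->objects ... */
            up_read(&t->lock);
    }

    static void obj_tree_write(struct obj_tree *t)
    {
            down_write(&t->lock);       /* exclusive: no readers, no writers */
            /* ... insert into or remove from t->objects ... */
            up_write(&t->lock);
    }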
@@ ... @@ amdgpu_mn_destroy
    struct amdgpu_bo *bo, *next_bo;

    mutex_lock(&adev->mn_lock);
-   mutex_lock(&rmn->lock);
+   down_write(&rmn->lock);
    hash_del(&rmn->node);
    rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
                                         it.rb) {
@@ ... @@ amdgpu_mn_destroy
        }
        kfree(node);
    }
-   mutex_unlock(&rmn->lock);
+   up_write(&rmn->lock);
    mutex_unlock(&adev->mn_lock);
    mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
    kfree(rmn);
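Teardown must be a writer since it empties the whole tree; note also that adev->mn_lock is taken before rmn->lock, the same order used in amdgpu_mn_unregister below. The postorder walk is what makes freeing each node in place safe. A self-contained sketch of that idiom, with a hypothetical my_node type:

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct my_node {
            struct rb_node rb;
            /* ... payload ... */
    };

    static void tree_free_all(struct rb_root *root)
    {
            struct my_node *node, *next;

            /*
             * Postorder visits children before their parent, so every
             * node can be freed as we go without rebalancing the tree.
             */
            rbtree_postorder_for_each_entry_safe(node, next, root, rb)
                    kfree(node);
            *root = RB_ROOT;
    }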
@@ ... @@ amdgpu_mn_invalidate_page
    struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
    struct interval_tree_node *it;

-   mutex_lock(&rmn->lock);
+   down_read(&rmn->lock);

    it = interval_tree_iter_first(&rmn->objects, address, address);
    if (it) {
@@ ... @@ amdgpu_mn_invalidate_page
        amdgpu_mn_invalidate_node(node, address, address);
    }

-   mutex_unlock(&rmn->lock);
+   up_read(&rmn->lock);
}

/**
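Here the callback only reads the tree, so the shared side suffices and concurrent invalidations no longer serialize on one mutex. Passing address as both start and last turns interval_tree_iter_first() into a single-address query, and the body elided from the context above evidently resolves the hit to its amdgpu_mn_node via container_of() before calling amdgpu_mn_invalidate_node(). A sketch of the read side, with a hypothetical helper and the rb_root-based interval tree API of kernels of this vintage:

    #include <linux/interval_tree.h>
    #include <linux/rwsem.h>
    #include <linux/types.h>

    /* Return true if any registered interval contains @address. */
    static bool range_contains(struct rw_semaphore *lock,
                               struct rb_root *objects, unsigned long address)
    {
            struct interval_tree_node *it;

            down_read(lock);        /* readers may run concurrently */
            it = interval_tree_iter_first(objects, address, address);
            up_read(lock);

            return it != NULL;
    }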
@@ ... @@ amdgpu_mn_invalidate_range_start
    /* notification is exclusive, but interval is inclusive */
    end -= 1;

-   mutex_lock(&rmn->lock);
+   down_read(&rmn->lock);

    it = interval_tree_iter_first(&rmn->objects, start, end);
    while (it) {
@@ ... @@ amdgpu_mn_invalidate_range_start
        amdgpu_mn_invalidate_node(node, start, end);
    }

-   mutex_unlock(&rmn->lock);
+   up_read(&rmn->lock);
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
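The end -= 1 bridges two conventions: mmu_notifier ranges are half-open [start, end), while the interval tree stores inclusive [start, last]. A single 4 KiB page is thus notified as start = 0x1000, end = 0x2000 and queried as [0x1000, 0x1fff]. The while loop then visits every overlapping node; a sketch of that iteration with a hypothetical helper:

    #include <linux/interval_tree.h>

    static void visit_overlaps(struct rb_root *objects,
                               unsigned long start, unsigned long end)
    {
            struct interval_tree_node *it;

            end -= 1;       /* [0x1000, 0x2000) becomes [0x1000, 0x1fff] */

            for (it = interval_tree_iter_first(objects, start, end); it;
                 it = interval_tree_iter_next(it, start, end)) {
                    /* ... invalidate the BOs attached to this node ... */
            }
    }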
@@ ... @@ amdgpu_mn_get
    rmn->adev = adev;
    rmn->mm = mm;
    rmn->mn.ops = &amdgpu_mn_ops;
-   mutex_init(&rmn->lock);
+   init_rwsem(&rmn->lock);
    rmn->objects = RB_ROOT;

    r = __mmu_notifier_register(&rmn->mn, mm);
@@ ... @@ amdgpu_mn_register
    INIT_LIST_HEAD(&bos);

-   mutex_lock(&rmn->lock);
+   down_write(&rmn->lock);

    while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
        kfree(node);
@@ ... @@ amdgpu_mn_register
    if (!node) {
        node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
        if (!node) {
-           mutex_unlock(&rmn->lock);
+           up_write(&rmn->lock);
            return -ENOMEM;
        }
    }
@@ ... @@ amdgpu_mn_register
    interval_tree_insert(&node->it, &rmn->objects);

-   mutex_unlock(&rmn->lock);
+   up_write(&rmn->lock);

    return 0;
}
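Registration modifies the tree, so it takes the write side, and the -ENOMEM path above has to drop the semaphore on its way out exactly as it previously dropped the mutex. A sketch of the writer side of an insert, again with a hypothetical helper:

    #include <linux/interval_tree.h>
    #include <linux/rwsem.h>

    static void insert_range(struct rw_semaphore *lock, struct rb_root *objects,
                             struct interval_tree_node *node,
                             unsigned long start, unsigned long last)
    {
            node->start = start;
            node->last = last;      /* inclusive end */

            down_write(lock);       /* excludes concurrent invalidations */
            interval_tree_insert(node, objects);
            up_write(lock);
    }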
@@ ... @@ amdgpu_mn_unregister
        return;
    }

-   mutex_lock(&rmn->lock);
+   down_write(&rmn->lock);

    /* save the next list entry for later */
    head = bo->mn_list.next;
@@ ... @@ amdgpu_mn_unregister
        kfree(node);
    }

-   mutex_unlock(&rmn->lock);
+   up_write(&rmn->lock);
    mutex_unlock(&adev->mn_lock);
}
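Both functions that nest the two locks take the device-wide mutex first and the per-mm semaphore second; keeping that single order is what rules out an ABBA deadlock between unregister and the destroy worker. Schematically, with a hypothetical helper:

    #include <linux/mutex.h>
    #include <linux/rwsem.h>

    static void update_locked(struct mutex *mn_lock, struct rw_semaphore *lock)
    {
            mutex_lock(mn_lock);    /* outer lock: adev->mn_lock */
            down_write(lock);       /* inner lock: rmn->lock */
            /* ... modify the hash table and the interval tree ... */
            up_write(lock);
            mutex_unlock(mn_lock);
    }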