 	mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
 
-	if (mas_preallocate(mas, GFP_KERNEL))
-		return -ENOMEM;
-
 	mas->last = end - 1;
 	/*
 	 * If we need to split any vma, do it now to save pain later.
@@ ... @@
 			goto userfaultfd_error;
 	}
 
-	/* Point of no return */
-	mas_set_range(mas, start, end - 1);
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */
 	{
 		MA_STATE(test, &mt_detach, start, end - 1);
 		struct vm_area_struct *vma_mas, *vma_test;
 		int test_count = 0;
 
+		mas_set_range(mas, start, end - 1);
 		rcu_read_lock();
 		vma_test = mas_find(&test, end - 1);
 		mas_for_each(mas, vma_mas, end - 1) {
 			BUG_ON(vma_mas != vma_test);
 			test_count++;
 			vma_test = mas_next(&test, end - 1);
 		}
 		rcu_read_unlock();
 		BUG_ON(count != test_count);
-		mas_set_range(mas, start, end - 1);
 	}
 #endif
-	mas_store_prealloc(mas, NULL);
+	/* Point of no return */
+	mas_set_range(mas, start, end - 1);
+	if (mas_store_gfp(mas, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
 	mm->map_count -= count;
 	/*
 	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
@@ ... @@
 	__mt_destroy(&mt_detach);
 start_split_failed:
 map_count_exceeded:
-	mas_destroy(mas);
 	return error;
 }