static void fill_pool(void)
{
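+	/*
+	 * Refills must work from atomic context: GFP_ATOMIC avoids
+	 * sleeping, and __GFP_NORETRY | __GFP_NOWARN keep a failed
+	 * refill cheap and silent; the pool is simply refilled on a
+	 * later fill_pool() call.
+	 */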
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-	struct debug_obj *new, *obj;
+	struct debug_obj *obj;
	unsigned long flags;

	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * Recheck with the lock held as the worker thread might have
	 * won the race and freed the global free list already.
	 */
-	if (obj_nr_tofree) {
+	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_nr_tofree--;
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	if (unlikely(!obj_cache))
		return;
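
+	/*
+	 * Refill in batches: allocate up to ODEBUG_BATCH_SIZE objects
+	 * outside of pool_lock, then add the whole batch under a single
+	 * lock acquisition, taking the lock once per batch instead of
+	 * once per object.
+	 */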
	while (obj_pool_free < debug_objects_pool_min_level) {
+		struct debug_obj *new[ODEBUG_BATCH_SIZE];
+		int cnt;
+
-		new = kmem_cache_zalloc(obj_cache, gfp);
-		if (!new)
+		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
+			if (!new[cnt])
+				break;
+		}
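+		/* Nothing allocated at all? Retry on a later fill_pool() call. */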
+		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
-		hlist_add_head(&new->node, &obj_pool);
-		debug_objects_allocated++;
-		obj_pool_free++;
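+		/* Flush the whole batch into the pool with pool_lock held once. */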
+		while (cnt) {
+			hlist_add_head(&new[--cnt]->node, &obj_pool);
+			debug_objects_allocated++;
+			obj_pool_free++;
+		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

@@ ... @@ static void free_obj_work(struct work_struct *work)
	/*
	 * The objs on the pool list might be allocated before the work is
-	 * run, so recheck if pool list it full or not, if not fill pool
-	 * list from the global free list
+	 * run, so recheck whether the pool list is full; if not, refill it
+	 * from the global free list.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);