 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 			    int batchcount, int shared)
 {
-	struct ccupdate_struct new;
+	struct ccupdate_struct *new;
 	int i;
 
-	memset(&new.new, 0, sizeof(new.new));
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return -ENOMEM;
+
 	for_each_online_cpu(i) {
-		new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
+		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
 						batchcount);
-		if (!new.new[i]) {
+		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
-				kfree(new.new[i]);
+				kfree(new->new[i]);
+			kfree(new);
 			return -ENOMEM;
 		}
 	}
-	new.cachep = cachep;
+	new->cachep = cachep;
-	on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
+	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
 	check_irq_on();
 	cachep->batchcount = batchcount;
 	cachep->limit = limit;
 	cachep->shared = shared;
 	for_each_online_cpu(i) {
-		struct array_cache *ccold = new.new[i];
+		struct array_cache *ccold = new->new[i];
 		if (!ccold)
 			continue;
 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
 		kfree(ccold);
 	}
-
+	kfree(new);
 	return alloc_kmemlist(cachep);
 }
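
For context (not part of the diff itself): the swap that on_each_cpu() broadcasts here is performed by do_ccupdate_local(). A sketch of that helper and its argument struct, approximating slab.c of this era, follows; treat the exact field layout as an approximation rather than a verbatim quote.

/* Context sketch, approximating slab.c of this era -- one slot per
 * possible CPU, which is why the struct grows with NR_CPUS, becomes
 * too large for the kernel stack, and is moved to kzalloc()'d memory
 * by the hunk above. */
struct ccupdate_struct {
	struct kmem_cache *cachep;
	struct array_cache *new[NR_CPUS];
};

/* Runs on every CPU with interrupts off: install the freshly
 * allocated array_cache for the local CPU and leave the old pointer
 * behind in new->new[smp_processor_id()]. */
static void do_ccupdate_local(void *info)
{
	struct ccupdate_struct *new = info;
	struct array_cache *old;

	check_irq_off();
	old = cpu_cache_get(new->cachep);

	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
	new->new[smp_processor_id()] = old;
}

Because the helper swaps rather than overwrites, the second for_each_online_cpu() loop in the hunk frees the *old* per-CPU caches: the node's list_lock is taken around free_block() so that any objects still sitting in the old array_cache are returned to the slab lists before the cache itself is kfree()'d.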
 		show_symbol(m, n[2*i+2]);
 		seq_putc(m, '\n');
 	}
+
 	return 0;
 }
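
The second hunk is cosmetic: it adds the conventional blank line before the final return of the seq_file ->show routine. For readers unfamiliar with the pattern, a minimal self-contained sketch of such a callback follows (hypothetical names, not taken from slab.c):

#include <linux/seq_file.h>

/* Hypothetical ->show callback illustrating the convention used
 * above: write one record via the seq_* helpers, then return 0 to
 * tell the seq_file core the output is complete and valid. */
static int example_show(struct seq_file *m, void *p)
{
	unsigned long count = 42;	/* made-up value for illustration */

	seq_printf(m, "count: %lu", count);
	seq_putc(m, '\n');

	return 0;
}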