@@ ... @@ static int core_imc_mem_init(int cpu, int size)
 {
 	int nid, rc = 0, core_id = (cpu / threads_per_core);
 	struct imc_mem_info *mem_info;
+	struct page *page;
 
 	/*
 	 * alloc_pages_node() will allocate memory for core in the
@@ ... @@ static int core_imc_mem_init(int cpu, int size)
 	mem_info->id = core_id;
 
 	/* We need only vbase for core counters */
-	mem_info->vbase = page_address(alloc_pages_node(nid,
-			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-			__GFP_NOWARN, get_order(size)));
-	if (!mem_info->vbase)
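+	/*
+	 * alloc_pages_node() can fail and return NULL, and page_address()
+	 * of a failed allocation is not guaranteed to be NULL, so check
+	 * the struct page pointer itself before using it.
+	 */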
+	page = alloc_pages_node(nid,
+			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+			__GFP_NOWARN, get_order(size));
+	if (!page)
 		return -ENOMEM;
+	mem_info->vbase = page_address(page);
 
 	/* Init the mutex */
 	core_imc_refc[core_id].id = core_id;
@@ ... @@ static int thread_imc_mem_alloc(int cpu_id, int size)
 	int nid = cpu_to_node(cpu_id);
 
 	if (!local_mem) {
+		struct page *page;
 		/*
 		 * This case could happen only once at start, since we dont
 		 * free the memory in cpu offline path.
 		 */
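+		/* Same pattern as core_imc_mem_init(): check the page first. */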
-		local_mem = page_address(alloc_pages_node(nid,
+		page = alloc_pages_node(nid,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-		if (!local_mem)
+				__GFP_NOWARN, get_order(size));
+		if (!page)
 			return -ENOMEM;
+		local_mem = page_address(page);
 		per_cpu(thread_imc_mem, cpu_id) = local_mem;
 	}
@@ ... @@ static int trace_imc_mem_alloc(int cpu_id, int size)
 	int core_id = (cpu_id / threads_per_core);
 
 	if (!local_mem) {
-		local_mem = page_address(alloc_pages_node(phys_id,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-				__GFP_NOWARN, get_order(size)));
-		if (!local_mem)
+		struct page *page;
+
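+		/* As above, test for allocation failure before page_address(). */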
+		page = alloc_pages_node(phys_id,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				__GFP_NOWARN, get_order(size));
+		if (!page)
 			return -ENOMEM;
+		local_mem = page_address(page);
 		per_cpu(trace_imc_mem, cpu_id) = local_mem;
 
 		/* Initialise the counters for trace mode */