return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}
-static int of_node_numa_domain(struct device_node *device)
+static int of_node_to_nid(struct device_node *device)
{
- int numa_domain;
+ int nid;
unsigned int *tmp;
if (min_common_depth == -1)
return 0;
tmp = of_get_associativity(device);
if (tmp && (tmp[0] >= min_common_depth)) {
- numa_domain = tmp[min_common_depth];
+ nid = tmp[min_common_depth];
} else {
dbg("WARNING: no NUMA information for %s\n",
device->full_name);
- numa_domain = 0;
+ nid = 0;
}
- return numa_domain;
+ return nid;
}
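For illustration only, not part of the patch: a minimal userspace model of the lookup above. It assumes "ibm,associativity" is an array of cells whose first cell holds the entry count and whose cell at index min_common_depth carries the node id; the helper name, the sample array, and the depth value are invented.

/* Standalone model of the of_node_to_nid() lookup; values are made up. */
#include <stdio.h>

static int min_common_depth = 3;	/* normally derived from firmware */

static int assoc_to_nid(const unsigned int *assoc)
{
	/* assoc[0] is the number of cells that follow; fall back to node 0
	 * when the property is missing or too short, as the patch does. */
	if (assoc && assoc[0] >= (unsigned int)min_common_depth)
		return assoc[min_common_depth];
	return 0;
}

int main(void)
{
	/* e.g. "ibm,associativity" = <4 0 0 1 2>: depth 3 picks the value 1 */
	unsigned int assoc[] = { 4, 0, 0, 1, 2 };

	printf("nid = %d\n", assoc_to_nid(assoc));
	return 0;
}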
/*
* Figure out to which domain a cpu belongs and stick it there.
* Return the id of the domain used.
*/
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
- int numa_domain = 0;
+ int nid = 0;
struct device_node *cpu = find_cpu_node(lcpu);
if (!cpu) {
WARN_ON(1);
goto out;
}
- numa_domain = of_node_numa_domain(cpu);
+ nid = of_node_to_nid(cpu);
- if (numa_domain >= num_online_nodes()) {
+ if (nid >= num_online_nodes()) {
/*
* POWER4 LPAR uses 0xffff as invalid node,
* dont warn in this case.
*/
- if (numa_domain != 0xffff)
+ if (nid != 0xffff)
printk(KERN_ERR "WARNING: cpu %ld "
"maps to invalid NUMA node %d\n",
- lcpu, numa_domain);
- numa_domain = 0;
+ lcpu, nid);
+ nid = 0;
}
out:
- node_set_online(numa_domain);
+ node_set_online(nid);
- map_cpu_to_node(lcpu, numa_domain);
+ map_cpu_to_node(lcpu, nid);
of_node_put(cpu);
- return numa_domain;
+ return nid;
}
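For illustration only: a userspace sketch of the clamp numa_setup_cpu() applies, where a node id at or beyond the online node count falls back to node 0 and the 0xffff marker used by POWER4 LPARs is excluded from the warning. ONLINE_NODES, clamp_nid() and the sample ids are invented stand-ins.

/* Model of the invalid-node clamp; the online-node count is made up. */
#include <stdio.h>

#define ONLINE_NODES 2			/* stand-in for num_online_nodes() */

static int clamp_nid(long cpu, int nid)
{
	if (nid >= ONLINE_NODES) {
		/* POWER4 LPAR uses 0xffff as the invalid node; stay quiet. */
		if (nid != 0xffff)
			fprintf(stderr, "WARNING: cpu %ld maps to invalid NUMA node %d\n",
				cpu, nid);
		nid = 0;
	}
	return nid;
}

int main(void)
{
	printf("%d\n", clamp_nid(0, 1));	/* valid id: stays 1 */
	printf("%d\n", clamp_nid(1, 0xffff));	/* LPAR marker: silently 0 */
	printf("%d\n", clamp_nid(2, 7));	/* bogus id: warns, forced to 0 */
	return 0;
}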
static int cpu_numa_callback(struct notifier_block *nfb,
* with larger node ids. In that case we force the cpu into node 0.
*/
for_each_cpu(i) {
- int numa_domain;
+ int nid;
cpu = find_cpu_node(i);
if (cpu) {
- numa_domain = of_node_numa_domain(cpu);
+ nid = of_node_to_nid(cpu);
of_node_put(cpu);
- if (numa_domain < MAX_NUMNODES &&
- max_domain < numa_domain)
- max_domain = numa_domain;
+ if (nid < MAX_NUMNODES &&
+ max_domain < nid)
+ max_domain = nid;
}
}
while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
unsigned long start;
unsigned long size;
- int numa_domain;
+ int nid;
int ranges;
unsigned int *memcell_buf;
unsigned int len;
start = read_n_cells(n_mem_addr_cells, &memcell_buf);
size = read_n_cells(n_mem_size_cells, &memcell_buf);
- numa_domain = of_node_numa_domain(memory);
+ nid = of_node_to_nid(memory);
- if (numa_domain >= MAX_NUMNODES) {
- if (numa_domain != 0xffff)
+ if (nid >= MAX_NUMNODES) {
+ if (nid != 0xffff)
printk(KERN_ERR "WARNING: memory at %lx maps "
"to invalid NUMA node %d\n", start,
- numa_domain);
- numa_domain = 0;
+ nid);
+ nid = 0;
}
- if (max_domain < numa_domain)
- max_domain = numa_domain;
+ if (max_domain < nid)
+ max_domain = nid;
if (!(size = numa_enforce_memory_limit(start, size))) {
if (--ranges)
continue;
}
- add_region(numa_domain, start >> PAGE_SHIFT,
+ add_region(nid, start >> PAGE_SHIFT,
size >> PAGE_SHIFT);
if (--ranges)
{
struct device_node *memory = NULL;
nodemask_t nodes;
- int numa_domain = 0;
+ int nid = 0;
if (!numa_enabled || (min_common_depth < 0))
- return numa_domain;
+ return nid;
while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
unsigned long start, size;
ha_new_range:
start = read_n_cells(n_mem_addr_cells, &memcell_buf);
size = read_n_cells(n_mem_size_cells, &memcell_buf);
- numa_domain = of_node_numa_domain(memory);
+ nid = of_node_to_nid(memory);
/* Domains not present at boot default to 0 */
- if (!node_online(numa_domain))
- numa_domain = any_online_node(NODE_MASK_ALL);
+ if (!node_online(nid))
+ nid = any_online_node(NODE_MASK_ALL);
if ((scn_addr >= start) && (scn_addr < (start + size))) {
of_node_put(memory);
- goto got_numa_domain;
+ goto got_nid;
}
if (--ranges) /* process all ranges in cell */
BUG(); /* section address should be found above */
/* Temporary code to ensure that returned node is not empty */
-got_numa_domain:
+got_nid:
nodes_setall(nodes);
- while (NODE_DATA(numa_domain)->node_spanned_pages == 0) {
- node_clear(numa_domain, nodes);
- numa_domain = any_online_node(nodes);
+ while (NODE_DATA(nid)->node_spanned_pages == 0) {
+ node_clear(nid, nodes);
+ nid = any_online_node(nodes);
}
- return numa_domain;
+ return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
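For illustration only: a userspace model of the memoryless-node fallback in the hot-add path above, which clears empty nodes from a mask until one with spanned pages is found. The node count, the page table, and the helper names are invented, and at least one node is assumed to have memory, as the kernel loop also assumes.

/* Model of the "returned node is not empty" loop; data is made up. */
#include <stdio.h>

#define MAX_NODES 4

/* Stand-in for NODE_DATA(nid)->node_spanned_pages */
static unsigned long spanned_pages[MAX_NODES] = { 0, 0, 4096, 4096 };

/* Stand-in for any_online_node(): lowest node still set in the mask */
static int first_set_node(unsigned int mask)
{
	for (int nid = 0; nid < MAX_NODES; nid++)
		if (mask & (1u << nid))
			return nid;
	return -1;
}

int main(void)
{
	unsigned int nodes = (1u << MAX_NODES) - 1;	/* nodes_setall() */
	int nid = 0;					/* candidate from the device tree */

	/* Walk away from nodes with no memory behind them. */
	while (spanned_pages[nid] == 0) {
		nodes &= ~(1u << nid);			/* node_clear() */
		nid = first_set_node(nodes);		/* any_online_node() */
	}

	printf("hot-added section assigned to node %d\n", nid);
	return 0;
}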