/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"
int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
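
/*
 * Illustrative usage, not part of the original file: via the
 * early_param() hook above, numa_setup() parses boot parameters such as
 *
 *	numa=off	disable NUMA handling entirely
 *	numa=fake=4	emulate four nodes (CONFIG_NUMA_EMU)
 *	numa=noacpi	ignore the ACPI SRAT (CONFIG_ACPI_NUMA)
 */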
/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
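
/*
 * Worked example, added for illustration: if node_possible_map is
 * {0, 2}, the loop above leaves num == 2 and nr_node_ids becomes 3.
 * The map is sized by the highest possible node id plus one, not by
 * how many nodes are actually possible.
 */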
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
			   nid, start, end);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}
/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
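
/*
 * Sketch of typical caller behavior (illustrative, not from this
 * file): a platform parser such as an SRAT handler records each
 * detected range roughly like
 *
 *	node_set(nid, numa_nodes_parsed);
 *	if (numa_add_memblk(nid, start, end) < 0)
 *		return -EINVAL;
 *
 * where nid/start/end stand in for values decoded from firmware.
 */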
/* Initialize bootmem allocator for a node */
static void __init
setup_node_bootmem(int nid, unsigned long start, unsigned long end)
{
	const u64 nd_low = (u64)MAX_DMA_PFN << PAGE_SHIFT;
	const u64 nd_high = (u64)max_pfn_mapped << PAGE_SHIFT;
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	int tnid;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n",
	       nid, start, end);

	/*
	 * Try to allocate node data on local node and then fall back to
	 * all nodes. Never allocate in DMA zone.
	 */
	nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
						nd_size, SMP_CACHE_BYTES);
	if (nd_pa == MEMBLOCK_ERROR)
		nd_pa = memblock_find_in_range(nd_low, nd_high,
					       nd_size, SMP_CACHE_BYTES);
	if (nd_pa == MEMBLOCK_ERROR) {
		pr_err("Cannot find %zu bytes in node %d\n", nd_size, nid);
		return;
	}
	memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");

	/* report and initialize */
	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]\n",
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = __va(nd_pa);
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
	NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;

	node_set_online(nid);
}
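
/*
 * Note, added for illustration: nd_size rounds sizeof(pg_data_t) up to
 * a whole number of pages, so the NODE_DATA allocation above is always
 * page-sized, and the nd_low bound keeps it out of the DMA zone.
 */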
/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = (u64)max_pfn << PAGE_SHIFT;
	int i, j, k;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start >= bi->end) {
			numa_remove_memblk_from(i--, mi);
			continue;
		}

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
					       bi->nid, bi->start, bi->end,
					       bj->nid, bj->start, bj->end);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
					   bi->nid, bi->start, bi->end,
					   bj->start, bj->end);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = max(min(bi->start, bj->start), low);
			end = min(max(bi->end, bj->end), high);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
			       bi->nid, bi->start, bi->end, bj->start, bj->end,
			       start, end);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
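
/*
 * Worked example, added for illustration: two node 0 blocks
 * [0x0,0x40000000) and [0x40000000,0x80000000) pass the k-loop (no
 * other node owns memory in between) and are merged into a single
 * [0x0,0x80000000) block, with the second entry removed.
 */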
/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}
/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_x86_free_range(__pa(numa_distance),
					__pa(numa_distance) + size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}
static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT,
				      size, PAGE_SIZE);
	if (phys == MEMBLOCK_ERROR) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}
/**
 * numa_set_distance - Set NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If distance table
 * doesn't exist, one which is large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node at the time of
 * table creation or @distance doesn't make sense, the call is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
		printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n",
			    from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
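
/*
 * Worked example, added for illustration: the table is a flat
 * cnt x cnt byte matrix indexed [from * cnt + to], so after
 *
 *	numa_set_distance(0, 1, 21);
 *
 * __node_distance(0, 1) returns 21, while untouched pairs keep the
 * defaults: LOCAL_DISTANCE (10) on the diagonal, REMOTE_DISTANCE (20)
 * elsewhere.
 */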
/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	unsigned long numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		unsigned long s = mi->blk[i].start >> PAGE_SHIFT;
		unsigned long e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((long)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - (memblock_x86_hole_size(0,
					max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
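
/*
 * Slack arithmetic, spelled out for illustration: with 4k pages,
 * 1 << (20 - PAGE_SHIFT) is 1 << 8 = 256 pages, i.e. exactly 1MB, so
 * a configuration short by less than 1MB (the "lost 3 pages" case)
 * still passes the check above.
 */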
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++)
		memblock_x86_register_active_regions(mi->blk[i].nid,
					mi->blk[i].start >> PAGE_SHIFT,
					mi->blk[i].end >> PAGE_SHIFT);

	/* for out of order entries */
	sort_node_map();
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = (u64)max_pfn << PAGE_SHIFT;
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start < end)
			setup_node_bootmem(nid, start, end);
	}

	return 0;
}
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
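
/*
 * Worked example, added for illustration: with nodes {0,1} online and
 * cpus 0-3 still unmapped, the loop above assigns cpu0->node0,
 * cpu1->node1, cpu2->node0, cpu3->node1, wrapping back via
 * first_node() whenever next_node() runs off the end of the map.
 */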
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	remove_all_active_ranges();
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;
	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();
	return 0;
}
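
/*
 * Sketch of the init_func contract assumed above (illustrative): a
 * backend such as x86_acpi_numa_init() is expected to set detected
 * nodes in numa_nodes_parsed, register memory through numa_add_memblk()
 * and optionally record distances through numa_set_distance(), then
 * return 0 on success or -errno so numa_init() fails over to the next
 * method.
 */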
/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       0LU, max_pfn << PAGE_SHIFT);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);

	return 0;
}
/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}
static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;
	char buf[64];

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, buf);
}
# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
#if defined(CONFIG_X86_64) && defined(CONFIG_MEMORY_HOTPLUG)
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
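
/*
 * Usage note, added for illustration: the memory hotplug core is the
 * expected caller here, using roughly
 *
 *	nid = memory_add_physaddr_to_nid(start);
 *
 * to pick a node for newly added memory; when no recorded block
 * contains the address, the fallback is mi->blk[0].nid.
 */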