/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"
int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
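/*
 * Accepted "numa=" boot options (see Documentation/kernel-parameters.txt),
 * for example:
 *
 *	numa=off	disable NUMA, fall back to a single node
 *	numa=fake=4	split the machine into 4 emulated nodes
 *	numa=noacpi	ignore the ACPI SRAT table
 */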
/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}
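/*
 * numa_cpu_node() is the firmware half of the cpu -> node mapping chain:
 * ACPI/AMD table parsing fills __apicid_to_node[] (indexed by local APIC
 * ID), and a CPU is resolved through its APIC ID here.  The result is
 * later cached in the per-CPU x86_cpu_to_node_map below.
 */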
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
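/*
 * Note the two phases in numa_set_node() above: before the per-CPU areas
 * exist, the mapping is written into the early x86_cpu_to_node_map array;
 * afterwards the real per-CPU variable and the generic
 * set_cpu_numa_node() state are updated instead.
 */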
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			   nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}
/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}
/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
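/*
 * Illustrative only: a hypothetical platform init_func describing two
 * 4 GB nodes would register them roughly like this (end addresses are
 * exclusive):
 *
 *	numa_add_memblk(0, 0x0ULL, 0x100000000ULL);		// node 0: 0-4G
 *	numa_add_memblk(1, 0x100000000ULL, 0x200000000ULL);	// node 1: 4-8G
 *	node_set(0, numa_nodes_parsed);
 *	node_set(1, numa_nodes_parsed);
 */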
/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
					      MEMBLOCK_ALLOC_ACCESSIBLE);
		if (!nd_pa) {
			pr_err("Cannot find %zu bytes in node %d\n",
			       nd_size, nid);
			return;
		}
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}
/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty or non-existent block */
		if (bi->start >= bi->end ||
		    !memblock_overlaps_region(&memblock.memory,
			bi->start, bi->end - bi->start))
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					   bi->nid, bi->start, bi->end - 1,
					   bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
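/*
 * Worked example (illustrative): with max_pfn at 8 GB, the parsed blocks
 *
 *	node 0: [0G, 2G)   node 0: [2G, 4G)   node 1: [4G, 16G)
 *
 * are first trimmed to the 8 GB limit, then the two node 0 blocks are
 * merged, leaving:
 *
 *	node 0: [0G, 4G)   node 1: [4G, 8G)
 */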
/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}
/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}
static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}
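/*
 * The distance table is a flat cnt x cnt byte matrix; the distance from
 * node 'from' to node 'to' lives at numa_distance[from * cnt + to].  A
 * freshly initialized two-node table therefore looks like:
 *
 *	  to:     0   1
 *	from 0:  10  20
 *	from 1:  20  10
 *
 * with 10 == LOCAL_DISTANCE and 20 == REMOTE_DISTANCE.
 */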
/**
 * numa_set_distance - Set NUMA distance from one NUMA to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one which is large enough to accommodate all the
 * currently known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or if @distance doesn't make sense, the
 * call is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
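/*
 * Typical caller (for reference): the ACPI SRAT/SLIT parser walks the
 * firmware locality matrix and feeds each entry through roughly
 * numa_set_distance(i, j, slit->entry[i * locality_count + j]).
 */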
int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
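/*
 * About the slack above: 1 << (20 - PAGE_SHIFT) is 1 MB expressed in
 * pages, i.e. 256 pages with the usual 4 KB pages (PAGE_SHIFT == 12).
 * The NUMA layout is only rejected when it misses more memory than that.
 */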
/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_memblock(reserved, mb_region) {
		if (mb_region->nid != MAX_NUMNODES)
			node_set(mb_region->nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire nodes
	 * become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early during boot the kernel has to use some memory, e.g.
	 * for loading the kernel image.  We cannot prevent this anyway,
	 * so any node the kernel resides on must be made un-hotpluggable.
	 *
	 * And when we come here, allocating node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid mapping,
	 * check whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}
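/*
 * For example, with nodes 0 and 1 online and four CPUs still unmapped,
 * the loop above assigns them nodes 0, 1, 0, 1 in turn; next_node_in()
 * wraps around the online node mask automatically.
 */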
static int __init numa_init(int (*init_func)(void))
{
	int i, ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction here because
	 * if we configured ACPI_NUMA, we have parsed SRAT in init_func().
	 * It is ok to do the reset even if we didn't configure ACPI_NUMA,
	 * or if ACPI NUMA init fails and we fall back to dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}
/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}
/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single-node config encompassing whole memory
 * and never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}
static __init int find_near_online_node(int node)
{
	int n, val, best_node = -1, min_val = INT_MAX;

	for_each_online_node(n) {
		val = node_distance(node, n);
		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}
	return best_node;
}
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */
#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, cpumask_pr_args(mask));
}
# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
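/*
 * Typical use (illustrative): iterate over the CPUs of a node with
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, cpumask_of_node(nid))
 *		do_something(cpu);	// do_something() is hypothetical
 */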
#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
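/*
 * Memory hotplug support: when a range is hot-added, the VM needs to know
 * which node it belongs to.  numa_meminfo, which is kept past boot when
 * CONFIG_MEMORY_HOTPLUG is set (see the __initdata conditional at the top
 * of this file), provides that physical address -> nid lookup below.
 */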
#ifdef CONFIG_MEMORY_HOTPLUG
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif