/*
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/lmb.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>
static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
                                               unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify node id, iff we started creating NUMA nodes.
         * We want to continue from where we left off the last time.
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}
/*
 * get_active_region_work_fn - A helper function for get_node_active_region
 *      Returns datax set to the start_pfn and end_pfn if they contain
 *      the initial value of datax->start_pfn between them
 * @start_pfn: start page (inclusive) of region to check
 * @end_pfn: end page (exclusive) of region to check
 * @datax: comes in with ->start_pfn set to value to search for and
 *      goes out with active range if it contains it
 *
 * Returns 1 if search value is in range else 0
 */
static int __init get_active_region_work_fn(unsigned long start_pfn,
                                            unsigned long end_pfn, void *datax)
{
        struct node_active_region *data;

        data = (struct node_active_region *)datax;

        if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
                data->start_pfn = start_pfn;
                data->end_pfn = end_pfn;
                return 1;
        }
        return 0;
}
/*
 * get_node_active_region - Return active region containing start_pfn
 * Active range returned is empty if none found.
 * @start_pfn: The page to return the region for.
 * @node_ar: Returned set to the active region containing start_pfn
 */
static void __init get_node_active_region(unsigned long start_pfn,
                                          struct node_active_region *node_ar)
{
        int nid = early_pfn_to_nid(start_pfn);

        node_ar->nid = nid;
        node_ar->start_pfn = start_pfn;
        node_ar->end_pfn = start_pfn;
        work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}
static void __cpuinit map_cpu_to_node(int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */
/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}
/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools).
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
        const u32 *prop;
        u32 len;

        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;
        return prop;
}
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const unsigned int *tmp;

        if (min_common_depth == -1)
                goto out;

        tmp = of_get_associativity(device);
        if (!tmp)
                goto out;

        if (tmp[0] >= min_common_depth)
                nid = tmp[min_common_depth];

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;
out:
        return nid;
}
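/*
 * Illustrative example (values made up for illustration): with
 * min_common_depth == 4 and a device whose ibm,associativity property
 * reads <4 0 0 0 2>, tmp[0] is 4, so the node id is tmp[4] == 2.
 */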
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);
/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
        int depth, index;
        const unsigned int *ref_points;
        struct device_node *rtas_root;
        unsigned int len;
        struct device_node *options;

        rtas_root = of_find_node_by_path("/rtas");
        if (!rtas_root)
                return -1;

        /*
         * This property is 2 32-bit integers, each representing a level of
         * depth in the associativity nodes. The first is for an SMP
         * configuration (should be all 0's) and the second is for a normal
         * NUMA configuration.
         */
        index = 1;
        ref_points = of_get_property(rtas_root,
                        "ibm,associativity-reference-points", &len);

        /*
         * For form 1 affinity information we want the first field.
         */
        options = of_find_node_by_path("/options");
        if (options) {
                const char *str;

                str = of_get_property(options, "ibm,associativity-form", NULL);
                if (str && !strcmp(str, "1"))
                        index = 0;
                of_node_put(options);
        }

        if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
                depth = ref_points[index];
        } else {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                depth = -1;
        }
        of_node_put(rtas_root);

        return depth;
}
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}
static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | **buf;
                (*buf)++;
        }
        return result;
}
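/*
 * For example, with n == 2 and the two cells <0x1 0x0>, read_n_cells()
 * returns 0x100000000 and leaves *buf pointing past both cells.
 */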
struct of_drconf_cell {
        u64     base_addr;
        u32     drc_index;
        u32     reserved;
        u32     aa_index;
        u32     flags;
};

#define DRCONF_MEM_ASSIGNED     0x00000008
#define DRCONF_MEM_AI_INVALID   0x00000040
#define DRCONF_MEM_RESERVED     0x00000080

/*
 * Read the next lmb list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
        const u32 *cp;

        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

        cp = *cellp;
        drmem->drc_index = cp[0];
        drmem->reserved = cp[1];
        drmem->aa_index = cp[2];
        drmem->flags = cp[3];

        *cellp = cp + 4;
}
/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of lmb
 * list entries followed by N lmb list entries. Each lmb list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
        const u32 *prop;
        u32 len, entries;

        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        entries = *prop++;

        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything.
         */
        if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
                return 0;

        *dm = prop;
        return entries;
}
/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
        const u32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        return read_n_cells(n_mem_size_cells, &prop);
}
struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
        const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
{
        const u32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;

        aa->n_arrays = *prop++;
        aa->array_sz = *prop++;

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
                                   struct assoc_arrays *aa)
{
        int default_nid = 0;
        int nid = default_nid;
        int index;

        if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
                nid = aa->arrays[index];

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;
        }

        return nid;
}
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
        int nid = 0;
        struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                goto out;
        }

        nid = of_node_to_nid_single(cpu);

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;
out:
        map_cpu_to_node(lcpu, nid);

        of_node_put(cpu);

        return nid;
}
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
                                       unsigned long action,
                                       void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use lmb_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit. Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= lmb_end_of_DRAM())
                return size;

        if (start >= lmb_end_of_DRAM())
                return 0;

        return lmb_end_of_DRAM() - start;
}
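/*
 * Example of the three cases above, assuming lmb_end_of_DRAM() is 4GB:
 * a region starting at 3GB of size 512MB is returned unchanged, a region
 * starting at 3GB of size 2GB is truncated to 1GB, and a region starting
 * at 5GB yields 0 and should be discarded.
 */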
/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property.
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory a corresponding
         * entry in the linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) pairs.
         * Read the counter from linux,drconf-usable-memory.
         */
        return read_n_cells(n_mem_size_cells, usm);
}
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const u32 *dm, *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa;

        n = of_get_drconf_memory(memory, &dm);
        if (!n)
                return;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return;

        /* check if this is a kexec/kdump kernel */
        usm = of_get_usable_memory(memory);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if the reserved bit is set in flags (0x80)
                 * or if the block is not assigned to this partition (0x8)
                 */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                base = drmem.base_addr;
                size = lmb_size;
                ranges = 1;

                if (is_kexec_kdump) {
                        ranges = read_usm_ranges(&usm);
                        if (!ranges) /* there are no (base, size) pairs */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = read_n_cells(n_mem_addr_cells, &usm);
                                size = read_n_cells(n_mem_size_cells, &usm);
                        }
                        nid = of_drconf_to_nid_single(&drmem, &aa);
                        fake_numa_create_new_node(
                                ((base + size) >> PAGE_SHIFT), &nid);
                        node_set_online(nid);
                        sz = numa_enforce_memory_limit(base, size);
                        if (sz)
                                add_active_range(nid, base >> PAGE_SHIFT,
                                                 (base >> PAGE_SHIFT)
                                                 + (sz >> PAGE_SHIFT));
                } while (--ranges);
        }
}
static int __init parse_numa_properties(void)
{
        struct device_node *cpu = NULL;
        struct device_node *memory = NULL;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties. If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                add_active_range(nid, start >> PAGE_SHIFT,
                                 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each LMB listed in the ibm,dynamic-memory
         * property in the ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                parse_drconf_memory(memory);
                of_node_put(memory);
        }

        return 0;
}
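/*
 * A zero return from parse_numa_properties() means NUMA information was
 * found and active ranges were registered above; a negative return makes
 * do_init_bootmem() fall back to the flat setup_nonnuma() path below.
 */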
static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int i, nid = 0;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for (i = 0; i < lmb.memory.cnt; ++i) {
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);

                fake_numa_create_new_node(end_pfn, &nid);
                add_active_range(nid, start_pfn, end_pfn);
                node_set_online(nid);
        }
}
void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                             node_to_cpumask_map[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", nr_cpu_ids - 1);
                printk("\n");
        }
}
static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < lmb_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}
/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
                                        unsigned long align,
                                        unsigned long end_pfn)
{
        void *ret;
        int new_nid;
        unsigned long ret_paddr;

        ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret_paddr)
                ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

        if (!ret_paddr)
                panic("numa.c: cannot allocate %lu bytes for node %d",
                      size, nid);

        ret = __va(ret_paddr);

        /*
         * We initialize the nodes in numeric order: 0, 1, 2...
         * and hand over control from the LMB allocator to the
         * bootmem allocator. If this function is called for
         * node 5, then we know that all nodes <5 are using the
         * bootmem allocator instead of the LMB allocator.
         *
         * So, check the nid from which this allocation came
         * and double check to see if we need to use bootmem
         * instead of the LMB. We don't free the LMB memory
         * since it would be useless.
         */
        new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = __alloc_bootmem_node(NODE_DATA(new_nid),
                                           size, align, 0);

                dbg("alloc_bootmem %p %lx\n", ret, size);
        }

        /* zero the buffer whichever allocator it came from */
        memset(ret, 0, size);
        return ret;
}
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};
static void mark_reserved_regions_for_nid(int nid)
{
        struct pglist_data *node = NODE_DATA(nid);
        int i;

        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long physbase = lmb.reserved.region[i].base;
                unsigned long size = lmb.reserved.region[i].size;
                unsigned long start_pfn = physbase >> PAGE_SHIFT;
                unsigned long end_pfn = PFN_UP(physbase + size);
                struct node_active_region node_ar;
                unsigned long node_end_pfn = node->node_start_pfn +
                                             node->node_spanned_pages;

                /*
                 * Check to make sure that this lmb.reserved area is
                 * within the bounds of the node that we care about.
                 * Checking the nid of the start and end points is not
                 * sufficient because the reserved area could span the
                 * entire node.
                 */
                if (end_pfn <= node->node_start_pfn ||
                    start_pfn >= node_end_pfn)
                        continue;

                get_node_active_region(start_pfn, &node_ar);
                while (start_pfn < end_pfn &&
                       node_ar.start_pfn < node_ar.end_pfn) {
                        unsigned long reserve_size = size;
                        /*
                         * if reserved region extends past active region
                         * then trim size to active region
                         */
                        if (end_pfn > node_ar.end_pfn)
                                reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
                                        - physbase;
                        /*
                         * Only worry about *this* node, others may not
                         * yet have valid NODE_DATA().
                         */
                        if (node_ar.nid == nid) {
                                dbg("reserve_bootmem %lx %lx nid=%d\n",
                                    physbase, reserve_size, node_ar.nid);
                                reserve_bootmem_node(NODE_DATA(node_ar.nid),
                                                     physbase, reserve_size,
                                                     BOOTMEM_DEFAULT);
                        }
                        /*
                         * if reserved region is contained in the active region
                         * then done.
                         */
                        if (end_pfn <= node_ar.end_pfn)
                                break;

                        /*
                         * reserved region extends past the active region
                         * get next active region that contains this
                         * reserved region
                         */
                        start_pfn = node_ar.end_pfn;
                        physbase = start_pfn << PAGE_SHIFT;
                        size = size - reserve_size;
                        get_node_active_region(start_pfn, &node_ar);
                }
        }
}
void __init do_init_bootmem(void)
{
        int nid;

        max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                void *bootmem_vaddr;
                unsigned long bootmap_pages;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

                /*
                 * Allocate the node structure node local if possible
                 *
                 * Be careful moving this around, as it relies on all
                 * previous nodes' bootmem to be initialized and have
                 * all reserved areas marked.
                 */
                NODE_DATA(nid) = careful_zallocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_vaddr = careful_zallocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);

                dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

                init_bootmem_node(NODE_DATA(nid),
                                  __pa(bootmem_vaddr) >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                free_bootmem_with_active_regions(nid, end_pfn);
                /*
                 * Be very careful about moving this around. Future
                 * calls to careful_zallocation() depend on this getting
                 * done correctly.
                 */
                mark_reserved_regions_for_nid(nid);
                sparse_memory_present_with_active_regions(nid);
        }

        init_bootmem_done = 1;

        /*
         * Now bootmem is initialised we can create the node to cpumask
         * lookup tables and setup the cpu callback to populate them.
         */
        setup_node_to_cpumask_map();

        register_cpu_notifier(&ppc64_numa_nb);
        cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
                          (void *)(unsigned long)boot_cpuid);
}
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
}
static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
{
        const u32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;

        drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
        if (!drconf_cell_cnt)
                return -1;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return -1;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return -1;

        for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if it is reserved or not assigned to
                 * this partition
                 */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < drmem.base_addr)
                    || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(&drmem, &aa);
                break;
        }

        return nid;
}
/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each lmb.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid = -1;

        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start, size;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                of_node_put(memory);
                if (nid >= 0)
                        break;
        }

        return nid;
}
/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid, found = 0;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        if (NODE_DATA(nid)->node_spanned_pages)
                return nid;

        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages) {
                        found = 1;
                        break;
                }
        }

        BUG_ON(!found);
        return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */