// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)						\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +				\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
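
/*
 * Worked example, assuming a 64KB PERCPU_PAGE_SIZE: NODEDATA_ALIGN(addr, 3)
 * rounds addr up to the next 1MB boundary and then adds 3 * 64KB, so
 * consecutive nodes start their per-node structures at different offsets
 * within the 32MB aliasing window.
 */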

/**
 * build_node_maps - callback to setup mem_data structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Detect extents of each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long spfn, epfn, end = start + len;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;

	if (!mem_data[node].min_pfn) {
		mem_data[node].min_pfn = spfn;
		mem_data[node].max_pfn = epfn;
	} else {
		mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
		mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
	}

	return 0;
}

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for_each_possible_early_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}

/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for_each_possible_early_cpu(cpu) {
		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

		if (node != node_cpuid[cpu].nid)
			continue;

		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
			__per_cpu_start;

		/*
		 * The percpu area for cpu0 is moved from the __init area
		 * which is setup by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and its
		 * virtual address isn't insanely far from other
		 * percpu areas which is important for congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA,
				    (unsigned long)cpu_data -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
#endif
	return cpu_data;
}

#ifdef CONFIG_SMP
/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *uninitialized_var(gi);
	unsigned int *cpu_map;
	void *base;
	unsigned long base_offset;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int node, prev_node, unit, nr_units, rc;

	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	cpu_map = ai->groups[0].cpu_map;

	/* determine base */
	base = (void *)ULONG_MAX;
	for_each_possible_cpu(cpu)
		base = min(base,
			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
	base_offset = (void *)__per_cpu_start - base;

	/* build cpu_map, units are grouped by node */
	unit = 0;
	for_each_node(node)
		for_each_possible_cpu(cpu)
			if (node == node_cpuid[cpu].nid)
				cpu_map[unit++] = cpu;
	nr_units = unit;
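
	/*
	 * Because cpu_map was filled node by node above, each node's CPUs
	 * occupy one contiguous run of units; the group-building loop further
	 * down relies on that when it starts a new group at each node change.
	 */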

	/* set basic parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);
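
	/*
	 * Each percpu unit is one PERCPU_PAGE_SIZE chunk: the static percpu
	 * section comes first, then the space reserved for modules, and
	 * whatever remains (dyn_size) is handed to the dynamic percpu
	 * allocator.
	 */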
	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = PERCPU_PAGE_SIZE;
	ai->atom_size = PAGE_SIZE;
	ai->alloc_size = PERCPU_PAGE_SIZE;

	/*
	 * CPUs are put into groups according to node.  Walk cpu_map
	 * and create new groups at node boundaries.
	 */
	prev_node = -1;
	ai->nr_groups = 0;
	for (unit = 0; unit < nr_units; unit++) {
		cpu = cpu_map[unit];
		node = node_cpuid[cpu].nid;

		if (node == prev_node) {
			gi->nr_units++;
			continue;
		}
		prev_node = node;

		gi = &ai->groups[ai->nr_groups++];
		gi->nr_units = 1;
		gi->base_offset = __per_cpu_offset[cpu] + base_offset;
		gi->cpu_map = &cpu_map[unit];
	}

	rc = pcpu_setup_first_chunk(ai, base);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#endif

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	/* Carve the zeroed region up in the order compute_pernodesize() sized it. */
	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long spfn, epfn;
	unsigned long pernodesize = 0, pernode;

	spfn = start >> PAGE_SHIFT;
	epfn = (start + len) >> PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size;
	int node;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		memblock_reserve(base, size);
	}
}

static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here.
	 * node_online_map is not set for hot-added nodes at this time,
	 * because we are halfway through initialization of the new node's
	 * structures.  If for_each_online_node() is used, a new node's
	 * pg_data_ptrs will not be initialized. Instead of using it,
	 * pgdat_list[] is checked.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for_each_possible_early_cpu(cpu) {
		node = node_cpuid[cpu].nid;
		per_cpu(ia64_cpu_info, cpu).node_data =
			mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&ia64_cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node, but fall back to any other node when __alloc_bootmem_node fails
 *	for the best node
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)
		bestnode = anynode;

	ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/**
 * memory_less_nodes - allocate and initialize CPU only nodes pernode
 *	information.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}
}

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();
	efi_memmap_walk(filter_memory, register_active_ranges);

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);
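
	/*
	 * Any node that still has no usable memory keeps its bit in
	 * memory_less_mask; memory_less_nodes() below allocates its pernode
	 * area from the nearest node that does have memory.
	 */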
	for_each_online_node(node)
		if (mem_data[node].min_pfn)
			node_clear(node, memory_less_mask);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

	if (first_time) {
		first_time = 0;
		for_each_possible_early_cpu(cpu)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}
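
	/*
	 * Clip each SRAT memory block against [start, end) and hand every
	 * surviving piece to the callback along with the nid it belongs to,
	 * so a block that straddles nodes is reported once per node.
	 */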
	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);
	}
}

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_pfn = 0;
	int node;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

#ifdef CONFIG_VIRTUAL_MEM_MAP
	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) VMALLOC_END;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif
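
	/*
	 * For each node, point its mem_map at the node's slice of the
	 * virtual mem_map (when CONFIG_VIRTUAL_MEM_MAP is set) and track the
	 * highest pfn seen across all nodes.
	 */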
	for_each_online_node(node) {
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;
	}

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init_nodes(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}

void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
#endif