// SPDX-License-Identifier: GPL-2.0
/*
 * Author:  Xiang Gao <gaoxiang@loongson.cn>
 *          Huacai Chen <chenhuacai@loongson.cn>
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <asm/bootinfo.h>
#include <asm/loongson.h>
#include <asm/numa.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/time.h>

int numa_off;
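
/*
 * node_data[] holds each node's pglist_data; node_distances[] is the
 * SLIT-style distance matrix behind node_distance().
 */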
struct pglist_data *node_data[MAX_NUMNODES];
unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];

EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(node_distances);

static struct numa_meminfo numa_meminfo;
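
/*
 * cpus_on_node[] tracks each node's CPUs by logical id,
 * phys_cpus_on_node[] by physical id; both are filled by
 * early_numa_add_cpu() during boot and kept up to date by
 * numa_add_cpu()/numa_remove_cpu() below.
 */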
cpumask_t cpus_on_node[MAX_NUMNODES];
cpumask_t phys_cpus_on_node[MAX_NUMNODES];
EXPORT_SYMBOL(cpus_on_node);

/*
 * apicid, cpu, node mappings
 */
s16 __cpuid_to_node[CONFIG_NR_CPUS] = {
	[0 ... CONFIG_NR_CPUS - 1] = NUMA_NO_NODE
};
EXPORT_SYMBOL(__cpuid_to_node);

nodemask_t numa_nodes_parsed __initdata;

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
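
/*
 * Hooks for the percpu first-chunk allocator: pcpu_cpu_to_node() and
 * pcpu_cpu_distance() let it group per-CPU areas by node, and
 * pcpu_populate_pte() backs the page-mapped first chunk by populating
 * kernel page tables for the percpu address range.
 */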
static int __init pcpu_cpu_to_node(int cpu)
{
	return early_cpu_to_node(cpu);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

void __init pcpu_populate_pte(unsigned long addr)
{
	populate_kernel_pte(addr);
}
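
/*
 * Choose the first-chunk allocator: embedding in the linear mapping by
 * default, but page-mapped on systems with eight or more nodes,
 * presumably because embedded chunks spanning widely separated node
 * address spaces would waste too much address range.
 */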
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc == PCPU_FC_AUTO) {
		if (nr_node_ids >= 8)
			pcpu_chosen_fc = PCPU_FC_PAGE;
		else
			pcpu_chosen_fc = PCPU_FC_EMBED;
	}

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
					    pcpu_cpu_distance, pcpu_cpu_to_node);
		if (rc < 0)
			pr_warn("%s allocator failed (%d), falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif

/*
 * Get nodeid by logical cpu number.
 * __cpuid_to_node maps physical cpu id to node, so we
 * should use cpu_logical_map(cpu) to index it.
 *
 * This routine is only used in the early boot phase;
 * once setup_per_cpu_areas() has run and numa_node is
 * initialized, cpu_to_node() is used instead.
 */
int early_cpu_to_node(int cpu)
{
	int physid = cpu_logical_map(cpu);

	if (physid < 0)
		return NUMA_NO_NODE;

	return __cpuid_to_node[physid];
}

void __init early_numa_add_cpu(int cpuid, s16 node)
{
	int cpu = __cpu_number_map[cpuid];

	if (cpu < 0)
		return;

	cpumask_set_cpu(cpu, &cpus_on_node[node]);
	cpumask_set_cpu(cpuid, &phys_cpus_on_node[node]);
}
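
/*
 * Hotplug-time counterparts of early_numa_add_cpu(): keep cpus_on_node[]
 * in sync as CPUs go online and offline.
 */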
void numa_add_cpu(unsigned int cpu)
{
	int nid = cpu_to_node(cpu);
	cpumask_set_cpu(cpu, &cpus_on_node[nid]);
}

void numa_remove_cpu(unsigned int cpu)
{
	int nid = cpu_to_node(cpu);
	cpumask_clear_cpu(cpu, &cpus_on_node[nid]);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}
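
	/* Trim to whole pages: round start up and end down to a page boundary. */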
	mi->blk[mi->nr_blks].start = PFN_ALIGN(start);
	mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1);
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;

	return 0;
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

static void __init alloc_node_data(int nid)
{
	void *nd;
	unsigned long nd_pa;
	size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);

	nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes for node_data (initial node: %d)\n", nd_sz, nid);
		return;
	}

	nd = __va(nd_pa);

	node_data[nid] = nd;
	memset(nd, 0, sizeof(pg_data_t));
}
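
/*
 * On Loongson NUMA machines each node's memory appears in its own
 * physical address window; nid_to_addrbase() returns that window's base.
 */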
static void __init node_mem_init(unsigned int node)
{
	unsigned long start_pfn, end_pfn;
	unsigned long node_addrspace_offset;

	node_addrspace_offset = nid_to_addrbase(node);
	pr_info("Node%d's addrspace_offset is 0x%lx\n",
		node, node_addrspace_offset);

	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
	pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
		node, start_pfn, end_pfn);

	alloc_node_data(node);
}

#ifdef CONFIG_ACPI_NUMA

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common). Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	int i;
	u64 numaram, biosram;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;

		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}
	max_pfn = max_low_pfn;
	biosram = max_pfn - absent_pages_in_range(0, max_pfn);
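
	/*
	 * Tolerate a shortfall of less than 1 MiB worth of pages
	 * (1 << (20 - PAGE_SHIFT)); anything larger means the NUMA
	 * tables miss real memory.
	 */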
	BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT)));
	return true;
}

static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
{
	static unsigned long num_physpages;

	num_physpages += (size >> PAGE_SHIFT);
	pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
		node, type, start, size);
	pr_info("       start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
		start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages);
	memblock_set_node(start, size, &memblock.memory, node);
}

/*
 * add_numamem_region
 *
 * Add a usable memory region described by the BIOS. The routine
 * computes the intersection of the BIOS region with each node's
 * region and adds it to that node's memblock pool.
 */
static void __init add_numamem_region(u64 start, u64 end, u32 type)
{
	u32 i;
	u64 ofs = start;

	if (start >= end) {
		pr_debug("Invalid region: %016llx-%016llx\n", start, end);
		return;
	}

	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = &numa_meminfo.blk[i];

		if (ofs > mb->end)
			continue;

		if (end > mb->end) {
			add_node_intersection(mb->nid, ofs, mb->end - ofs, type);
			ofs = mb->end;
		} else {
			add_node_intersection(mb->nid, ofs, end - ofs, type);
			break;
		}
	}
}
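
/*
 * Walk the EFI memory map and hand every RAM-backed region (including
 * ACPI-reclaimable and unusable ranges) to the owning node; purely
 * reserved types are only logged.
 */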
static void __init init_node_memblock(void)
{
	u32 mem_type;
	u64 mem_end, mem_start, mem_size;
	efi_memory_desc_t *md;

	/* Parse memory information and activate */
	for_each_efi_memory_desc(md) {
		mem_type = md->type;
		mem_start = md->phys_addr;
		mem_size = md->num_pages << EFI_PAGE_SHIFT;
		mem_end = mem_start + mem_size;

		switch (mem_type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_PERSISTENT_MEMORY:
		case EFI_CONVENTIONAL_MEMORY:
			add_numamem_region(mem_start, mem_end, mem_type);
			break;
		case EFI_PAL_CODE:
		case EFI_UNUSABLE_MEMORY:
		case EFI_ACPI_RECLAIM_MEMORY:
			add_numamem_region(mem_start, mem_end, mem_type);
			fallthrough;
		case EFI_RESERVED_TYPE:
		case EFI_RUNTIME_SERVICES_CODE:
		case EFI_RUNTIME_SERVICES_DATA:
		case EFI_MEMORY_MAPPED_IO:
		case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
			pr_info("Resvd: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
				mem_type, mem_start, mem_size);
			break;
		}
	}
}
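
/*
 * Default distances follow the ACPI SLIT convention: LOCAL_DISTANCE (10)
 * on the diagonal, REMOTE_DISTANCE (20) everywhere else.
 */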
static void __init numa_default_distance(void)
{
	int row, col;

	for (row = 0; row < MAX_NUMNODES; row++)
		for (col = 0; col < MAX_NUMNODES; col++) {
			if (col == row)
				node_distances[row][col] = LOCAL_DISTANCE;
			else
				/* We assume one node per package here!
				 *
				 * A SLIT should be used for multiple nodes
				 * per package to override the default setting.
				 */
				node_distances[row][col] = REMOTE_DISTANCE;
		}
}

/*
 * fake_numa_init() - For non-ACPI systems
 * Return: 0 on success, -errno on failure.
 */
static int __init fake_numa_init(void)
{
	phys_addr_t start = memblock_start_of_DRAM();
	phys_addr_t end = memblock_end_of_DRAM() - 1;

	node_set(0, numa_nodes_parsed);
	pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end);

	return numa_add_memblk(0, start, end + 1);
}
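
/*
 * Top-level NUMA bring-up: reset all CPU/node mappings, parse firmware
 * tables (SRAT/SLIT via ACPI, or fake a single node without ACPI),
 * register memory with memblock per node, then mark the parsed nodes
 * online.
 */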
int __init init_numa_memory(void)
{
	int i;
	int ret;
	int node;

	for (i = 0; i < NR_CPUS; i++)
		set_cpuid_to_node(i, NUMA_NO_NODE);

	numa_default_distance();
	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));

	/* Parse SRAT and SLIT if provided by firmware. */
	ret = acpi_disabled ? fake_numa_init() : acpi_numa_init();
	if (ret < 0)
		return ret;

	node_possible_map = numa_nodes_parsed;
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	init_node_memblock();
	if (!numa_meminfo_cover_memory(&numa_meminfo))
		return -EINVAL;

	for_each_node_mask(node, node_possible_map) {
		node_mem_init(node);
		node_set_online(node);
	}
	max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());

	setup_nr_node_ids();
	loongson_sysconf.nr_nodes = nr_node_ids;
	loongson_sysconf.cores_per_node = cpumask_weight(&phys_cpus_on_node[0]);

	return 0;
}

#endif
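
/*
 * zones_size[] entries are the highest PFN each zone may reach:
 * ZONE_DMA32 is capped at the 4 GiB boundary (MAX_DMA32_PFN) and
 * ZONE_NORMAL extends to max_low_pfn.
 */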
void __init paging_init(void)
{
	unsigned int node;
	unsigned long zones_size[MAX_NR_ZONES] = {0, };

	for_each_online_node(node) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);

		if (end_pfn > max_low_pfn)
			max_low_pfn = end_pfn;
	}
#ifdef CONFIG_ZONE_DMA32
	zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	zones_size[ZONE_NORMAL] = max_low_pfn;
	free_area_init(zones_size);
}

void __init mem_init(void)
{
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
	memblock_free_all();
}
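
/*
 * PCI core hook for NUMA-aware allocations: report the node that a
 * PCI bus is attached to.
 */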
int pcibus_to_node(struct pci_bus *bus)
{
	return dev_to_node(&bus->dev);
}
EXPORT_SYMBOL(pcibus_to_node);