/*
 *  acpi_numa.c - ACPI NUMA support
 *
 *  Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/numa.h>

#define PREFIX "ACPI: "

#define ACPI_NUMA	0x80000000
#define _COMPONENT	ACPI_NUMA
ACPI_MODULE_NAME("numa");
static nodemask_t nodes_found_map = NODE_MASK_NONE;

/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };

unsigned char acpi_srat_revision __initdata;
int pxm_to_node(int pxm)
{
	if (pxm < 0)
		return NUMA_NO_NODE;
	return pxm_to_node_map[pxm];
}

int node_to_pxm(int node)
{
	if (node < 0)
		return PXM_INVAL;
	return node_to_pxm_map[node];
}
void __acpi_map_pxm_to_node(int pxm, int node)
{
	if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
		pxm_to_node_map[pxm] = node;
	if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
		node_to_pxm_map[node] = pxm;
}
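
/*
 * Map a proximity domain to a logical node id, allocating a new node the
 * first time a given pxm is seen: e.g. if the first SRAT entry refers to
 * pxm 4, it is assigned node 0, the next distinct pxm gets node 1, and
 * repeated lookups of an already-seen pxm return the node previously
 * assigned to it. Returns NUMA_NO_NODE once MAX_NUMNODES nodes have been
 * handed out.
 */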
int acpi_map_pxm_to_node(int pxm)
{
	int node = pxm_to_node_map[pxm];

	if (node == NUMA_NO_NODE) {
		if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
			return NUMA_NO_NODE;
		node = first_unset_node(nodes_found_map);
		__acpi_map_pxm_to_node(pxm, node);
		node_set(node, nodes_found_map);
	}

	return node;
}
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
	ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");

	if (!header)
		return;

	switch (header->type) {

	case ACPI_SRAT_TYPE_CPU_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_cpu_affinity *p =
			    (struct acpi_srat_cpu_affinity *)header;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
					  p->apic_id, p->local_sapic_eid,
					  p->proximity_domain_lo,
					  (p->flags & ACPI_SRAT_CPU_ENABLED) ?
					  "enabled" : "disabled"));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_mem_affinity *p =
			    (struct acpi_srat_mem_affinity *)header;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
					  (unsigned long)p->base_address,
					  (unsigned long)p->length,
					  p->proximity_domain,
					  (p->flags & ACPI_SRAT_MEM_ENABLED) ?
					  "enabled" : "disabled",
					  (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
					  " hot-pluggable" : "",
					  (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
					  " non-volatile" : ""));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_x2apic_cpu_affinity *p =
			    (struct acpi_srat_x2apic_cpu_affinity *)header;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Processor (x2apicid[0x%08x]) in"
					  " proximity domain %d %s\n",
					  p->apic_id,
					  p->proximity_domain,
					  (p->flags & ACPI_SRAT_CPU_ENABLED) ?
					  "enabled" : "disabled"));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	default:
		printk(KERN_WARNING PREFIX
		       "Found unsupported SRAT entry (type = 0x%x)\n",
		       header->type);
		break;
	}
}
/*
 * A lot of BIOSes fill in 10 (= no distance) everywhere. This messes
 * up the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
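/*
 * For example, a two-node SLIT of
 *	{ 10, 20,
 *	  20, 10 }
 * passes: the diagonal entries equal LOCAL_DISTANCE (10) and the
 * off-diagonal entries are larger. A table filled entirely with 10s
 * is rejected and ignored.
 */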
static int __init slit_valid(struct acpi_table_slit *slit)
{
	int i, j;
	int d = slit->locality_count;

	for (i = 0; i < d; i++) {
		for (j = 0; j < d; j++) {
			u8 val = slit->entry[d*i + j];

			if (i == j) {
				if (val != LOCAL_DISTANCE)
					return 0;
			} else if (val <= LOCAL_DISTANCE)
				return 0;
		}
	}
	return 1;
}
static int __init acpi_parse_slit(struct acpi_table_header *table)
{
	struct acpi_table_slit *slit;

	if (!table)
		return -EINVAL;

	slit = (struct acpi_table_slit *)table;
	if (!slit_valid(slit)) {
		printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
		return -EINVAL;
	}
	acpi_numa_slit_init(slit);

	return 0;
}
void __init __attribute__ ((weak))
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	printk(KERN_WARNING PREFIX
	       "Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}
static int __init
acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_srat_x2apic_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;
	if (!processor_affinity)
		return -EINVAL;

	acpi_table_print_srat_entry(header);

	/* let the architecture-dependent part do it */
	acpi_numa_x2apic_affinity_init(processor_affinity);

	return 0;
}
static int __init
acpi_parse_processor_affinity(struct acpi_subtable_header *header,
			      const unsigned long end)
{
	struct acpi_srat_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_cpu_affinity *)header;
	if (!processor_affinity)
		return -EINVAL;

	acpi_table_print_srat_entry(header);

	/* let the architecture-dependent part do it */
	acpi_numa_processor_affinity_init(processor_affinity);

	return 0;
}
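
/*
 * Number of SRAT memory affinity entries that the architecture code accepted;
 * acpi_numa_init() uses this to tell whether any NUMA memory information was
 * actually found.
 */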
static int __initdata parsed_numa_memblks;
static int __init
acpi_parse_memory_affinity(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_srat_mem_affinity *memory_affinity;

	memory_affinity = (struct acpi_srat_mem_affinity *)header;
	if (!memory_affinity)
		return -EINVAL;

	acpi_table_print_srat_entry(header);

	/* let the architecture-dependent part do it */
	if (!acpi_numa_memory_affinity_init(memory_affinity))
		parsed_numa_memblks++;

	return 0;
}
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	struct acpi_table_srat *srat;

	if (!table)
		return -EINVAL;

	srat = (struct acpi_table_srat *)table;
	acpi_srat_revision = srat->header.revision;

	/* Real work done in acpi_table_parse_srat below. */

	return 0;
}
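
/*
 * Walk all SRAT subtables of the given type and invoke handler on each;
 * a max_entries of 0 means no limit.
 */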
static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
		      acpi_tbl_entry_handler handler, unsigned int max_entries)
{
	return acpi_table_parse_entries(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat), id,
					handler, max_entries);
}
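
/*
 * Boot-time entry point for SRAT/SLIT parsing: CPU affinity entries (x2APIC,
 * then local APIC) are walked first, then memory affinity entries, and
 * finally the SLIT distance matrix is handed to the architecture code.
 * Returns 0 on success, or a negative errno if SRAT memory parsing failed
 * or produced no usable entries.
 */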
int __init acpi_numa_init(void)
{
	int cnt = 0;

	/*
	 * Do not limit the number of entries to the CPU count from NR_CPUS
	 * or nr_cpus=; SRAT CPU entries may appear in a different order than
	 * in the MADT, so walk all CPU entries in the SRAT to build the full
	 * APIC-ID-to-node mapping.
	 */

	/* SRAT: Static Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
				      acpi_parse_x2apic_affinity, 0);
		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
				      acpi_parse_processor_affinity, 0);
		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity,
					    NR_NODE_MEMBLKS);
	}

	/* SLIT: System Locality Information Table */
	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);

	acpi_numa_arch_fixup();

	if (cnt < 0)
		return cnt;
	else if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;
}
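
/*
 * Evaluate _PXM for the given handle, walking up through its ACPI namespace
 * parents until one of them provides a proximity domain. Returns the
 * proximity domain number, or -1 if no ancestor defines _PXM.
 */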
int acpi_get_pxm(acpi_handle h)
{
	unsigned long long pxm;
	acpi_status status;
	acpi_handle handle;
	acpi_handle phandle = h;

	do {
		handle = phandle;
		status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
		if (ACPI_SUCCESS(status))
			return pxm;
		status = acpi_get_parent(handle, &phandle);
	} while (ACPI_SUCCESS(status));
	return -1;
}
int acpi_get_node(acpi_handle *handle)
{
	int pxm, node = NUMA_NO_NODE;

	pxm = acpi_get_pxm(handle);
	if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
		node = acpi_map_pxm_to_node(pxm);

	return node;
}
EXPORT_SYMBOL(acpi_get_node);