/*
 *  acpi_numa.c - ACPI NUMA support
 *
 *  Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/numa.h>
#include <acpi/acpi_bus.h>

#define PREFIX "ACPI: "

#define ACPI_NUMA	0x80000000
#define _COMPONENT	ACPI_NUMA
ACPI_MODULE_NAME("numa");

static nodemask_t nodes_found_map = NODE_MASK_NONE;
/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };

unsigned char acpi_srat_revision __initdata;

int pxm_to_node(int pxm)
{
	if (pxm < 0)
		return NUMA_NO_NODE;
	return pxm_to_node_map[pxm];
}

int node_to_pxm(int node)
{
	if (node < 0)
		return PXM_INVAL;
	return node_to_pxm_map[node];
}

void __acpi_map_pxm_to_node(int pxm, int node)
{
	if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
		pxm_to_node_map[pxm] = node;
	if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
		node_to_pxm_map[node] = pxm;
}

int acpi_map_pxm_to_node(int pxm)
{
	int node = pxm_to_node_map[pxm];

	if (node == NUMA_NO_NODE) {
		if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
			return NUMA_NO_NODE;
		node = first_unset_node(nodes_found_map);
		__acpi_map_pxm_to_node(pxm, node);
		node_set(node, nodes_found_map);
	}

	return node;
}
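/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller that wants the logical node for a firmware proximity domain
 * would let acpi_map_pxm_to_node() allocate the mapping on first use
 * and rely on the cheap table lookup afterwards. "example_pxm_lookup"
 * is an assumed name used only for illustration.
 */
#if 0
static int example_pxm_lookup(int pxm)
{
	int node;

	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
		return NUMA_NO_NODE;

	/* Allocates a new logical node the first time this pxm is seen. */
	node = acpi_map_pxm_to_node(pxm);

	/* Later translations can use the table lookup directly. */
	WARN_ON(node != pxm_to_node(pxm));
	return node;
}
#endif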
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
	ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");

	if (!header)
		return;

	switch (header->type) {

	case ACPI_SRAT_TYPE_CPU_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_cpu_affinity *p =
			    (struct acpi_srat_cpu_affinity *)header;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
					  p->apic_id, p->local_sapic_eid,
					  p->proximity_domain_lo,
					  (p->flags & ACPI_SRAT_CPU_ENABLED) ?
					  "enabled" : "disabled"));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_mem_affinity *p =
			    (struct acpi_srat_mem_affinity *)header;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
					  (unsigned long)p->base_address,
					  (unsigned long)p->length,
					  p->proximity_domain,
					  (p->flags & ACPI_SRAT_MEM_ENABLED) ?
					  "enabled" : "disabled",
					  (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
					  " hot-pluggable" : "",
					  (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
					  " non-volatile" : ""));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_x2apic_cpu_affinity *p =
			    (struct acpi_srat_x2apic_cpu_affinity *)header;
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Processor (x2apicid[0x%08x]) in"
					  " proximity domain %d %s\n",
					  p->apic_id,
					  p->proximity_domain,
					  (p->flags & ACPI_SRAT_CPU_ENABLED) ?
					  "enabled" : "disabled"));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	default:
		printk(KERN_WARNING PREFIX
		       "Found unsupported SRAT entry (type = 0x%x)\n",
		       header->type);
		break;
	}
}
/*
 * Many BIOSes fill in 10 (= no distance) everywhere. This messes up
 * the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
static int __init slit_valid(struct acpi_table_slit *slit)
{
	int i, j;
	int d = slit->locality_count;

	for (i = 0; i < d; i++) {
		for (j = 0; j < d; j++) {
			u8 val = slit->entry[d*i + j];

			if (i == j) {
				if (val != LOCAL_DISTANCE)
					return 0;
			} else if (val <= LOCAL_DISTANCE)
				return 0;
		}
	}
	return 1;
}
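/*
 * Illustrative example (not part of the original file): for two
 * localities, a SLIT matrix of
 *
 *	{ 10, 20,
 *	  20, 10 }
 *
 * passes the check above, while the all-10 matrix some BIOSes emit,
 *
 *	{ 10, 10,
 *	  10, 10 },
 *
 * is rejected because its off-diagonal entries are not strictly
 * larger than LOCAL_DISTANCE (10).
 */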
static int __init acpi_parse_slit(struct acpi_table_header *table)
{
	struct acpi_table_slit *slit;

	if (!table)
		return -EINVAL;

	slit = (struct acpi_table_slit *)table;

	if (!slit_valid(slit)) {
		printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
		return -EINVAL;
	}
	acpi_numa_slit_init(slit);

	return 0;
}

void __init __attribute__ ((weak))
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	printk(KERN_WARNING PREFIX
	       "Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}
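/*
 * Illustrative sketch (not part of the original file): an architecture
 * that supports x2APIC NUMA affinity overrides the weak stub above with
 * a real implementation, roughly along these lines. The body below is a
 * hypothetical simplification, not any architecture's actual code.
 */
#if 0
void __init
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	int node;

	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
		return;

	/* Reserve a logical node for the entry's proximity domain ... */
	node = acpi_map_pxm_to_node(pa->proximity_domain);

	/* ... then record the apic_id -> node association (arch-specific). */
}
#endif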
static int __init
acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_srat_x2apic_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;
	if (!processor_affinity)
		return -EINVAL;

	acpi_table_print_srat_entry(header);

	/* let the architecture-dependent part handle it */
	acpi_numa_x2apic_affinity_init(processor_affinity);

	return 0;
}
static int __init
acpi_parse_processor_affinity(struct acpi_subtable_header *header,
			      const unsigned long end)
{
	struct acpi_srat_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_cpu_affinity *)header;
	if (!processor_affinity)
		return -EINVAL;

	acpi_table_print_srat_entry(header);

	/* let the architecture-dependent part handle it */
	acpi_numa_processor_affinity_init(processor_affinity);

	return 0;
}
static int __initdata parsed_numa_memblks;

static int __init
acpi_parse_memory_affinity(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_srat_mem_affinity *memory_affinity;

	memory_affinity = (struct acpi_srat_mem_affinity *)header;
	if (!memory_affinity)
		return -EINVAL;

	acpi_table_print_srat_entry(header);

	/* let the architecture-dependent part handle it */
	if (!acpi_numa_memory_affinity_init(memory_affinity))
		parsed_numa_memblks++;

	return 0;
}
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	struct acpi_table_srat *srat;

	if (!table)
		return -EINVAL;

	srat = (struct acpi_table_srat *)table;
	acpi_srat_revision = srat->header.revision;

	/* Real work done in acpi_table_parse_srat below. */

	return 0;
}

static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
		      acpi_tbl_entry_handler handler, unsigned int max_entries)
{
	return acpi_table_parse_entries(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat), id,
					handler, max_entries);
}
int __init acpi_numa_init(void)
{
	int cnt = 0;

	/*
	 * Do not limit the walk by the CPU count from NR_CPUS or nr_cpus=:
	 * SRAT CPU entries may be ordered differently from those in the
	 * MADT, so go over all CPU entries in the SRAT to build the
	 * apicid-to-node mapping.
	 */

	/* SRAT: System Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
				      acpi_parse_x2apic_affinity, 0);
		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
				      acpi_parse_processor_affinity, 0);
		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity,
					    NR_NODE_MEMBLKS);
	}

	/* SLIT: System Locality Information Table */
	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);

	acpi_numa_arch_fixup();

	if (cnt < 0)
		return cnt;
	else if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;
}
int acpi_get_pxm(acpi_handle h)
{
	unsigned long long pxm;
	acpi_status status;
	acpi_handle handle;
	acpi_handle phandle = h;

	do {
		handle = phandle;
		status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
		if (ACPI_SUCCESS(status))
			return pxm;
		status = acpi_get_parent(handle, &phandle);
	} while (ACPI_SUCCESS(status));
	return -1;
}
int acpi_get_node(acpi_handle *handle)
{
	int pxm, node = NUMA_NO_NODE;

	pxm = acpi_get_pxm(handle);
	if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
		node = acpi_map_pxm_to_node(pxm);

	return node;
}
EXPORT_SYMBOL(acpi_get_node);
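/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver wanting NUMA-local memory could translate its ACPI handle to a
 * node with the exported helper above and fall back to an any-node
 * allocation when no _PXM is found. "example_alloc_near" is an assumed
 * name used only for illustration; <linux/slab.h> would also be needed.
 */
#if 0
static void *example_alloc_near(acpi_handle handle, size_t size)
{
	int node = acpi_get_node(handle);

	if (node == NUMA_NO_NODE)
		return kmalloc(size, GFP_KERNEL);
	return kmalloc_node(size, GFP_KERNEL, node);
}
#endif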