#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#ifndef CONFIG_DEBUG_BOOT_PARAMS
struct boot_params __initdata boot_params;
#else
struct boot_params boot_params;
#endif
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif
/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
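/*
 * The EARLY_PER_CPU machinery above (see asm/percpu.h) backs each
 * variable with a static NR_CPUS-sized array until the real per cpu
 * areas exist: early_per_cpu() reads the array while the *_early_ptr
 * is non-NULL and falls through to per_cpu() after
 * setup_per_cpu_maps() has copied the data and cleared the pointer.
 */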
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define	X86_64_NUMA	1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif
#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif
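/*
 * On 64-bit SMP every CPU reaches its x8664_pda through %gs.  The
 * three setup_cpu_pda_map() variants below cover 32-bit (which has
 * no pda), UP, and 64-bit SMP, where the pda pointer table and the
 * secondary CPUs' pda area are relocated into bootmem.
 */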
#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }
#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size = PERCPU_ENOUGH_ROOM;
	char *ptr;
	int cpu;

	/* no processor from mptable or madt */
	if (!num_processors)
		num_processors = 1;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#else
	nr_cpu_ids = num_processors;
#endif

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);

		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
				cpu, node);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
		NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif
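/*
 * Boot protocol 2.09+ passes a singly linked list of setup_data
 * blobs: boot_params.hdr.setup_data holds the physical address of
 * the first node, and each node carries a type, a length and the
 * physical address of the next node (see the x86 boot protocol
 * documentation).  parse_setup_data() walks and consumes that list.
 */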
void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	if (boot_params.hdr.version < 0x0209)
		return;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_ioremap(pa_data, PAGE_SIZE);
		switch (data->type) {
		case SETUP_E820_EXT:
			parse_e820_ext(data, pa_data);
			break;
		default:
			break;
		}
#ifndef CONFIG_DEBUG_BOOT_PARAMS
		free_early(pa_data, pa_data + sizeof(*data) + data->len);
#endif
		pa_data = data->next;
		early_iounmap(data, PAGE_SIZE);
	}
}
#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
		map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
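/*
 * numa_set_node() records the node of a CPU in whichever map is
 * current: the early static map before the per cpu areas are up,
 * the per cpu variable afterwards.  The pda's nodenumber is kept
 * in sync whenever the pda already exists.
 */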
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}
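/*
 * The fast versions above trust node_to_cpumask_map and the cpu's
 * node to be valid; the CONFIG_DEBUG_PER_CPU_MAPS variants below
 * add NULL and bounds checks and log every mask update.
 */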
#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}
void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return &cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */
#ifdef CONFIG_KEXEC

/*
 * --------- Crashkernel reservation ------------------------------
 */
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_low_pfn - min_low_pfn;
#ifdef CONFIG_HIGHMEM
	total += highend_pfn - highstart_pfn;
#endif
	return total << PAGE_SHIFT;
}
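/*
 * The reservation is requested on the command line as
 * "crashkernel=size[@offset]", e.g. "crashkernel=64M@16M" asks for
 * 64MB at physical address 16MB; parse_crashkernel() also accepts
 * the extended range:size syntax that scales with total memory.
 */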
void __init reserve_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();

	ret = parse_crashkernel(boot_command_line, total_mem,
			&crash_size, &crash_base);
	if (ret == 0 && crash_size > 0) {
		if (crash_base <= 0) {
			printk(KERN_INFO "crashkernel reservation failed - "
					"you have to specify a base address\n");
			return;
		}

		if (reserve_bootmem_generic(crash_base, crash_size,
					BOOTMEM_EXCLUSIVE) < 0) {
			printk(KERN_INFO "crashkernel reservation failed - "
					"memory is in use\n");
			return;
		}

		printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
				"for crashkernel (System RAM: %ldMB)\n",
				(unsigned long)(crash_size >> 20),
				(unsigned long)(crash_base >> 20),
				(unsigned long)(total_mem >> 20));

		crashk_res.start = crash_base;
		crashk_res.end = crash_base + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_res);
	}
}
#else
void __init reserve_crashkernel(void)
{}
#endif
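/*
 * Legacy port ranges of the classic PC/AT platform devices (DMA
 * controllers, PICs, PIT, keyboard controller, FPU error ports).
 * They are claimed up front so they show up in /proc/ioports and
 * cannot be handed out to other drivers.
 */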
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
}
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif