/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cputype.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

struct processor processor __read_mostly;
struct cpu_tlb_fns cpu_tlb __read_mostly;
struct cpu_user_fns cpu_user __read_mostly;
struct cpu_cache_fns cpu_cache __read_mostly;

#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3], abt[3], und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
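/*
 * ENDIANNESS evaluates to the least significant byte of endian_test.l:
 * c[0] ('l') on a little-endian kernel, c[3] ('b') on a big-endian one,
 * so the utsname machine and ELF platform strings built in
 * setup_processor() end in "l" or "b".
 */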

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{ .name = "Video RAM",   .flags = IORESOURCE_MEM },
	{ .name = "Kernel code", .flags = IORESOURCE_MEM },
	{ .name = "Kernel data", .flags = IORESOURCE_MEM },
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{ .name = "reserved", .start = 0x3bc, .end = 0x3be,
	  .flags = IORESOURCE_IO | IORESOURCE_BUSY },
	{ .name = "reserved", .start = 0x378, .end = 0x37f,
	  .flags = IORESOURCE_IO | IORESOURCE_BUSY },
	{ .name = "reserved", .start = 0x278, .end = 0x27f,
	  .flags = IORESOURCE_IO | IORESOURCE_BUSY },
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown", "3", "4", "4T", "5", "5T", "5TE", "5TEJ",
	"6TEJ", "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* pre-ARM7 */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
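
/*
 * Illustrative example (not from the original source): a CCSIDR value
 * encoding 32-byte lines ((id_reg & 0x7) == 1, so 4 << 3) and 256 sets
 * gives 32 * 256 = 8KB per way.  That exceeds a 4KB page, so the cache
 * index uses bits above the page offset and the I-cache can alias.
 */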

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs, vmsa;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through: ARM-mode divide implies Thumb divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}

	/* LPAE implies atomic ldrd/strd instructions */
	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
	if (vmsa >= 5)
		elf_hwcap |= HWCAP_LPAE;
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;
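
	/*
	 * Illustrative example (not from the original source): if the boot
	 * CPU's MPIDR Aff0 is 2 and nr_cpu_ids is 4, the map above becomes
	 * {2, 1, 0, 3}: the booting core is logical CPU 0 and the core that
	 * would normally be logical 2 takes its old slot.
	 */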

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24-bit value space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif
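
/*
 * Illustrative example (not from the original source): for possible CPUs
 * with MPIDRs 0x000, 0x001, 0x100 and 0x101, mask becomes 0x101 and
 * bits[] = {1, 1, 0}, so shift_aff[] = {0, 7, 14} and mpidr_hash.bits = 2.
 * The 24-bit MPIDR space is thus compressed to a two-bit, collision-free
 * index as described above.
 */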

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	processor = *list->proc;
	cpu_tlb = *list->tlb;
	cpu_user = *list->user;
	cpu_cache = *list->cache;

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
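	/*
	 * Illustrative example (not from the original source): for
	 * start = 0x20000800 and size = 0x100000, size first drops to
	 * 0xff800, start rounds up to 0x20001000, and the masking below
	 * trims size to 0xff000, so both partial pages are discarded.
	 */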
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
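
/*
 * Illustrative example (not from the original source): booting with
 * "mem=64M@0x80000000" makes early_mem() call
 * arm_add_memory(0x80000000, 0x04000000), replacing whatever banks the
 * boot tags or device tree described.
 */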

static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have lp0, lp1 or lp2 at all.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * Customizes platform devices, or adds new ones.
	 * On DT-based machines we fall back to populating the machine from
	 * the device tree if no callback is provided; otherwise we would
	 * always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by a dump
 * capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
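
/*
 * Illustrative example (not from the original source): booting with
 * "crashkernel=64M@0x60000000" makes parse_crashkernel() return
 * crash_size = 0x4000000 and crash_base = 0x60000000, so crashk_res
 * ends up spanning 0x60000000-0x63ffffff for a later kdump kernel.
 */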

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp", "half", "thumb", "26bit", "fastmult", "fpa", "vfp", "edsp",
	"java", "iwmmxt", "crunch", "thumbee", "neon", "vfpv3", "vfpv3d16",
	"tls", "vfpv4", "idiva", "idivt", "vfpd32", "lpae",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
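
		/*
		 * Illustrative example (not from the original source): with
		 * HZ = 100 and loops_per_jiffy = 4997120, the expressions
		 * above print "BogoMIPS : 999.42" (4997120 / 5000 = 999 and
		 * (4997120 / 50) % 100 = 42).
		 */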
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);
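
		/*
		 * Illustrative example (not from the original source): a
		 * Cortex-A9 with MIDR 0x410fc090 prints implementer 0x41
		 * above and, in the new-format branch below, variant 0x0,
		 * part 0xc09 and revision 0.
		 */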
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}
1033 seq_printf(m, "Hardware\t: %s\n", machine_name);
1034 seq_printf(m, "Revision\t: %04x\n", system_rev);
1035 seq_printf(m, "Serial\t\t: %08x%08x\n",
1036 system_serial_high, system_serial_low);

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};