/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif
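
/*
 * Illustrative example (the accepted values depend on which FP emulator
 * is built in): booting with "fpe=nwfpe" on the kernel command line
 * copies the string "nwfpe" into fpe_type for the floating point
 * emulation code to interpret.
 */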

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
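/*
 * The union overlays four characters on an unsigned long: casting the
 * long to char yields whichever byte sits at the lowest address, so
 * ENDIANNESS evaluates to 'l' on little-endian and 'b' on big-endian
 * builds.  setup_processor() appends it to the machine and ELF
 * platform strings.
 */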

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "7M",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
        return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
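                /*
                 * line_size * num_sets is the size of one cache way.  A
                 * VIPT icache aliases when a way spans more than a page,
                 * i.e. when the virtual index bits reach above the page
                 * offset.
                 */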
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}

static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch == CPU_ARCH_ARMv7M) {
                cacheid = 0;
        } else if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
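                /*
                 * CTR bits [31:29] give the register format: 0b100 is
                 * the ARMv7 layout, in which the L1Ip field (bits
                 * [15:14]) encodes the icache policy - 0b01 ASID-tagged
                 * VIVT, 0b11 PIPT.
                 */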
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
        unsigned int divide_instrs;

        if (cpu_architecture() < CPU_ARCH_ARMv7)
                return;

        divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

        /* ID_ISAR0[27:24]: 1 = Thumb divide only, 2 = ARM and Thumb divide */
        switch (divide_instrs) {
        case 2:
                elf_hwcap |= HWCAP_IDIVA;
                /* fall through */
        case 1:
                elf_hwcap |= HWCAP_IDIVT;
        }
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * Set up the stacks for the re-entrant exception handlers.  Each
         * msr/add/mov triplet below switches into IRQ, ABT or UND mode in
         * turn and points that mode's banked SP at the matching three-word
         * stack, before the final msr drops back to SVC mode.
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
#endif
}

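/*
 * Map of logical CPU numbers to physical (MPIDR affinity level 0) IDs.
 * Entries start out as MPIDR_INVALID and are filled in below and by the
 * DT CPU enumeration code.
 */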
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

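        /*
         * The booting CPU becomes logical CPU 0.  The logical slot that
         * would otherwise carry its hardware ID gets 0 instead, so the
         * map remains a permutation of the possible IDs.
         */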
        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;

        cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
        if (bank->start + size < bank->start) {
                printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                        "32-bit physical address space\n", (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - bank->start;
        }
#endif

        bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Reject this memory region if the rounding above left it
         * with a zero size.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        phys_addr_t size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);
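
/*
 * Illustrative example: "mem=64M@0x20000000" registers a single 64MB
 * bank starting at physical address 0x20000000, while a plain "mem=64M"
 * starts the bank at PHYS_OFFSET.  (The addresses here are made up for
 * the example.)
 */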

static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines may never have the parallel port regions
         * lp0, lp1 or lp2.
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};
#endif

static int __init customize_machine(void)
{
        /*
         * Customize platform devices, or add new ones.  On DT-based
         * machines we fall back to populating the machine from the
         * device tree when no callback is provided; otherwise every
         * machine would need an init_machine callback.
         */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
#ifdef CONFIG_OF
        else
                of_platform_populate(NULL, of_default_bus_match_table,
                                        NULL, NULL);
#endif
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter.  The memory reserved is used by a dump
 * capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
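
/*
 * Illustrative example: booting with "crashkernel=64M@32M" asks for a
 * 64MB reservation starting at the 32MB physical mark for the capture
 * kernel; parse_crashkernel() accepts the usual size[@offset] syntax.
 */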

static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        setup_dma_zone(mdesc);

        if (mdesc->restart_mode)
                reboot_setup(&mdesc->restart_mode);

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
        psci_init();
#ifdef CONFIG_SMP
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
                        if (psci_smp_available())
                                smp_set_ops(&psci_smp_ops);
                        else if (mdesc->smp)
                                smp_set_ops(mdesc->smp);
                }
                smp_init_cpus();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000/HZ),
                           (loops_per_jiffy / (5000/HZ)) % 100);
#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

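/*
 * /proc/cpuinfo is generated in a single pass: c_start() returns one
 * dummy token for position 0 and NULL thereafter, so c_show() runs
 * exactly once per read of the file.
 */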
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};