arm: introduce psci_smp_ops
[platform/adaptation/renesas_rcar/renesas_kernel.git] / arch / arm / kernel / setup.c
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_platform.h>
22 #include <linux/init.h>
23 #include <linux/kexec.h>
24 #include <linux/of_fdt.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/proc_fs.h>
29 #include <linux/memblock.h>
30 #include <linux/bug.h>
31 #include <linux/compiler.h>
32 #include <linux/sort.h>
33
34 #include <asm/unified.h>
35 #include <asm/cp15.h>
36 #include <asm/cpu.h>
37 #include <asm/cputype.h>
38 #include <asm/elf.h>
39 #include <asm/procinfo.h>
40 #include <asm/psci.h>
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43 #include <asm/smp_plat.h>
44 #include <asm/mach-types.h>
45 #include <asm/cacheflush.h>
46 #include <asm/cachetype.h>
47 #include <asm/tlbflush.h>
48
49 #include <asm/prom.h>
50 #include <asm/mach/arch.h>
51 #include <asm/mach/irq.h>
52 #include <asm/mach/time.h>
53 #include <asm/system_info.h>
54 #include <asm/system_misc.h>
55 #include <asm/traps.h>
56 #include <asm/unwind.h>
57 #include <asm/memblock.h>
58 #include <asm/virt.h>
59
60 #include "atags.h"
61
62
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FP emulator type string parsed from the "fpe=" kernel option. */
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	/*
	 * Copies a fixed 8 bytes regardless of the option's actual
	 * length.  NOTE(review): assumes the command-line buffer
	 * extends at least 8 bytes past 'line' -- confirm, otherwise
	 * this over-reads for short "fpe=" values.
	 */
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
74
/* Implemented elsewhere in arch/arm (mm and process code). */
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

/* CPU main ID register value; set outside this file during early boot. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine type number handed over by the boot loader. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* Cache topology flags (CACHEID_*), computed by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAGs/DTB; set outside this file early on. */
unsigned int __atags_pointer __initdata;

/* Board revision and serial number, reported via /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* ELF hardware capability bits (HWCAP_*) advertised to user space. */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
100
101
/*
 * When the kernel supports multiple CPU/TLB/user/cache implementations,
 * these hold the function-pointer tables for the implementation found
 * at boot; they are filled in by setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
/* Outer cache maintenance operations, when an outer cache exists. */
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
125
/*
 * Small per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes; the stack pointers are installed by cpu_init().
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* Platform string exposed to user space (see setup_processor()). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Writable copy of boot_command_line, handed back via *cmdline_p. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/* Reads as 'l' on little-endian builds and 'b' on big-endian ones. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
146
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases; start/end are filled in later during setup. */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy I/O port ranges optionally reserved per machine descriptor. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
199
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_*
 * value returned by cpu_architecture().
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
219
/*
 * Decode the architecture version (CPU_ARCH_*) from the main ID
 * register.  Older CPUs encode it directly in the ID value; CPUs
 * using the "revised CPUID format" (ID[19:16] == 0xf) require
 * looking at the memory model feature registers instead.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7-style ID: bit 23 distinguishes 4T from 3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* Architecture field held directly in ID[18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
252
253 int __pure cpu_architecture(void)
254 {
255         BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
256
257         return __cpu_architecture;
258 }
259
/*
 * Determine whether the instruction cache can alias, i.e. whether a
 * VIPT I-cache way is larger than a page.  @arch selects how the
 * cache geometry is read (the register format differs between v6
 * and v7).  Returns non-zero if the I-cache aliases.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 I-cache, then read its size ID register */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* aliases when one cache way spans more than a page */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
292
/*
 * Work out the cache type (VIVT/VIPT/PIPT, aliasing or not) from the
 * cache type register and record it in the global 'cacheid', then
 * print a summary line.  Pre-v6 CPUs are assumed VIVT.
 */
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1 I-cache policy field */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		/* The I-cache may alias even when the D-cache does not */
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
334
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

/*
 * early_print - printf-style output usable before the console is up.
 *
 * The formatted message always goes to the printk log buffer; with
 * CONFIG_DEBUG_LL it is additionally pushed straight to the low-level
 * debug output via printascii().  Messages are truncated at 256 bytes.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
356
/*
 * Set the hardware-divide HWCAP bits from ID_ISAR0 bits [27:24]:
 * 1 means divide in Thumb only, 2 means divide in both ARM and Thumb.
 * Only meaningful from ARMv7 onwards.
 */
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through - ARM-mode divide implies Thumb divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}
373
/*
 * Clear HWCAP_TLS on ARM1136 r0 parts, which lack the TLS register.
 */
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	/* Only applies to ARM Ltd. ARMv6-family parts */
	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
388
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switch in turn to IRQ, abort and undef mode (with interrupts
	 * masked), point each mode's banked SP at its slot in this
	 * CPU's 'stacks' entry, and finally return to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
447
/* Logical-to-physical CPU id map, indexed by logical CPU number. */
int __cpu_logical_map[NR_CPUS];

/*
 * Establish the boot CPU as logical CPU 0: its MPIDR affinity-0
 * value becomes cpu_logical_map(0), and the CPU that would have
 * claimed that slot swaps into the boot CPU's old position.
 */
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
462
/*
 * Identify the boot CPU, install the matching low-level operation
 * tables, compute the ELF hwcaps/platform strings, and perform the
 * initial per-CPU setup.  Hangs if the CPU type is unsupported.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Install the implementation-specific operation tables */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* The trailing character encodes endianness (see ENDIANNESS) */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
516
/*
 * Print every machine type this kernel was built for, then halt.
 * Called when the boot loader hands us an unknown machine id.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
530
531 int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
532 {
533         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
534
535         if (meminfo.nr_banks >= NR_BANKS) {
536                 printk(KERN_CRIT "NR_BANKS too low, "
537                         "ignoring memory at 0x%08llx\n", (long long)start);
538                 return -EINVAL;
539         }
540
541         /*
542          * Ensure that start/size are aligned to a page boundary.
543          * Size is appropriately rounded down, start is rounded up.
544          */
545         size -= start & ~PAGE_MASK;
546         bank->start = PAGE_ALIGN(start);
547
548 #ifndef CONFIG_ARM_LPAE
549         if (bank->start + size < bank->start) {
550                 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
551                         "32-bit physical address space\n", (long long)start);
552                 /*
553                  * To ensure bank->start + bank->size is representable in
554                  * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
555                  * This means we lose a page after masking.
556                  */
557                 size = ULONG_MAX - bank->start;
558         }
559 #endif
560
561         bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
562
563         /*
564          * Check whether this memory region has non-zero size or
565          * invalid node number.
566          */
567         if (bank->size == 0)
568                 return -EINVAL;
569
570         meminfo.nr_banks++;
571         return 0;
572 }
573
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	/* Default base when no "@start" part is given */
	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
605
/*
 * Register the standard resource tree: one "System RAM" resource per
 * memblock region, with the kernel code/data resources nested inside
 * whichever region contains them, plus optional video RAM and legacy
 * I/O port reservations from the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel image resources under their RAM region */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
650
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default console geometry for the VGA/dummy console drivers. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif
661
/* Run the machine's device-registration hook at arch_initcall time. */
static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);
680
681 static int __init init_machine_late(void)
682 {
683         if (machine_desc->init_late)
684                 machine_desc->init_late();
685         return 0;
686 }
687 late_initcall(init_machine_late);
688
#ifdef CONFIG_KEXEC
/* Total low memory in bytes, derived from the pfn range. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory are for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* No (valid) crashkernel= option on the command line */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* Publish the reservation in /proc/iomem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
737
738 static int __init meminfo_cmp(const void *_a, const void *_b)
739 {
740         const struct membank *a = _a, *b = _b;
741         long cmp = bank_pfn_start(a) - bank_pfn_start(b);
742         return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
743 }
744
/*
 * Report which privilege mode the CPUs were started in, and warn if
 * they disagree (which points at bootloader/firmware problems).
 * Compiled out unless CONFIG_ARM_VIRT_EXT is enabled.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
759
/*
 * Main architecture-specific boot-time setup.  The ordering here is
 * significant: the processor must be identified before the machine,
 * memory must be parsed and sanity-checked before paging_init(), and
 * the device tree must be unflattened before the CPU maps and PSCI
 * are initialised.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a DT-supplied machine; fall back to ATAG machine id */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Banks must be ordered by address before the memblock handover */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		/* PSCI-provided SMP ops take precedence over the board's */
		if (psci_smp_available())
			smp_set_ops(&psci_smp_ops);
		else if (mdesc->smp)
			smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/* On SMP this is done later, once all CPUs are up */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
831
832
/*
 * Register a CPU device for every possible CPU so they appear in
 * sysfs; all CPUs are marked hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
846
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used by other arch code. */
static int __init proc_cpu_init(void)
{
	return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
859
/*
 * Names for the elf_hwcap bits, in bit order: entry j is printed by
 * c_show() when bit (1 << j) is set.  The order must therefore match
 * the HWCAP_* bit numbering.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
882
/*
 * seq_file show handler for /proc/cpuinfo: one stanza per online CPU
 * followed by the machine-wide hardware/revision/serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		/* The variant/part field layout depends on the CPU era */
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
945
946 static void *c_start(struct seq_file *m, loff_t *pos)
947 {
948         return *pos < 1 ? (void *)1 : NULL;
949 }
950
951 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
952 {
953         ++*pos;
954         return NULL;
955 }
956
/* Nothing to release; c_start() allocates no state. */
static void c_stop(struct seq_file *m, void *v)
{
}
960
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};