2 * Procedures for interfacing to Open Firmware.
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/init.h>
22 #include <linux/threads.h>
23 #include <linux/spinlock.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/proc_fs.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
34 #include <asm/processor.h>
39 #include <asm/pgtable.h>
40 #include <asm/iommu.h>
41 #include <asm/btext.h>
42 #include <asm/sections.h>
43 #include <asm/machdep.h>
46 #include <linux/linux_logo.h>
49 * Eventually bump that one up
51 #define DEVTREE_CHUNK_SIZE 0x100000
54 * This is the size of the local memory reserve map that gets copied
55 * into the boot params passed to the kernel. That size is totally
56 * flexible as the kernel just reads the list until it encounters an
57 * entry with size 0, so it can be changed without breaking binary
60 #define MEM_RESERVE_MAP_SIZE 8
63 * prom_init() is called very early on, before the kernel text
64 * and data have been mapped to KERNELBASE. At this point the code
65 * is running at whatever address it has been loaded at.
66 * On ppc32 we compile with -mrelocatable, which means that references
67 * to extern and static variables get relocated automatically.
68 * ppc64 objects are always relocatable, we just need to relocate the TOC.
71 * Because OF may have mapped I/O devices into the area starting at
72 * KERNELBASE, particularly on CHRP machines, we can't safely call
73 * OF once the kernel has been mapped to KERNELBASE. Therefore all
74 * OF calls must be done within prom_init().
76 * ADDR is used in calls to call_prom. The 4th and following
77 * arguments to call_prom should be 32-bit values.
78 * On ppc64, 64 bit values are truncated to 32 bits (and
79 * fortunately don't get interpreted as two arguments).
81 #define ADDR(x) (u32)(unsigned long)(x)
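/* Typical use (as seen throughout this file): pass string or buffer
 * addresses as 32-bit cells, e.g. call_prom("finddevice", 1, 1,
 * ADDR("/chosen")).
 */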
84 #define OF_WORKAROUNDS 0
86 #define OF_WORKAROUNDS of_workarounds
90 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
91 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
93 #define PROM_BUG() do { \
94 prom_printf("kernel BUG at %s line 0x%x!\n", \
95 __FILE__, __LINE__); \
96 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
100 #define prom_debug(x...) prom_printf(x)
102 #define prom_debug(x...)
106 typedef u32 prom_arg_t;
124 struct mem_map_entry {
129 typedef __be32 cell_t;
131 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
132 unsigned long r6, unsigned long r7, unsigned long r8,
136 extern int enter_prom(struct prom_args *args, unsigned long entry);
138 static inline int enter_prom(struct prom_args *args, unsigned long entry)
140 return ((int (*)(struct prom_args *))entry)(args);
144 extern void copy_and_flush(unsigned long dest, unsigned long src,
145 unsigned long size, unsigned long offset);
148 static struct prom_t __initdata prom;
150 static unsigned long prom_entry __initdata;
152 #define PROM_SCRATCH_SIZE 256
154 static char __initdata of_stdout_device[256];
155 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
157 static unsigned long __initdata dt_header_start;
158 static unsigned long __initdata dt_struct_start, dt_struct_end;
159 static unsigned long __initdata dt_string_start, dt_string_end;
161 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
164 static int __initdata prom_iommu_force_on;
165 static int __initdata prom_iommu_off;
166 static unsigned long __initdata prom_tce_alloc_start;
167 static unsigned long __initdata prom_tce_alloc_end;
170 /* Platform codes are now obsolete in the kernel. They are now only used within
171 * this file and will ultimately go away too. Feel free to change them if you
172 * need to; they are not shared with anything outside of this file anymore.
174 #define PLATFORM_PSERIES 0x0100
175 #define PLATFORM_PSERIES_LPAR 0x0101
176 #define PLATFORM_LPAR 0x0001
177 #define PLATFORM_POWERMAC 0x0400
178 #define PLATFORM_GENERIC 0x0500
179 #define PLATFORM_OPAL 0x0600
181 static int __initdata of_platform;
183 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
185 static unsigned long __initdata prom_memory_limit;
187 static unsigned long __initdata alloc_top;
188 static unsigned long __initdata alloc_top_high;
189 static unsigned long __initdata alloc_bottom;
190 static unsigned long __initdata rmo_top;
191 static unsigned long __initdata ram_top;
193 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
194 static int __initdata mem_reserve_cnt;
196 static cell_t __initdata regbuf[1024];
198 static bool rtas_has_query_cpu_stopped;
202 * Error results ... some OF calls will return "-1" on error, some
203 * will return 0, some will return either. To simplify, here are
204 * macros to use with any ihandle or phandle return value to check if it is valid.
208 #define PROM_ERROR (-1u)
209 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
210 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
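/* Callers below typically bail out early on an invalid handle, e.g.
 * if (!PHANDLE_VALID(node)) return; -- see prom_instantiate_rtas() and
 * friends for the pattern.
 */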
213 /* This is the one and *ONLY* place where we actually call open firmware.
217 static int __init call_prom(const char *service, int nargs, int nret, ...)
220 struct prom_args args;
223 args.service = cpu_to_be32(ADDR(service));
224 args.nargs = cpu_to_be32(nargs);
225 args.nret = cpu_to_be32(nret);
227 va_start(list, nret);
228 for (i = 0; i < nargs; i++)
229 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
232 for (i = 0; i < nret; i++)
233 args.args[nargs+i] = 0;
235 if (enter_prom(&args, prom_entry) < 0)
238 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
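/* call_prom_ret() below works like call_prom() but additionally copies the
 * second and subsequent return cells into the caller-supplied "rets" array
 * (converted from big-endian), while still returning the first return cell.
 */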
241 static int __init call_prom_ret(const char *service, int nargs, int nret,
242 prom_arg_t *rets, ...)
245 struct prom_args args;
248 args.service = cpu_to_be32(ADDR(service));
249 args.nargs = cpu_to_be32(nargs);
250 args.nret = cpu_to_be32(nret);
252 va_start(list, rets);
253 for (i = 0; i < nargs; i++)
254 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
257 for (i = 0; i < nret; i++)
258 args.args[nargs+i] = 0;
260 if (enter_prom(&args, prom_entry) < 0)
264 for (i = 1; i < nret; ++i)
265 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
267 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
271 static void __init prom_print(const char *msg)
275 if (prom.stdout == 0)
278 for (p = msg; *p != 0; p = q) {
279 for (q = p; *q != 0 && *q != '\n'; ++q)
282 call_prom("write", 3, 1, prom.stdout, p, q - p);
286 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
291 static void __init prom_print_hex(unsigned long val)
293 int i, nibbles = sizeof(val)*2;
294 char buf[sizeof(val)*2+1];
296 for (i = nibbles-1; i >= 0; i--) {
297 buf[i] = (val & 0xf) + '0';
299 buf[i] += ('a'-'0'-10);
303 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
306 /* max number of decimal digits in an unsigned long */
308 static void __init prom_print_dec(unsigned long val)
311 char buf[UL_DIGITS+1];
313 for (i = UL_DIGITS-1; i >= 0; i--) {
314 buf[i] = (val % 10) + '0';
319 /* shift stuff down */
320 size = UL_DIGITS - i;
321 call_prom("write", 3, 1, prom.stdout, buf+i, size);
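/* prom_printf() below understands only a small, hand-rolled subset of format
 * specifiers (%s plus a few integer conversions and their 'l' variants,
 * handled case by case in the loop); only this subset is usable this early.
 */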
324 static void __init prom_printf(const char *format, ...)
326 const char *p, *q, *s;
331 va_start(args, format);
332 for (p = format; *p != 0; p = q) {
333 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
336 call_prom("write", 3, 1, prom.stdout, p, q - p);
341 call_prom("write", 3, 1, prom.stdout,
351 s = va_arg(args, const char *);
356 v = va_arg(args, unsigned long);
361 vs = va_arg(args, int);
372 else if (*q == 'x') {
374 v = va_arg(args, unsigned long);
376 } else if (*q == 'u') { /* '%lu' */
378 v = va_arg(args, unsigned long);
380 } else if (*q == 'd') { /* %ld */
382 vs = va_arg(args, long);
396 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
400 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
402 * Old OF requires we claim physical and virtual separately
403 * and then map explicitly (assuming virtual mode)
408 ret = call_prom_ret("call-method", 5, 2, &result,
409 ADDR("claim"), prom.memory,
411 if (ret != 0 || result == -1)
413 ret = call_prom_ret("call-method", 5, 2, &result,
414 ADDR("claim"), prom.mmumap,
417 call_prom("call-method", 4, 1, ADDR("release"),
418 prom.memory, size, virt);
421 /* the 0x12 is M (coherence) + PP == read/write */
422 call_prom("call-method", 6, 1,
423 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
426 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
430 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
433 /* Do not call exit because it clears the screen on pmac;
434 * it also causes some sort of double-fault on early pmacs */
435 if (of_platform == PLATFORM_POWERMAC)
438 /* ToDo: should put up an SRC here on pSeries */
439 call_prom("exit", 0, 0);
441 for (;;) /* should never get here */
446 static int __init prom_next_node(phandle *nodep)
450 if ((node = *nodep) != 0
451 && (*nodep = call_prom("child", 1, 1, node)) != 0)
453 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
456 if ((node = call_prom("parent", 1, 1, node)) == 0)
458 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
463 static inline int prom_getprop(phandle node, const char *pname,
464 void *value, size_t valuelen)
466 return call_prom("getprop", 4, 1, node, ADDR(pname),
467 (u32)(unsigned long) value, (u32) valuelen);
470 static inline int prom_getproplen(phandle node, const char *pname)
472 return call_prom("getproplen", 2, 1, node, ADDR(pname));
475 static void add_string(char **str, const char *q)
485 static char *tohex(unsigned int x)
487 static char digits[] = "0123456789abcdef";
488 static char result[9];
495 result[i] = digits[x & 0xf];
497 } while (x != 0 && i > 0);
501 static int __init prom_setprop(phandle node, const char *nodename,
502 const char *pname, void *value, size_t valuelen)
506 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
507 return call_prom("setprop", 4, 1, node, ADDR(pname),
508 (u32)(unsigned long) value, (u32) valuelen);
510 /* gah... setprop doesn't work on longtrail, have to use interpret */
512 add_string(&p, "dev");
513 add_string(&p, nodename);
514 add_string(&p, tohex((u32)(unsigned long) value));
515 add_string(&p, tohex(valuelen));
516 add_string(&p, tohex(ADDR(pname)));
517 add_string(&p, tohex(strlen(pname)));
518 add_string(&p, "property");
520 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
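/* The interpreted command built above ends up looking roughly like:
 *   dev <nodename> <value-addr> <value-len> <name-addr> <name-len> property
 * with the numeric fields rendered in hex by tohex() -- an illustrative
 * sketch of the Forth sequence, not a literal transcript.
 */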
523 /* We can't use the standard versions because of relocation headaches. */
524 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
525 || ('a' <= (c) && (c) <= 'f') \
526 || ('A' <= (c) && (c) <= 'F'))
528 #define isdigit(c) ('0' <= (c) && (c) <= '9')
529 #define islower(c) ('a' <= (c) && (c) <= 'z')
530 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
532 static unsigned long prom_strtoul(const char *cp, const char **endp)
534 unsigned long result = 0, base = 10, value;
539 if (toupper(*cp) == 'X') {
545 while (isxdigit(*cp) &&
546 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
547 result = result * base + value;
557 static unsigned long prom_memparse(const char *ptr, const char **retptr)
559 unsigned long ret = prom_strtoul(ptr, retptr);
563 * We can't use a switch here because GCC *may* generate a
564 * jump table which won't work, because we're not running at
565 * the address we're linked at.
567 if ('G' == **retptr || 'g' == **retptr)
570 if ('M' == **retptr || 'm' == **retptr)
573 if ('K' == **retptr || 'k' == **retptr)
585 * Early parsing of the command line passed to the kernel, used for
586 * "mem=x" and the options that affect the iommu
588 static void __init early_cmdline_parse(void)
595 prom_cmd_line[0] = 0;
597 if ((long)prom.chosen > 0)
598 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
599 #ifdef CONFIG_CMDLINE
600 if (l <= 0 || p[0] == '\0') /* dbl check */
601 strlcpy(prom_cmd_line,
602 CONFIG_CMDLINE, sizeof(prom_cmd_line));
603 #endif /* CONFIG_CMDLINE */
604 prom_printf("command line: %s\n", prom_cmd_line);
607 opt = strstr(prom_cmd_line, "iommu=");
609 prom_printf("iommu opt is: %s\n", opt);
611 while (*opt && *opt == ' ')
613 if (!strncmp(opt, "off", 3))
615 else if (!strncmp(opt, "force", 5))
616 prom_iommu_force_on = 1;
619 opt = strstr(prom_cmd_line, "mem=");
622 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
624 /* Align to 16 MB == size of ppc64 large page */
625 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
630 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
632 * The architecture vector has an array of PVR mask/value pairs,
633 * followed by # option vectors - 1, followed by the option vectors.
635 * See prom.h for the definition of the bits specified in the
636 * architecture vector.
638 * Because the description vector contains a mix of byte and word
639 * values, we declare it as an unsigned char array, and use this
640 * macro to put word values in.
642 #define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
643 ((x) >> 8) & 0xff, (x) & 0xff
645 /* Firmware expects the value to be n - 1, where n is the # of vectors */
646 #define NUM_VECTORS(n) ((n) - 1)
649 * Firmware expects 1 + n - 2, where n is the length of the option vector in
650 * bytes. The 1 accounts for the length byte itself; the purpose of the - 2 is unclear.
652 #define VECTOR_LENGTH(n) (1 + (n) - 2)
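/* Worked example: an option vector with a 2-byte body is declared as
 * VECTOR_LENGTH(2), i.e. a stored length byte of 1 + 2 - 2 = 1, which is how
 * option vectors 1, 3 and 4 below are sized.
 */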
654 unsigned char ibm_architecture_vec[] = {
655 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
656 W(0xffff0000), W(0x003e0000), /* POWER6 */
657 W(0xffff0000), W(0x003f0000), /* POWER7 */
658 W(0xffff0000), W(0x004b0000), /* POWER8E */
659 W(0xffff0000), W(0x004c0000), /* POWER8NVL */
660 W(0xffff0000), W(0x004d0000), /* POWER8 */
661 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
662 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
663 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
664 W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
665 NUM_VECTORS(6), /* 6 option vectors */
667 /* option vector 1: processor architectures supported */
668 VECTOR_LENGTH(2), /* length */
669 0, /* don't ignore, don't halt */
670 OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
671 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
673 /* option vector 2: Open Firmware options supported */
674 VECTOR_LENGTH(33), /* length */
677 W(0xffffffff), /* real_base */
678 W(0xffffffff), /* real_size */
679 W(0xffffffff), /* virt_base */
680 W(0xffffffff), /* virt_size */
681 W(0xffffffff), /* load_base */
682 W(256), /* 256MB min RMA */
683 W(0xffffffff), /* full client load */
684 0, /* min RMA percentage of total RAM */
685 48, /* max log_2(hash table size) */
687 /* option vector 3: processor options supported */
688 VECTOR_LENGTH(2), /* length */
689 0, /* don't ignore, don't halt */
690 OV3_FP | OV3_VMX | OV3_DFP,
692 /* option vector 4: IBM PAPR implementation */
693 VECTOR_LENGTH(2), /* length */
695 OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
697 /* option vector 5: PAPR/OF options */
698 VECTOR_LENGTH(18), /* length */
699 0, /* don't ignore, don't halt */
700 OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
701 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
702 #ifdef CONFIG_PCI_MSI
703 /* PCIe/MSI support. Without MSI full PCIe is not supported */
709 #ifdef CONFIG_PPC_SMLPAR
710 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
714 OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
718 /* WARNING: The offset of the "number of cores" field below
719 * must be kept in sync with the macro below. Update the definition if
720 * the structure layout changes.
722 #define IBM_ARCH_VEC_NRCORES_OFFSET 133
723 W(NR_CPUS), /* number of cores supported */
728 OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
729 OV5_FEAT(OV5_PFO_HW_842),
730 OV5_FEAT(OV5_SUB_PROCESSORS),
732 /* option vector 6: IBM PAPR hints */
733 VECTOR_LENGTH(3), /* length */
739 /* Old method - ELF header with PT_NOTE sections only works on BE */
740 #ifdef __BIG_ENDIAN__
741 static struct fake_elf {
748 char name[8]; /* "PowerPC" */
762 char name[24]; /* "IBM,RPA-Client-Config" */
776 .e_ident = { 0x7f, 'E', 'L', 'F',
777 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
778 .e_type = ET_EXEC, /* yeah right */
780 .e_version = EV_CURRENT,
781 .e_phoff = offsetof(struct fake_elf, phdr),
782 .e_phentsize = sizeof(Elf32_Phdr),
788 .p_offset = offsetof(struct fake_elf, chrpnote),
789 .p_filesz = sizeof(struct chrpnote)
792 .p_offset = offsetof(struct fake_elf, rpanote),
793 .p_filesz = sizeof(struct rpanote)
797 .namesz = sizeof("PowerPC"),
798 .descsz = sizeof(struct chrpdesc),
802 .real_mode = ~0U, /* ~0 means "don't care" */
811 .namesz = sizeof("IBM,RPA-Client-Config"),
812 .descsz = sizeof(struct rpadesc),
814 .name = "IBM,RPA-Client-Config",
817 .min_rmo_size = 64, /* in megabytes */
818 .min_rmo_percent = 0,
819 .max_pft_size = 48, /* 2^48 bytes max PFT size */
826 #endif /* __BIG_ENDIAN__ */
828 static int __init prom_count_smt_threads(void)
834 /* Pick up the first CPU node we can find */
835 for (node = 0; prom_next_node(&node); ) {
837 prom_getprop(node, "device_type", type, sizeof(type));
839 if (strcmp(type, "cpu"))
842 * There is an entry for each smt thread, each entry being
843 * 4 bytes long. All cpus should have the same number of
844 * smt threads, so return after finding the first.
846 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
847 if (plen == PROM_ERROR)
850 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
853 if (plen < 1 || plen > 64) {
854 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
855 (unsigned long)plen);
860 prom_debug("No threads found, assuming 1 per core\n");
867 static void __init prom_send_capabilities(void)
872 unsigned char *ptcores;
874 root = call_prom("open", 1, 1, ADDR("/"));
876 /* We need to tell the FW about the number of cores we support.
878 * To do that, we count the number of threads on the first core
879 * (we assume this is the same for all cores) and use it to divide NR_CPUS.
883 /* The core value may start at an odd address. If such a word
884 * access is made at a cache line boundary, this leads to an
885 * exception which may not be handled at this time.
886 * Force a per-byte access to avoid the exception.
888 ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
890 cores |= ptcores[0] << 24;
891 cores |= ptcores[1] << 16;
892 cores |= ptcores[2] << 8;
894 if (cores != NR_CPUS) {
895 prom_printf("WARNING ! "
896 "ibm_architecture_vec structure inconsistent: %lu!\n",
899 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
900 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
902 ptcores[0] = (cores >> 24) & 0xff;
903 ptcores[1] = (cores >> 16) & 0xff;
904 ptcores[2] = (cores >> 8) & 0xff;
905 ptcores[3] = cores & 0xff;
908 /* try calling the ibm,client-architecture-support method */
909 prom_printf("Calling ibm,client-architecture-support...");
910 if (call_prom_ret("call-method", 3, 2, &ret,
911 ADDR("ibm,client-architecture-support"),
913 ADDR(ibm_architecture_vec)) == 0) {
914 /* the call exists... */
916 prom_printf("\nWARNING: ibm,client-architecture"
917 "-support call FAILED!\n");
918 call_prom("close", 1, 0, root);
919 prom_printf(" done\n");
922 call_prom("close", 1, 0, root);
923 prom_printf(" not implemented\n");
926 #ifdef __BIG_ENDIAN__
930 /* no ibm,client-architecture-support call, try the old way */
931 elfloader = call_prom("open", 1, 1,
932 ADDR("/packages/elf-loader"));
933 if (elfloader == 0) {
934 prom_printf("couldn't open /packages/elf-loader\n");
937 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
938 elfloader, ADDR(&fake_elf));
939 call_prom("close", 1, 0, elfloader);
941 #endif /* __BIG_ENDIAN__ */
943 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
946 * Memory allocation strategy... our layout is normally:
948 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
949 * rare cases, initrd might end up being before the kernel though.
950 * We assume this won't overwrite the final kernel at 0; we have no
951 * provision to handle that in this version, but it should hopefully
954 * alloc_top is set to the top of RMO, eventually shrink down if the
957 * alloc_bottom is set to the top of kernel/initrd
959 * from there, allocations are done this way : rtas is allocated
960 * topmost, and the device-tree is allocated from the bottom. We try
961 * to grow the device-tree allocation as we progress. If we can't,
962 * then we fail, we don't currently have a facility to restart
963 * elsewhere, but that shouldn't be necessary.
965 * Note that calls to reserve_mem have to be done explicitly, memory
966 * allocated with either alloc_up or alloc_down isn't automatically reserved.
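 *
 * Rough picture of the resulting layout, low to high addresses:
 *   kernel/initrd -> alloc_bottom -> flattened device-tree (grows up)
 *   ... free ...
 *   RTAS and friends (allocated downward) <- alloc_top == top of RMO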
972 * Allocates memory in the RMO upward from the kernel/initrd
974 * When align is 0, this is a special case, it means to allocate in place
975 * at the current location of alloc_bottom or fail (that is basically
976 * extending the previous allocation). Used for the device-tree flattening
978 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
980 unsigned long base = alloc_bottom;
981 unsigned long addr = 0;
984 base = _ALIGN_UP(base, align);
985 prom_debug("alloc_up(%x, %x)\n", size, align);
987 prom_panic("alloc_up() called with mem not initialized\n");
990 base = _ALIGN_UP(alloc_bottom, align);
994 for(; (base + size) <= alloc_top;
995 base = _ALIGN_UP(base + 0x100000, align)) {
996 prom_debug(" trying: 0x%x\n\r", base);
997 addr = (unsigned long)prom_claim(base, size, 0);
998 if (addr != PROM_ERROR && addr != 0)
1006 alloc_bottom = addr + size;
1008 prom_debug(" -> %x\n", addr);
1009 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1010 prom_debug(" alloc_top : %x\n", alloc_top);
1011 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1012 prom_debug(" rmo_top : %x\n", rmo_top);
1013 prom_debug(" ram_top : %x\n", ram_top);
1019 * Allocates memory downward, either from top of RMO, or if highmem
1020 * is set, from the top of RAM. Note that this one doesn't handle
1021 * failures. It does claim memory if highmem is not set.
1023 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1026 unsigned long base, addr = 0;
1028 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1029 highmem ? "(high)" : "(low)");
1031 prom_panic("alloc_down() called with mem not initialized\n");
1034 /* Carve out storage for the TCE table. */
1035 addr = _ALIGN_DOWN(alloc_top_high - size, align);
1036 if (addr <= alloc_bottom)
1038 /* Will we bump into the RMO ? If yes, check that we
1039 * didn't overlap existing allocations there; if we did,
1040 * we are dead, we must be the first in town !
1042 if (addr < rmo_top) {
1043 /* Good, we are first */
1044 if (alloc_top == rmo_top)
1045 alloc_top = rmo_top = addr;
1049 alloc_top_high = addr;
1053 base = _ALIGN_DOWN(alloc_top - size, align);
1054 for (; base > alloc_bottom;
1055 base = _ALIGN_DOWN(base - 0x100000, align)) {
1056 prom_debug(" trying: 0x%x\n\r", base);
1057 addr = (unsigned long)prom_claim(base, size, 0);
1058 if (addr != PROM_ERROR && addr != 0)
1067 prom_debug(" -> %x\n", addr);
1068 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1069 prom_debug(" alloc_top : %x\n", alloc_top);
1070 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1071 prom_debug(" rmo_top : %x\n", rmo_top);
1072 prom_debug(" ram_top : %x\n", ram_top);
1078 * Parse a "reg" cell
1080 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1083 unsigned long r = 0;
1085 /* Ignore more than 2 cells */
1086 while (s > sizeof(unsigned long) / 4) {
1090 r = be32_to_cpu(*p++);
1094 r |= be32_to_cpu(*(p++));
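/* In short: prom_next_cell() consumes @s big-endian 32-bit cells from the
 * buffer, folds up to two of them into an unsigned long (extra leading cells
 * are skipped), and advances the caller's cursor -- which is how the "reg"
 * parsing loop in prom_init_mem() walks base/size pairs below.
 */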
1102 * Very dumb function for adding to the memory reserve list, but
1103 * we don't need anything smarter at this point
1105 * XXX Eventually check for collisions. They should NEVER happen.
1106 * If problems seem to show up, it would be a good start to track them down.
1109 static void __init reserve_mem(u64 base, u64 size)
1111 u64 top = base + size;
1112 unsigned long cnt = mem_reserve_cnt;
1117 /* We need to always keep one empty entry so that we
1118 * have our terminator with "size" set to 0 since we are
1119 * dumb and just copy this entire array to the boot params
1121 base = _ALIGN_DOWN(base, PAGE_SIZE);
1122 top = _ALIGN_UP(top, PAGE_SIZE);
1125 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1126 prom_panic("Memory reserve map exhausted !\n");
1127 mem_reserve_map[cnt].base = cpu_to_be64(base);
1128 mem_reserve_map[cnt].size = cpu_to_be64(size);
1129 mem_reserve_cnt = cnt + 1;
1133 * Initialize memory allocation mechanism, parse "memory" nodes and
1134 * obtain that way the top of memory and RMO to set up our local allocator
1136 static void __init prom_init_mem(void)
1139 char *path, type[64];
1146 * We iterate the memory nodes to find
1147 * 1) top of RMO (first node)
1150 val = cpu_to_be32(2);
1151 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1152 rac = be32_to_cpu(val);
1153 val = cpu_to_be32(1);
1154 prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1155 rsc = be32_to_cpu(val);
1156 prom_debug("root_addr_cells: %x\n", rac);
1157 prom_debug("root_size_cells: %x\n", rsc);
1159 prom_debug("scanning memory:\n");
1160 path = prom_scratch;
1162 for (node = 0; prom_next_node(&node); ) {
1164 prom_getprop(node, "device_type", type, sizeof(type));
1168 * CHRP Longtrail machines have no device_type
1169 * on the memory node, so check the name instead...
1171 prom_getprop(node, "name", type, sizeof(type));
1173 if (strcmp(type, "memory"))
1176 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1177 if (plen > sizeof(regbuf)) {
1178 prom_printf("memory node too large for buffer !\n");
1179 plen = sizeof(regbuf);
1182 endp = p + (plen / sizeof(cell_t));
1185 memset(path, 0, PROM_SCRATCH_SIZE);
1186 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1187 prom_debug(" node %s :\n", path);
1188 #endif /* DEBUG_PROM */
1190 while ((endp - p) >= (rac + rsc)) {
1191 unsigned long base, size;
1193 base = prom_next_cell(rac, &p);
1194 size = prom_next_cell(rsc, &p);
1198 prom_debug(" %x %x\n", base, size);
1199 if (base == 0 && (of_platform & PLATFORM_LPAR))
1201 if ((base + size) > ram_top)
1202 ram_top = base + size;
1206 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1209 * If prom_memory_limit is set we reduce the upper limits *except* for
1210 * alloc_top_high. This must be the real top of RAM so we can put TCE's up there.
1214 alloc_top_high = ram_top;
1216 if (prom_memory_limit) {
1217 if (prom_memory_limit <= alloc_bottom) {
1218 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1220 prom_memory_limit = 0;
1221 } else if (prom_memory_limit >= ram_top) {
1222 prom_printf("Ignoring mem=%x >= ram_top.\n",
1224 prom_memory_limit = 0;
1226 ram_top = prom_memory_limit;
1227 rmo_top = min(rmo_top, prom_memory_limit);
1232 * Setup our top alloc point, that is top of RMO or top of
1233 * segment 0 when running non-LPAR.
1234 * Some RS64 machines have buggy firmware where claims up at
1235 * 1GB fail. Cap at 768MB as a workaround.
1236 * Since 768MB is plenty of room, and we need to cap to something
1237 * reasonable on 32-bit, cap at 768MB on all machines.
1241 rmo_top = min(0x30000000ul, rmo_top);
1242 alloc_top = rmo_top;
1243 alloc_top_high = ram_top;
1246 * Check if we have an initrd after the kernel but still inside
1247 * the RMO. If we do, move our bottom point to after it.
1249 if (prom_initrd_start &&
1250 prom_initrd_start < rmo_top &&
1251 prom_initrd_end > alloc_bottom)
1252 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1254 prom_printf("memory layout at init:\n");
1255 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1256 prom_printf(" alloc_bottom : %x\n", alloc_bottom);
1257 prom_printf(" alloc_top : %x\n", alloc_top);
1258 prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
1259 prom_printf(" rmo_top : %x\n", rmo_top);
1260 prom_printf(" ram_top : %x\n", ram_top);
1263 static void __init prom_close_stdin(void)
1268 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1269 stdin = be32_to_cpu(val);
1270 call_prom("close", 1, 0, stdin);
1274 #ifdef CONFIG_PPC_POWERNV
1276 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1277 static u64 __initdata prom_opal_base;
1278 static u64 __initdata prom_opal_entry;
1282 * Allocate room for and instantiate OPAL
1284 static void __init prom_instantiate_opal(void)
1289 u64 size = 0, align = 0x10000;
1293 prom_debug("prom_instantiate_opal: start...\n");
1295 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1296 prom_debug("opal_node: %x\n", opal_node);
1297 if (!PHANDLE_VALID(opal_node))
1301 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1302 size = be64_to_cpu(val64);
1306 prom_getprop(opal_node, "opal-runtime-alignment", &val64, sizeof(val64));
1307 align = be64_to_cpu(val64);
1309 base = alloc_down(size, align, 0);
1311 prom_printf("OPAL allocation failed !\n");
1315 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1316 if (!IHANDLE_VALID(opal_inst)) {
1317 prom_printf("opening opal package failed (%x)\n", opal_inst);
1321 prom_printf("instantiating opal at 0x%x...", base);
1323 if (call_prom_ret("call-method", 4, 3, rets,
1324 ADDR("load-opal-runtime"),
1326 base >> 32, base & 0xffffffff) != 0
1327 || (rets[0] == 0 && rets[1] == 0)) {
1328 prom_printf(" failed\n");
1331 entry = (((u64)rets[0]) << 32) | rets[1];
1333 prom_printf(" done\n");
1335 reserve_mem(base, size);
1337 prom_debug("opal base = 0x%x\n", base);
1338 prom_debug("opal align = 0x%x\n", align);
1339 prom_debug("opal entry = 0x%x\n", entry);
1340 prom_debug("opal size = 0x%x\n", (long)size);
1342 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1343 &base, sizeof(base));
1344 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1345 &entry, sizeof(entry));
1347 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1348 prom_opal_base = base;
1349 prom_opal_entry = entry;
1351 prom_debug("prom_instantiate_opal: end...\n");
1354 #endif /* CONFIG_PPC_POWERNV */
1357 * Allocate room for and instantiate RTAS
1359 static void __init prom_instantiate_rtas(void)
1363 u32 base, entry = 0;
1367 prom_debug("prom_instantiate_rtas: start...\n");
1369 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1370 prom_debug("rtas_node: %x\n", rtas_node);
1371 if (!PHANDLE_VALID(rtas_node))
1375 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1376 size = be32_to_cpu(val);
1380 base = alloc_down(size, PAGE_SIZE, 0);
1382 prom_panic("Could not allocate memory for RTAS\n");
1384 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1385 if (!IHANDLE_VALID(rtas_inst)) {
1386 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1390 prom_printf("instantiating rtas at 0x%x...", base);
1392 if (call_prom_ret("call-method", 3, 2, &entry,
1393 ADDR("instantiate-rtas"),
1394 rtas_inst, base) != 0
1396 prom_printf(" failed\n");
1399 prom_printf(" done\n");
1401 reserve_mem(base, size);
1403 val = cpu_to_be32(base);
1404 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1406 val = cpu_to_be32(entry);
1407 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1410 /* Check if it supports "query-cpu-stopped-state" */
1411 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1412 &val, sizeof(val)) != PROM_ERROR)
1413 rtas_has_query_cpu_stopped = true;
1415 prom_debug("rtas base = 0x%x\n", base);
1416 prom_debug("rtas entry = 0x%x\n", entry);
1417 prom_debug("rtas size = 0x%x\n", (long)size);
1419 prom_debug("prom_instantiate_rtas: end...\n");
1424 * Allocate room for and instantiate Stored Measurement Log (SML)
1426 static void __init prom_instantiate_sml(void)
1428 phandle ibmvtpm_node;
1429 ihandle ibmvtpm_inst;
1430 u32 entry = 0, size = 0, succ = 0;
1434 prom_debug("prom_instantiate_sml: start...\n");
1436 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1437 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1438 if (!PHANDLE_VALID(ibmvtpm_node))
1441 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1442 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1443 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1447 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1448 &val, sizeof(val)) != PROM_ERROR) {
1449 if (call_prom_ret("call-method", 2, 2, &succ,
1450 ADDR("reformat-sml-to-efi-alignment"),
1451 ibmvtpm_inst) != 0 || succ == 0) {
1452 prom_printf("Reformat SML to EFI alignment failed\n");
1456 if (call_prom_ret("call-method", 2, 2, &size,
1457 ADDR("sml-get-allocated-size"),
1458 ibmvtpm_inst) != 0 || size == 0) {
1459 prom_printf("SML get allocated size failed\n");
1463 if (call_prom_ret("call-method", 2, 2, &size,
1464 ADDR("sml-get-handover-size"),
1465 ibmvtpm_inst) != 0 || size == 0) {
1466 prom_printf("SML get handover size failed\n");
1471 base = alloc_down(size, PAGE_SIZE, 0);
1473 prom_panic("Could not allocate memory for sml\n");
1475 prom_printf("instantiating sml at 0x%x...", base);
1477 memset((void *)base, 0, size);
1479 if (call_prom_ret("call-method", 4, 2, &entry,
1480 ADDR("sml-handover"),
1481 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1482 prom_printf("SML handover failed\n");
1485 prom_printf(" done\n");
1487 reserve_mem(base, size);
1489 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1490 &base, sizeof(base));
1491 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1492 &size, sizeof(size));
1494 prom_debug("sml base = 0x%x\n", base);
1495 prom_debug("sml size = 0x%x\n", (long)size);
1497 prom_debug("prom_instantiate_sml: end...\n");
1501 * Allocate room for and initialize TCE tables
1503 #ifdef __BIG_ENDIAN__
1504 static void __init prom_initialize_tce_table(void)
1508 char compatible[64], type[64], model[64];
1509 char *path = prom_scratch;
1511 u32 minalign, minsize;
1512 u64 tce_entry, *tce_entryp;
1513 u64 local_alloc_top, local_alloc_bottom;
1519 prom_debug("starting prom_initialize_tce_table\n");
1521 /* Cache current top of allocs so we reserve a single block */
1522 local_alloc_top = alloc_top_high;
1523 local_alloc_bottom = local_alloc_top;
1525 /* Search all nodes looking for PHBs. */
1526 for (node = 0; prom_next_node(&node); ) {
1530 prom_getprop(node, "compatible",
1531 compatible, sizeof(compatible));
1532 prom_getprop(node, "device_type", type, sizeof(type));
1533 prom_getprop(node, "model", model, sizeof(model));
1535 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1538 /* Keep the old logic intact to avoid regression. */
1539 if (compatible[0] != 0) {
1540 if ((strstr(compatible, "python") == NULL) &&
1541 (strstr(compatible, "Speedwagon") == NULL) &&
1542 (strstr(compatible, "Winnipeg") == NULL))
1544 } else if (model[0] != 0) {
1545 if ((strstr(model, "ython") == NULL) &&
1546 (strstr(model, "peedwagon") == NULL) &&
1547 (strstr(model, "innipeg") == NULL))
1551 if (prom_getprop(node, "tce-table-minalign", &minalign,
1552 sizeof(minalign)) == PROM_ERROR)
1554 if (prom_getprop(node, "tce-table-minsize", &minsize,
1555 sizeof(minsize)) == PROM_ERROR)
1556 minsize = 4UL << 20;
1559 * Even though we read what OF wants, we just set the table
1560 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1561 * By doing this, we avoid the pitfalls of trying to DMA to
1562 * MMIO space and the DMA alias hole.
1564 * On POWER4, firmware sets the TCE region by assuming
1565 * each TCE table is 8MB. Using this memory for anything
1566 * else will impact performance, so we always allocate 8MB.
1569 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1570 minsize = 8UL << 20;
1572 minsize = 4UL << 20;
1574 /* Align to the greater of the align or size */
1575 align = max(minalign, minsize);
1576 base = alloc_down(minsize, align, 1);
1578 prom_panic("ERROR, cannot find space for TCE table.\n");
1579 if (base < local_alloc_bottom)
1580 local_alloc_bottom = base;
1582 /* It seems OF doesn't null-terminate the path :-( */
1583 memset(path, 0, PROM_SCRATCH_SIZE);
1584 /* Call OF to setup the TCE hardware */
1585 if (call_prom("package-to-path", 3, 1, node,
1586 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1587 prom_printf("package-to-path failed\n");
1590 /* Save away the TCE table attributes for later use. */
1591 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1592 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1594 prom_debug("TCE table: %s\n", path);
1595 prom_debug("\tnode = 0x%x\n", node);
1596 prom_debug("\tbase = 0x%x\n", base);
1597 prom_debug("\tsize = 0x%x\n", minsize);
1599 /* Initialize the table to have a one-to-one mapping
1600 * over the allocated size.
1602 tce_entryp = (u64 *)base;
1603 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1604 tce_entry = (i << PAGE_SHIFT);
1606 *tce_entryp = tce_entry;
1609 prom_printf("opening PHB %s", path);
1610 phb_node = call_prom("open", 1, 1, path);
1612 prom_printf("... failed\n");
1614 prom_printf("... done\n");
1616 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1617 phb_node, -1, minsize,
1618 (u32) base, (u32) (base >> 32));
1619 call_prom("close", 1, 0, phb_node);
1622 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1624 /* These are only really needed if there is a memory limit in
1625 * effect, but we don't know so export them always. */
1626 prom_tce_alloc_start = local_alloc_bottom;
1627 prom_tce_alloc_end = local_alloc_top;
1629 /* Flag the first invalid entry */
1630 prom_debug("ending prom_initialize_tce_table\n");
1632 #endif /* __BIG_ENDIAN__ */
1633 #endif /* CONFIG_PPC64 */
1636 * With CHRP SMP we need to use the OF to start the other processors.
1637 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1638 * so we have to put the processors into a holding pattern controlled
1639 * by the kernel (not OF) before we destroy the OF.
1641 * This uses a chunk of low memory, puts some holding pattern
1642 * code there and sends the other processors off to there until
1643 * smp_boot_cpus tells them to do something. The holding pattern
1644 * checks that address until its cpu # appears there; when it does, that
1645 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1646 * of setting those values.
1648 * We also use physical address 0x4 here to tell when a cpu
1649 * is in its holding pattern code.
1654 * We want to reference the copy of __secondary_hold_* in the
1655 * 0 - 0x100 address range
1657 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
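/* LOW_ADDR() masks a symbol's address down to its offset within the first
 * 0x100 bytes, i.e. the copy of the secondary-hold code that lives in low
 * memory, so the spinloop/acknowledge words poked below refer to that copy
 * rather than the (soon to be moved) kernel image.
 */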
1659 static void __init prom_hold_cpus(void)
1664 unsigned long *spinloop
1665 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1666 unsigned long *acknowledge
1667 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1668 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1671 * On pseries, if RTAS supports "query-cpu-stopped-state",
1672 * we skip this stage, the CPUs will be started by the
1673 * kernel using RTAS.
1675 if ((of_platform == PLATFORM_PSERIES ||
1676 of_platform == PLATFORM_PSERIES_LPAR) &&
1677 rtas_has_query_cpu_stopped) {
1678 prom_printf("prom_hold_cpus: skipped\n");
1682 prom_debug("prom_hold_cpus: start...\n");
1683 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1684 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1685 prom_debug(" 1) acknowledge = 0x%x\n",
1686 (unsigned long)acknowledge);
1687 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1688 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1690 /* Set the common spinloop variable, so all of the secondary cpus
1691 * will block when they are awakened from their OF spinloop.
1692 * This must occur for both SMP and non SMP kernels, since OF will
1693 * be trashed when we move the kernel.
1698 for (node = 0; prom_next_node(&node); ) {
1699 unsigned int cpu_no;
1703 prom_getprop(node, "device_type", type, sizeof(type));
1704 if (strcmp(type, "cpu") != 0)
1707 /* Skip non-configured cpus. */
1708 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1709 if (strcmp(type, "okay") != 0)
1712 reg = cpu_to_be32(-1); /* make sparse happy */
1713 prom_getprop(node, "reg", &reg, sizeof(reg));
1714 cpu_no = be32_to_cpu(reg);
1716 prom_debug("cpu hw idx = %lu\n", cpu_no);
1718 /* Init the acknowledge var which will be reset by
1719 * the secondary cpu when it awakens from its OF spinloop.
1722 *acknowledge = (unsigned long)-1;
1724 if (cpu_no != prom.cpu) {
1725 /* Primary Thread of non-boot cpu or any thread */
1726 prom_printf("starting cpu hw idx %lu... ", cpu_no);
1727 call_prom("start-cpu", 3, 0, node,
1728 secondary_hold, cpu_no);
1730 for (i = 0; (i < 100000000) &&
1731 (*acknowledge == ((unsigned long)-1)); i++ )
1734 if (*acknowledge == cpu_no)
1735 prom_printf("done\n");
1737 prom_printf("failed: %x\n", *acknowledge);
1741 prom_printf("boot cpu hw idx %lu\n", cpu_no);
1742 #endif /* CONFIG_SMP */
1745 prom_debug("prom_hold_cpus: end...\n");
1749 static void __init prom_init_client_services(unsigned long pp)
1751 /* Get a handle to the prom entry point before anything else */
1754 /* get a handle for the stdout device */
1755 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1756 if (!PHANDLE_VALID(prom.chosen))
1757 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1759 /* get device tree root */
1760 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1761 if (!PHANDLE_VALID(prom.root))
1762 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1769 * For really old powermacs, we need to map things we claim.
1770 * For that, we need the ihandle of the mmu.
1771 * Also, on the longtrail, we need to work around other bugs.
1773 static void __init prom_find_mmu(void)
1778 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1779 if (!PHANDLE_VALID(oprom))
1781 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1783 version[sizeof(version) - 1] = 0;
1784 /* XXX might need to add other versions here */
1785 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1786 of_workarounds = OF_WA_CLAIM;
1787 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1788 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1789 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1792 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
1793 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
1794 sizeof(prom.mmumap));
1795 prom.mmumap = be32_to_cpu(prom.mmumap);
1796 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
1797 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1800 #define prom_find_mmu()
1803 static void __init prom_init_stdout(void)
1805 char *path = of_stdout_device;
1807 phandle stdout_node;
1810 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
1811 prom_panic("cannot find stdout");
1813 prom.stdout = be32_to_cpu(val);
1815 /* Get the full OF pathname of the stdout device */
1816 memset(path, 0, 256);
1817 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1818 prom_printf("OF stdout device is: %s\n", of_stdout_device);
1819 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
1820 path, strlen(path) + 1);
1822 /* instance-to-package fails on PA-Semi */
1823 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1824 if (stdout_node != PROM_ERROR) {
1825 val = cpu_to_be32(stdout_node);
1826 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1829 /* If it's a display, note it */
1830 memset(type, 0, sizeof(type));
1831 prom_getprop(stdout_node, "device_type", type, sizeof(type));
1832 if (strcmp(type, "display") == 0)
1833 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
1837 static int __init prom_find_machine_type(void)
1846 /* Look for a PowerMac or a Cell */
1847 len = prom_getprop(prom.root, "compatible",
1848 compat, sizeof(compat)-1);
1852 char *p = &compat[i];
1856 if (strstr(p, "Power Macintosh") ||
1857 strstr(p, "MacRISC"))
1858 return PLATFORM_POWERMAC;
1860 /* We must make sure we don't detect the IBM Cell
1861 * blades as pSeries due to some firmware issues,
1864 if (strstr(p, "IBM,CBEA") ||
1865 strstr(p, "IBM,CPBW-1.0"))
1866 return PLATFORM_GENERIC;
1867 #endif /* CONFIG_PPC64 */
1872 /* Try to detect OPAL */
1873 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
1874 return PLATFORM_OPAL;
1876 /* Try to figure out if it's an IBM pSeries or any other
1877 * PAPR compliant platform. We assume it is if :
1878 * - /device_type is "chrp" (please, do NOT use that for future
1882 len = prom_getprop(prom.root, "device_type",
1883 compat, sizeof(compat)-1);
1885 return PLATFORM_GENERIC;
1886 if (strcmp(compat, "chrp"))
1887 return PLATFORM_GENERIC;
1889 /* Default to pSeries. We need to know if we are running LPAR */
1890 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1891 if (!PHANDLE_VALID(rtas))
1892 return PLATFORM_GENERIC;
1893 x = prom_getproplen(rtas, "ibm,hypertas-functions");
1894 if (x != PROM_ERROR) {
1895 prom_debug("Hypertas detected, assuming LPAR !\n");
1896 return PLATFORM_PSERIES_LPAR;
1898 return PLATFORM_PSERIES;
1900 return PLATFORM_GENERIC;
1904 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
1906 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
1910 * If we have a display that we don't know how to drive,
1911 * we will want to try to execute OF's open method for it
1912 * later. However, OF will probably fall over if we do that
1913 * after we've taken over the MMU.
1914 * So we check whether we will need to open the display,
1915 * and if so, open it now.
1917 static void __init prom_check_displays(void)
1919 char type[16], *path;
1924 static unsigned char default_colors[] = {
1942 const unsigned char *clut;
1944 prom_debug("Looking for displays\n");
1945 for (node = 0; prom_next_node(&node); ) {
1946 memset(type, 0, sizeof(type));
1947 prom_getprop(node, "device_type", type, sizeof(type));
1948 if (strcmp(type, "display") != 0)
1951 /* It seems OF doesn't null-terminate the path :-( */
1952 path = prom_scratch;
1953 memset(path, 0, PROM_SCRATCH_SIZE);
1956 * leave some room at the end of the path for appending extra arguments.
1959 if (call_prom("package-to-path", 3, 1, node, path,
1960 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
1962 prom_printf("found display : %s, opening... ", path);
1964 ih = call_prom("open", 1, 1, path);
1966 prom_printf("failed\n");
1971 prom_printf("done\n");
1972 prom_setprop(node, path, "linux,opened", NULL, 0);
1974 /* Setup a usable color table when the appropriate
1975 * method is available. Should update this to set-colors */
1976 clut = default_colors;
1977 for (i = 0; i < 16; i++, clut += 3)
1978 if (prom_set_color(ih, i, clut[0], clut[1],
1982 #ifdef CONFIG_LOGO_LINUX_CLUT224
1983 clut = PTRRELOC(logo_linux_clut224.clut);
1984 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
1985 if (prom_set_color(ih, i + 32, clut[0], clut[1],
1988 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
1990 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
1991 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
1993 u32 width, height, pitch, addr;
1995 prom_printf("Setting btext !\n");
1996 prom_getprop(node, "width", &width, 4);
1997 prom_getprop(node, "height", &height, 4);
1998 prom_getprop(node, "linebytes", &pitch, 4);
1999 prom_getprop(node, "address", &addr, 4);
2000 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2001 width, height, pitch, addr);
2002 btext_setup_display(width, height, 8, pitch, addr);
2004 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2009 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2010 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2011 unsigned long needed, unsigned long align)
2015 *mem_start = _ALIGN(*mem_start, align);
2016 while ((*mem_start + needed) > *mem_end) {
2017 unsigned long room, chunk;
2019 prom_debug("Chunk exhausted, claiming more at %x...\n",
2021 room = alloc_top - alloc_bottom;
2022 if (room > DEVTREE_CHUNK_SIZE)
2023 room = DEVTREE_CHUNK_SIZE;
2024 if (room < PAGE_SIZE)
2025 prom_panic("No memory for flatten_device_tree "
2027 chunk = alloc_up(room, 0);
2029 prom_panic("No memory for flatten_device_tree "
2030 "(claim failed)\n");
2031 *mem_end = chunk + room;
2034 ret = (void *)*mem_start;
2035 *mem_start += needed;
2040 #define dt_push_token(token, mem_start, mem_end) do { \
2041 void *room = make_room(mem_start, mem_end, 4, 4); \
2042 *(__be32 *)room = cpu_to_be32(token); \
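/* dt_push_token() appends one 32-bit big-endian token to the flattened tree;
 * the structure block built below is the usual FDT token stream of
 * OF_DT_BEGIN_NODE / OF_DT_PROP / OF_DT_END_NODE entries terminated by a
 * single OF_DT_END.
 */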
2045 static unsigned long __init dt_find_string(char *str)
2049 s = os = (char *)dt_string_start;
2051 while (s < (char *)dt_string_end) {
2052 if (strcmp(s, str) == 0)
2060 * The Open Firmware 1275 specification states properties must be 31 bytes or
2061 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2063 #define MAX_PROPERTY_NAME 64
2065 static void __init scan_dt_build_strings(phandle node,
2066 unsigned long *mem_start,
2067 unsigned long *mem_end)
2069 char *prev_name, *namep, *sstart;
2073 sstart = (char *)dt_string_start;
2075 /* get and store all property names */
2078 /* 64 is max len of name including nul. */
2079 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2080 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2081 /* No more properties: unwind alloc */
2082 *mem_start = (unsigned long)namep;
2087 if (strcmp(namep, "name") == 0) {
2088 *mem_start = (unsigned long)namep;
2092 /* get/create string entry */
2093 soff = dt_find_string(namep);
2095 *mem_start = (unsigned long)namep;
2096 namep = sstart + soff;
2098 /* Trim off some if we can */
2099 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2100 dt_string_end = *mem_start;
2105 /* do all our children */
2106 child = call_prom("child", 1, 1, node);
2107 while (child != 0) {
2108 scan_dt_build_strings(child, mem_start, mem_end);
2109 child = call_prom("peer", 1, 1, child);
2113 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2114 unsigned long *mem_end)
2117 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2119 unsigned char *valp;
2120 static char pname[MAX_PROPERTY_NAME];
2121 int l, room, has_phandle = 0;
2123 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2125 /* get the node's full name */
2126 namep = (char *)*mem_start;
2127 room = *mem_end - *mem_start;
2130 l = call_prom("package-to-path", 3, 1, node, namep, room);
2132 /* Didn't fit? Get more room. */
2134 if (l >= *mem_end - *mem_start)
2135 namep = make_room(mem_start, mem_end, l+1, 1);
2136 call_prom("package-to-path", 3, 1, node, namep, l);
2140 /* Fixup an Apple bug where they have bogus \0 chars in the
2141 * middle of the path in some properties, and extract
2142 * the unit name (everything after the last '/').
2144 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2151 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2154 /* get it again for debugging */
2155 path = prom_scratch;
2156 memset(path, 0, PROM_SCRATCH_SIZE);
2157 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2159 /* get and store all properties */
2161 sstart = (char *)dt_string_start;
2163 if (call_prom("nextprop", 3, 1, node, prev_name,
2168 if (strcmp(pname, "name") == 0) {
2173 /* find string offset */
2174 soff = dt_find_string(pname);
2176 prom_printf("WARNING: Can't find string index for"
2177 " <%s>, node %s\n", pname, path);
2180 prev_name = sstart + soff;
2183 l = call_prom("getproplen", 2, 1, node, pname);
2186 if (l == PROM_ERROR)
2189 /* push property head */
2190 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2191 dt_push_token(l, mem_start, mem_end);
2192 dt_push_token(soff, mem_start, mem_end);
2194 /* push property content */
2195 valp = make_room(mem_start, mem_end, l, 4);
2196 call_prom("getprop", 4, 1, node, pname, valp, l);
2197 *mem_start = _ALIGN(*mem_start, 4);
2199 if (!strcmp(pname, "phandle"))
2203 /* Add a "linux,phandle" property if no "phandle" property already
2204 * existed (can happen with OPAL)
2207 soff = dt_find_string("linux,phandle");
2209 prom_printf("WARNING: Can't find string index for"
2210 " <linux-phandle> node %s\n", path);
2212 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2213 dt_push_token(4, mem_start, mem_end);
2214 dt_push_token(soff, mem_start, mem_end);
2215 valp = make_room(mem_start, mem_end, 4, 4);
2216 *(__be32 *)valp = cpu_to_be32(node);
2220 /* do all our children */
2221 child = call_prom("child", 1, 1, node);
2222 while (child != 0) {
2223 scan_dt_build_struct(child, mem_start, mem_end);
2224 child = call_prom("peer", 1, 1, child);
2227 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2230 static void __init flatten_device_tree(void)
2233 unsigned long mem_start, mem_end, room;
2234 struct boot_param_header *hdr;
2239 * Check how much room we have between alloc top & bottom (+/- a
2240 * few pages), crop to 1MB, as this is our "chunk" size
2242 room = alloc_top - alloc_bottom - 0x4000;
2243 if (room > DEVTREE_CHUNK_SIZE)
2244 room = DEVTREE_CHUNK_SIZE;
2245 prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2247 /* Now try to claim that */
2248 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2250 prom_panic("Can't allocate initial device-tree chunk\n");
2251 mem_end = mem_start + room;
2253 /* Get root of tree */
2254 root = call_prom("peer", 1, 1, (phandle)0);
2255 if (root == (phandle)0)
2256 prom_panic ("couldn't get device tree root\n");
2258 /* Build header and make room for mem rsv map */
2259 mem_start = _ALIGN(mem_start, 4);
2260 hdr = make_room(&mem_start, &mem_end,
2261 sizeof(struct boot_param_header), 4);
2262 dt_header_start = (unsigned long)hdr;
2263 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2265 /* Start of strings */
2266 mem_start = PAGE_ALIGN(mem_start);
2267 dt_string_start = mem_start;
2268 mem_start += 4; /* hole */
2270 /* Add "linux,phandle" in there, we'll need it */
2271 namep = make_room(&mem_start, &mem_end, 16, 1);
2272 strcpy(namep, "linux,phandle");
2273 mem_start = (unsigned long)namep + strlen(namep) + 1;
2275 /* Build string array */
2276 prom_printf("Building dt strings...\n");
2277 scan_dt_build_strings(root, &mem_start, &mem_end);
2278 dt_string_end = mem_start;
2280 /* Build structure */
2281 mem_start = PAGE_ALIGN(mem_start);
2282 dt_struct_start = mem_start;
2283 prom_printf("Building dt structure...\n");
2284 scan_dt_build_struct(root, &mem_start, &mem_end);
2285 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2286 dt_struct_end = PAGE_ALIGN(mem_start);
2289 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2290 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2291 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2292 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2293 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2294 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2295 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2296 hdr->version = cpu_to_be32(OF_DT_VERSION);
2297 /* Version 16 is not backward compatible */
2298 hdr->last_comp_version = cpu_to_be32(0x10);
2300 /* Copy the reserve map in */
2301 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
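/* At this point the blob has the standard flattened-device-tree shape:
 * boot_param_header, then the memory reserve map, then the strings block,
 * then the structure block, with all offsets in the header expressed
 * relative to dt_header_start as set up above.
 */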
2306 prom_printf("reserved memory map:\n");
2307 for (i = 0; i < mem_reserve_cnt; i++)
2308 prom_printf(" %x - %x\n",
2309 be64_to_cpu(mem_reserve_map[i].base),
2310 be64_to_cpu(mem_reserve_map[i].size));
2313 /* Bump mem_reserve_cnt to cause further reservations to fail
2314 * since it's too late.
2316 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2318 prom_printf("Device tree strings 0x%x -> 0x%x\n",
2319 dt_string_start, dt_string_end);
2320 prom_printf("Device tree struct 0x%x -> 0x%x\n",
2321 dt_struct_start, dt_struct_end);
2324 #ifdef CONFIG_PPC_MAPLE
2325 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2326 * The values are bad, and it doesn't even have the right number of cells. */
2327 static void __init fixup_device_tree_maple(void)
2330 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */

	name = "/ht@0/isa@4";
	isa = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(isa)) {
		name = "/ht@0/isa@6";
		isa = call_prom("finddevice", 1, 1, ADDR(name));
		rloc = 0x01003000; /* IO space; PCI device = 6 */
	}
	if (!PHANDLE_VALID(isa))
		return;

	if (prom_getproplen(isa, "ranges") != 12)
		return;
	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
	    == PROM_ERROR)
		return;

	if (isa_ranges[0] != 0x1 ||
	    isa_ranges[1] != 0xf4000000 ||
	    isa_ranges[2] != 0x00010000)
		return;

	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");

	isa_ranges[0] = 0x1;
	isa_ranges[1] = 0x0;
	isa_ranges[2] = rloc;
	isa_ranges[3] = 0x0;
	isa_ranges[4] = 0x0;
	isa_ranges[5] = 0x00010000;
	prom_setprop(isa, name, "ranges",
			isa_ranges, sizeof(isa_ranges));
}

#define CPC925_MC_START		0xf8000000
#define CPC925_MC_LENGTH	0x1000000
/* The values for memory-controller don't have right number of cells */
static void __init fixup_device_tree_maple_memory_controller(void)
{
	phandle mc;
	u32 mc_reg[4];
	char *name = "/hostbridge@f8000000";
	u32 ac, sc;

	mc = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(mc))
		return;
	if (prom_getproplen(mc, "reg") != 8)
		return;

	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
	if ((ac != 2) || (sc != 2))
		return;

	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
		return;
	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
		return;

	prom_printf("Fixing up bogus hostbridge on Maple...\n");

	mc_reg[0] = 0x0;
	mc_reg[1] = CPC925_MC_START;
	mc_reg[2] = 0x0;
	mc_reg[3] = CPC925_MC_LENGTH;
	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
}
#else
#define fixup_device_tree_maple()
#define fixup_device_tree_maple_memory_controller()
#endif

#ifdef CONFIG_PPC_CHRP
/*
 * Pegasos and BriQ lack the "ranges" property in the isa node
 * Pegasos needs decimal IRQ 14/15, not hexadecimal
 * Pegasos has the IDE configured in legacy mode, but advertised as native
 */
static void __init fixup_device_tree_chrp(void)
{
	phandle ph;
	u32 prop[6];
	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
	char *name;
	int rc;

	name = "/pci@80000000/isa@c";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(ph)) {
		name = "/pci@ff500000/isa@6";
		ph = call_prom("finddevice", 1, 1, ADDR(name));
		rloc = 0x01003000; /* IO space; PCI device = 6 */
	}
	if (PHANDLE_VALID(ph)) {
		rc = prom_getproplen(ph, "ranges");
		if (rc == 0 || rc == PROM_ERROR) {
			prom_printf("Fixing up missing ISA range on Pegasos...\n");

			prop[0] = 0x1;
			prop[1] = 0x0;
			prop[2] = rloc;
			prop[3] = 0x0;
			prop[4] = 0x0;
			prop[5] = 0x00010000;
			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
		}
	}

	name = "/pci@80000000/ide@C,1";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (PHANDLE_VALID(ph)) {
		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
		prop[0] = 14;
		prop[1] = 0x0;
		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
		prom_printf("Fixing up IDE class-code on Pegasos...\n");
		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
		if (rc == sizeof(u32)) {
			prop[0] &= ~0x5;
			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
		}
	}
}
#else
#define fixup_device_tree_chrp()
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
static void __init fixup_device_tree_pmac(void)
{
	phandle u3, i2c, mpic;
	u32 u3_rev;
	u32 interrupts[2];
	u32 parent;

	/* Some G5s have a missing interrupt definition, fix it up here */
	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
	if (!PHANDLE_VALID(u3))
		return;
	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
	if (!PHANDLE_VALID(i2c))
		return;
	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
	if (!PHANDLE_VALID(mpic))
		return;

	/* check if proper rev of u3 */
	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
	    == PROM_ERROR)
		return;
	if (u3_rev < 0x35 || u3_rev > 0x39)
		return;
	/* does it need fixup ? */
	if (prom_getproplen(i2c, "interrupts") > 0)
		return;

	prom_printf("fixing up bogus interrupts for u3 i2c...\n");

	/* interrupt on this revision of u3 is number 0 and level */
	interrupts[0] = 0;
	interrupts[1] = 1;
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
		     &interrupts, sizeof(interrupts));
	parent = (u32)mpic;
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
		     &parent, sizeof(parent));
}
#else
#define fixup_device_tree_pmac()
#endif

#ifdef CONFIG_PPC_EFIKA
/*
 * The MPC5200 FEC driver requires a phy-handle property to tell it how
 * to talk to the phy. If the phy-handle property is missing, then this
 * function is called to add the appropriate nodes and link it to the
 * ethernet node.
 */
static void __init fixup_device_tree_efika_add_phy(void)
{
	u32 node;
	char prop[64];
	int rv;

	/* Check if /builtin/ethernet exists - bail if it doesn't */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
	if (!PHANDLE_VALID(node))
		return;

	/* Check if the phy-handle property exists - bail if it does */
	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
	if (rv != PROM_ERROR)
		return;

	/*
	 * At this point the ethernet device doesn't have a phy described.
	 * Now we need to add the missing phy node and linkage
	 */
	/* Check for an MDIO bus node - if missing then create one */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet MDIO node\n");
2539 call_prom("interpret", 1, 1,
2540 " s\" /builtin\" find-device"
2542 " 1 encode-int s\" #address-cells\" property"
2543 " 0 encode-int s\" #size-cells\" property"
2544 " s\" mdio\" device-name"
2545 " s\" fsl,mpc5200b-mdio\" encode-string"
2546 " s\" compatible\" property"
2547 " 0xf0003000 0x400 reg"
2549 " 0x5 encode-int encode+"
2550 " 0x3 encode-int encode+"
2551 " s\" interrupts\" property"
2555 /* Check for a PHY device node - if missing then create one and
2556 * give it's phandle to the ethernet node */
2557 node = call_prom("finddevice", 1, 1,
2558 ADDR("/builtin/mdio/ethernet-phy"));
2559 if (!PHANDLE_VALID(node)) {
2560 prom_printf("Adding Ethernet PHY node\n");
2561 call_prom("interpret", 1, 1,
2562 " s\" /builtin/mdio\" find-device"
2564 " s\" ethernet-phy\" device-name"
2565 " 0x10 encode-int s\" reg\" property"
2569 " s\" /builtin/ethernet\" find-device"
2571 " s\" phy-handle\" property"

static void __init fixup_device_tree_efika(void)
{
	int sound_irq[3] = { 2, 2, 0 };
	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
				3,4,0, 3,5,0, 3,6,0, 3,7,0,
				3,8,0, 3,9,0, 3,10,0, 3,11,0,
				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
	u32 node;
	char prop[64];
	int rv, len;

	/* Check if we're really running on an EFIKA */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(node))
		return;

	rv = prom_getprop(node, "model", prop, sizeof(prop));
	if (rv == PROM_ERROR)
		return;
	if (strcmp(prop, "EFIKA5K2"))
		return;

	prom_printf("Applying EFIKA device tree fixups\n");

	/* Claiming to be 'chrp' is death */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));

	/* CODEGEN,description is exposed in /proc/cpuinfo so
	   fix that too */
	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
		prom_setprop(node, "/", "CODEGEN,description",
			     "Efika 5200B PowerPC System",
			     sizeof("Efika 5200B PowerPC System"));

	/* Fixup bestcomm interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
	if (PHANDLE_VALID(node)) {
		len = prom_getproplen(node, "interrupts");
		if (len == 12) {
			prom_printf("Fixing bestcomm interrupts property\n");
			prom_setprop(node, "/builtin/bestcom", "interrupts",
				     bcomm_irq, sizeof(bcomm_irq));
		}
	}

	/* Fixup sound interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
	if (PHANDLE_VALID(node)) {
		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
		if (rv == PROM_ERROR) {
			prom_printf("Adding sound interrupts property\n");
			prom_setprop(node, "/builtin/sound", "interrupts",
				     sound_irq, sizeof(sound_irq));
		}
	}

	/* Make sure ethernet phy-handle property exists */
	fixup_device_tree_efika_add_phy();
}
#else
#define fixup_device_tree_efika()
#endif
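
/*
 * All of the platform-specific fixups above are applied in one place,
 * before flatten_device_tree(), so the corrected properties are what
 * ends up in the blob handed to the kernel.
 */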
static void __init fixup_device_tree(void)
{
	fixup_device_tree_maple();
	fixup_device_tree_maple_memory_controller();
	fixup_device_tree_chrp();
	fixup_device_tree_pmac();
	fixup_device_tree_efika();
}

static void __init prom_find_boot_cpu(void)
{
	__be32 rval;
	ihandle prom_cpu;
	phandle cpu_pkg;

	rval = 0;
	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
		return;
	prom_cpu = be32_to_cpu(rval);
	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
	prom.cpu = be32_to_cpu(rval);
	prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
}

static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (r3 && r4 && r4 != 0xdeadbeef) {
		__be64 val;

		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
		prom_initrd_end = prom_initrd_start + r4;

		val = cpu_to_be64(prom_initrd_start);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
			     &val, sizeof(val));
		val = cpu_to_be64(prom_initrd_end);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
			     &val, sizeof(val));

		reserve_mem(prom_initrd_start,
			    prom_initrd_end - prom_initrd_start);

		prom_debug("initrd_start=0x%x\n", prom_initrd_start);
		prom_debug("initrd_end=0x%x\n", prom_initrd_end);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
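
/*
 * The 64-bit code in this file reaches its static data through TOC
 * entries that hold link-time addresses.  When the image is running at
 * a different address, the helpers below shift every entry between
 * __prom_init_toc_start and __prom_init_toc_end by the load offset on
 * entry and shift them back before the kernel proper takes over.  With
 * CONFIG_RELOCATABLE no adjustment is needed here, so the helpers are
 * empty.
 */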
#ifdef CONFIG_PPC64
#ifdef CONFIG_RELOCATABLE
static void reloc_toc(void)
{
}

static void unreloc_toc(void)
{
}
#else
static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
{
	unsigned long i;
	unsigned long *toc_entry;

	/* Get the start of the TOC by using r2 directly. */
	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));

	for (i = 0; i < nr_entries; i++) {
		*toc_entry = *toc_entry + offset;
		toc_entry++;
	}
}

static void reloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	__reloc_toc(offset, nr_entries);
	mb();
}

static void unreloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	mb();
	__reloc_toc(-offset, nr_entries);
}
#endif
#endif

/*
 * We enter here early on, when the Open Firmware prom is still
 * handling exceptions and the MMU hash table for us.
 */
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7,
			       unsigned long kbase)
{
	unsigned long hdr;

#ifdef CONFIG_PPC32
	unsigned long offset = reloc_offset();
	reloc_got2(offset);
#else
	reloc_toc();
#endif

	/*
	 * First zero the BSS
	 */
	memset(&__bss_start, 0, __bss_stop - __bss_start);

	/*
	 * Init interface to Open Firmware, get some node references,
	 * like /chosen
	 */
	prom_init_client_services(pp);

	/*
	 * See if this OF is old enough that we need to do explicit maps
	 * and other workarounds
	 */
	prom_find_mmu();

	/*
	 * Init prom stdout device
	 */
	prom_init_stdout();
	prom_printf("Preparing to boot %s", linux_banner);

	/*
	 * Get default machine type. At this point, we do not differentiate
	 * between pSeries SMP and pSeries LPAR
	 */
	of_platform = prom_find_machine_type();
	prom_printf("Detected machine type: %x\n", of_platform);

#ifndef CONFIG_NONSTATIC_KERNEL
	/* Bail if this is a kdump kernel. */
	if (PHYSICAL_START > 0)
		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
#endif

	/*
	 * Check for an initrd
	 */
	prom_check_initrd(r3, r4);

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
	/*
	 * On pSeries, inform the firmware about our capabilities
	 */
	if (of_platform == PLATFORM_PSERIES ||
	    of_platform == PLATFORM_PSERIES_LPAR)
		prom_send_capabilities();
#endif

	/*
	 * Copy the CPU hold code
	 */
	if (of_platform != PLATFORM_POWERMAC)
		copy_and_flush(0, kbase, 0x100, 0);

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

	/*
	 * Initialize memory management within prom_init
	 */
	prom_init_mem();

	/*
	 * Determine which cpu is actually running right _now_
	 */
	prom_find_boot_cpu();

	/*
	 * Initialize display devices
	 */
	prom_check_displays();

#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
	/*
	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything
	 * else that uses the allocator, as we need to make sure we get the
	 * top of memory available for us here...
	 */
	if (of_platform == PLATFORM_PSERIES)
		prom_initialize_tce_table();
#endif

	/*
	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
	 * have a usable RTAS implementation.
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_instantiate_rtas();

#ifdef CONFIG_PPC_POWERNV
	if (of_platform == PLATFORM_OPAL)
		prom_instantiate_opal();
#endif /* CONFIG_PPC_POWERNV */

#ifdef CONFIG_PPC64
	/* instantiate sml */
	prom_instantiate_sml();
#endif

	/*
	 * On non-powermacs, put all CPUs in spin-loops.
	 *
	 * PowerMacs use a different mechanism to spin CPUs
	 *
	 * (This must be done after instantiating RTAS)
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_hold_cpus();

	/*
	 * Fill in some info for use by the kernel later on
	 */
	if (prom_memory_limit) {
		__be64 val = cpu_to_be64(prom_memory_limit);
		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
			     &val, sizeof(val));
	}
#ifdef CONFIG_PPC64
	if (prom_iommu_off)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
			     NULL, 0);

	if (prom_iommu_force_on)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
			     NULL, 0);

	if (prom_tce_alloc_start) {
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
			     &prom_tce_alloc_start,
			     sizeof(prom_tce_alloc_start));
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
			     &prom_tce_alloc_end,
			     sizeof(prom_tce_alloc_end));
	}
#endif

	/*
	 * Fixup any known bugs in the device-tree
	 */
	fixup_device_tree();

	/*
	 * Now finally create the flattened device-tree
	 */
	prom_printf("copying OF device tree...\n");
	flatten_device_tree();

	/*
	 * in case stdin is USB and still active on IBM machines...
	 * Unfortunately quiesce crashes on some powermacs if we have
	 * closed stdin already (in particular the powerbook 101). It
	 * appears that the OPAL version of OFW doesn't like it either.
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_close_stdin();

	/*
	 * Call OF "quiesce" method to shut down pending DMA's from
	 * devices etc...
	 */
	prom_printf("Quiescing Open Firmware ...\n");
	call_prom("quiesce", 0, 0);

	/*
	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec
	 */
	hdr = dt_header_start;

	/* Don't print anything after quiesce under OPAL, it crashes OFW */
	if (of_platform != PLATFORM_OPAL) {
		prom_printf("Booting Linux via __start() ...\n");
		prom_debug("->dt_header_start=0x%x\n", hdr);
	}
#ifdef CONFIG_PPC32
	reloc_got2(-offset);
#else
	unreloc_toc();
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* OPAL early debug gets the OPAL base & entry in r8 and r9 */
	__start(hdr, kbase, 0, 0, 0,
		prom_opal_base, prom_opal_entry);
#else
	__start(hdr, kbase, 0, 0, 0, 0, 0);
#endif

	return 0;
}