2 * Startup Code for MIPS64 CPU-core
4 * Copyright (c) 2003 Wolfgang Denk <wd@denx.de>
6 * See file CREDITS for list of people who contributed to this
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
25 #include <asm-offsets.h>
27 #include <asm/regdef.h>
28 #include <asm/mipsregs.h>
30 #ifndef CONFIG_SYS_MIPS_CACHE_MODE
31 #define CONFIG_SYS_MIPS_CACHE_MODE CONF_CM_CACHABLE_NONCOHERENT
34 #ifdef CONFIG_SYS_LITTLE_ENDIAN
35 #define MIPS64_R_INFO(ssym, r_type3, r_type2, r_type) \
36 (((r_type) << 24) | ((r_type2) << 16) | ((r_type3) << 8) | (ssym))
38 #define MIPS64_R_INFO(ssym, r_type3, r_type2, r_type) \
39 ((r_type) | ((r_type2) << 8) | ((r_type3) << 16) | (ssym) << 24)
43 * For the moment disable interrupts, mark the kernel mode and
44 * set ST0_KX so that the CPU does not spit fire when using
47 .macro setup_c0_status set clr
50 or t0, ST0_CU0 | \set | 0x1f | \clr
63 /* U-boot entry point */
68 /* TLB refill, 32 bit task */
73 /* XTLB refill, 64 bit task */
78 /* Cache error exception */
83 /* General exception */
88 /* Catch interrupt exceptions */
93 /* EJTAG debug exception */
100 /* Clear watch registers */
101 dmtc0 zero, CP0_WATCHLO
102 dmtc0 zero, CP0_WATCHHI
104 /* WP(Watch Pending), SW0/1 should be cleared */
107 setup_c0_status ST0_KX 0
111 mtc0 zero, CP0_COMPARE
113 #ifndef CONFIG_SKIP_LOWLEVEL_INIT
114 /* CONFIG0 register */
115 dli t0, CONF_CM_UNCACHED
120 * Initialize $gp, force 8 byte alignment of bal instruction to forbid
121 * the compiler to put nop's between bal and _gp. This is required to
122 * keep _gp and ra aligned to 8 byte.
131 #ifndef CONFIG_SKIP_LOWLEVEL_INIT
132 /* Initialize any external memory */
133 dla t9, lowlevel_init
137 /* Initialize caches... */
138 dla t9, mips_cache_reset
142 /* ... and enable them */
143 dli t0, CONFIG_SYS_MIPS_CACHE_MODE
147 /* Set up temporary stack */
148 dli sp, CONFIG_SYS_SDRAM_BASE + CONFIG_SYS_INIT_SP_OFFSET
155 * void relocate_code (addr_sp, gd, addr_moni)
157 * This "function" does not return, instead it continues in RAM
158 * after relocating the monitor code.
162 * a2 = destination address
167 move sp, a0 # set new stack pointer
169 move s0, a1 # save gd in s0
170 move s2, a2 # save destination address in s2
172 dli t0, CONFIG_SYS_MONITOR_BASE
173 dsub s1, s2, t0 # s1 <-- relocation offset
176 ld t2, -24(t3) # t2 <-- __image_copy_end
179 dadd gp, s1 # adjust gp
182 * t0 = source address
183 * t1 = target address
184 * t2 = source end address
193 /* If caches were enabled, we would have to flush them here. */
194 dsub a1, t1, s2 # a1 <-- size
197 move a0, s2 # a0 <-- destination address
199 /* Jump to where we've relocated ourselves */
200 daddi t0, s2, in_ram - _start
205 .dword __rel_dyn_start
206 .dword __image_copy_end
207 .dword _GLOBAL_OFFSET_TABLE_
208 .dword num_got_entries
212 * Now we want to update GOT.
214 * GOT[0] is reserved. GOT[1] is also reserved for the dynamic object
215 * generated by GNU ld. Skip these reserved entries from relocation.
217 ld t3, -8(t0) # t3 <-- num_got_entries
218 ld t8, -16(t0) # t8 <-- _GLOBAL_OFFSET_TABLE_
219 dadd t8, s1 # t8 now holds relocated _G_O_T_
220 daddi t8, t8, 16 # skipping first two entries
232 /* Update dynamic relocations */
233 ld t1, -32(t0) # t1 <-- __rel_dyn_start
234 ld t2, -40(t0) # t2 <-- __rel_dyn_end
236 b 2f # skip first reserved entry
240 lw t8, -4(t1) # t8 <-- relocation info
242 dli t3, MIPS64_R_INFO(0x00, 0x00, 0x12, 0x03)
243 bne t8, t3, 2f # skip non R_MIPS_REL32 entries
246 ld t3, -16(t1) # t3 <-- location to fix up in FLASH
248 ld t8, 0(t3) # t8 <-- original pointer
249 dadd t8, s1 # t8 <-- adjusted pointer
251 dadd t3, s1 # t3 <-- location to fix up in RAM
256 daddi t1, 16 # each rel.dyn entry is 16 bytes
261 * GOT is now relocated. Thus __bss_start and __bss_end can be
262 * accessed directly via $gp.
264 dla t1, __bss_start # t1 <-- __bss_start
265 dla t2, __bss_end # t2 <-- __bss_end
272 move a0, s0 # a0 <-- gd