#ifdef CONFIG_PPC64
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif
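/* PROVIDE32() emits the traditional 32-bit symbols (etext, edata, end)
   only when building a 32-bit kernel; on 64-bit the names are mangled
   so nothing can accidentally rely on them. */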
#include <asm-generic/vmlinux.lds.h>

        kernel PT_LOAD FLAGS(7);        /* RWX */
        notes PT_NOTE FLAGS(0);
        dummy PT_NOTE FLAGS(0);
/* binutils < 2.18 has a bug that makes it misbehave when taking an
   ELF file with all segments at load address 0 as input.  This
   happens when running "strip" on vmlinux, because of the AT() magic
   in this linker script.  People using GCC >= 4.2 won't run into
   this problem, because the "build-id" support will put some data
   into the "notes" segment (at a non-zero load address).

   To work around this, we force some data into both the "dummy"
   segment and the kernel segment, so the dummy segment will get a
   non-zero load address.  It's not enough to always create the
   "notes" segment, since if nothing gets assigned to it, its load
   address will be zero. */
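/* Illustrative only: a minimal sketch of the pattern described above
   (the real .dummy section appears further down in this script).  Any
   non-empty contents will do; what matters is assigning the section to
   both program headers, which forces the "dummy" PT_NOTE segment to a
   non-zero load address:

        .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
                LONG(0)
        } :kernel :dummy
*/
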
#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
#endif
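/* On 32-bit (big-endian) kernels the low-order word of jiffies_64 sits
   at byte offset 4, so aliasing jiffies to jiffies_64 + 4 gives 32-bit
   code its usual unsigned long jiffies without any extra storage. */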
        /* Sections to be discarded. */
        /*
         * Text, read-only data and other permanent read-only sections
         */
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
                *(.text .fixup .text.init.refok .exit.text.refok __ftr_alt_*)
#endif /* CONFIG_PPC32 */

        PROVIDE32 (etext = .);
        /* Exception & bug tables */
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
                *(__ex_table)
                __stop___ex_table = .;
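                /* Searched by the fault-fixup code (search_exception_tables())
                   to find the recovery handler for a kernel instruction that
                   faulted, e.g. a failed user-space access. */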
        /* The dummy segment contents for the bug workaround mentioned above. */
        .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {

        /*
         * Init sections discarded at runtime
         */

        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
        /* .exit.text is discarded at runtime, not link time,
         * to deal with references from __bug_table
         */
        .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
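                /* The exit code collected here lies inside the init region,
                   so it is freed together with the rest of init memory once
                   boot completes (see "freed after init ends here" below). */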
        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
                __vtop_table_begin = .;
                __vtop_table_end = .;
                __ptov_table_begin = .;
                __ptov_table_end = .;
#ifdef CONFIG_PPC_ISERIES
                __dt_strings_start = .;
                __dt_strings_end = .;
        .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {

        .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
                __initcall_start = .;

        .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
                __con_initcall_start = .;
                *(.con_initcall.init)
                __con_initcall_end = .;
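                /* These pointer arrays are walked at boot: do_initcalls() runs
                   everything between __initcall_start and __initcall_end, and
                   console_init() runs the console_initcall() entries between
                   __con_initcall_start and __con_initcall_end. */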
        __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
                __start___ftr_fixup = .;
                *(__ftr_fixup)
                __stop___ftr_fixup = .;

        __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
                __start___mmu_ftr_fixup = .;
                *(__mmu_ftr_fixup)
                __stop___mmu_ftr_fixup = .;

        __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
                __start___lwsync_fixup = .;
                *(__lwsync_fixup)
                __stop___lwsync_fixup = .;

        __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
                __start___fw_ftr_fixup = .;
                *(__fw_ftr_fixup)
                __stop___fw_ftr_fixup = .;
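        /* The fixup tables above are filled by the feature-fixup macros.
           Early boot code (arch/powerpc/lib/feature-fixups.c) walks them and
           patches the alternative code sequences in or out according to the
           detected CPU, MMU and firmware feature bits; the lwsync table
           similarly lets lwsync be replaced with plain sync on CPUs that
           need it. */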
#ifdef CONFIG_BLK_DEV_INITRD
        . = ALIGN(PAGE_SIZE);
        .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
                __initramfs_start = .;
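                /* The cpio archive built into the image lands between
                   __initramfs_start and __initramfs_end and is unpacked into
                   rootfs by populate_rootfs() during boot. */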
        . = ALIGN(PAGE_SIZE);
        .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
                *(.data.percpu.shared_aligned)
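                /* Only the template lives here: setup_per_cpu_areas() allocates
                   one copy of this section for each possible CPU, and per_cpu()
                   accesses resolve into those per-CPU copies. */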
        .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
                __machine_desc_start = . ;
                *(.machine.desc)
                __machine_desc_end = . ;
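                /* define_machine() places one struct machdep_calls here per
                   supported platform; probe_machine() scans the array at boot
                   and adopts the first entry whose probe() hook accepts the
                   machine we are booting on. */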
#ifdef CONFIG_RELOCATABLE
        .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) }
        .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
        .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)

        .hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
        .interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
        .rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
                __rela_dyn_start = .;
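                /* With CONFIG_RELOCATABLE the kernel may run at an address
                   other than the one it was linked at; the early relocation
                   code walks the entries gathered here (typically
                   R_PPC64_RELATIVE relocations) and adjusts them by the
                   run-time offset before ordinary C code starts. */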
        /* freed after init ends here */
        . = ALIGN(PAGE_SIZE);

        /*
         * And now the various read/write data
         */

        . = ALIGN(PAGE_SIZE);
#ifdef CONFIG_PPC32
        .data : AT(ADDR(.data) - LOAD_OFFSET) {

#else
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
        .opd : AT(ADDR(.opd) - LOAD_OFFSET) {

        .got : AT(ADDR(.got) - LOAD_OFFSET) {
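                /* 64-bit only: .opd holds the ELF ABI function descriptors
                   (entry point plus TOC pointer) and .got collects the GOT/TOC
                   entries addressed TOC-relative through r2. */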
        . = ALIGN(PAGE_SIZE);
        PROVIDE32 (edata = .);

        /* The initial task and kernel stack */
        .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
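                /* init_thread_union (the boot task's thread_info and kernel
                   stack) lives here; the section is kept THREAD_SIZE aligned
                   so the usual stack-pointer masking that locates thread_info
                   also works for the initial stack. */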
        . = ALIGN(PAGE_SIZE);
        .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
                *(.data.page_aligned)

        .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
                *(.data.cacheline_aligned)

        . = ALIGN(L1_CACHE_BYTES);
        .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
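                /* Variables tagged __read_mostly are grouped here so that data
                   which is read often but written rarely does not share cache
                   lines with write-heavy data. */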
        . = ALIGN(PAGE_SIZE);
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                . = ALIGN(PAGE_SIZE);

        /*
         * And finally the bss
         */
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {

        . = ALIGN(PAGE_SIZE);