/* Common target-dependent code for ppc64 GDB, the GNU debugger.

   Copyright (C) 1986-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"
#include "frame.h"
#include "gdbcore.h"
#include "infrun.h"
#include "ppc-tdep.h"
#include "ppc64-tdep.h"
#include "elf-bfd.h"
/* Macros for matching instructions.  Note that, since all the
   operands are masked off before they're or-ed into the instruction,
   you can use -1 to make masks.  */
#define insn_d(opcd, rts, ra, d)                \
  ((((opcd) & 0x3f) << 26)                      \
   | (((rts) & 0x1f) << 21)                     \
   | (((ra) & 0x1f) << 16)                      \
   | ((d) & 0xffff))
#define insn_ds(opcd, rts, ra, d, xo)           \
  ((((opcd) & 0x3f) << 26)                      \
   | (((rts) & 0x1f) << 21)                     \
   | (((ra) & 0x1f) << 16)                      \
   | ((d) & 0xfffc)                             \
   | ((xo) & 0x3))
#define insn_xfx(opcd, rts, spr, xo)            \
  ((((opcd) & 0x3f) << 26)                      \
   | (((rts) & 0x1f) << 21)                     \
   | (((spr) & 0x1f) << 16)                     \
   | (((spr) & 0x3e0) << 6)                     \
   | (((xo) & 0x3ff) << 1))
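
/* As a worked example of the macros above (the values here are purely
   illustrative): insn_d (15, 12, 2, 0) builds
   (15 << 26) | (12 << 21) | (2 << 16) | 0 = 0x3d820000, the encoding of
   "addis r12, r2, 0", while insn_d (-1, -1, -1, 0) builds
   0xfc000000 | 0x03e00000 | 0x001f0000 = 0xffff0000, a mask covering the
   opcode and both register fields but not the displacement.  The pattern
   tables below pair such a mask with the expected instruction bits.  */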
/* PLT_OFF is the TOC-relative offset of a 64-bit PowerPC PLT entry.
   Return the function's entry point.  */
static CORE_ADDR
ppc64_plt_entry_point (struct frame_info *frame, CORE_ADDR plt_off)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR tocp;

  if (execution_direction == EXEC_REVERSE)
    {
      /* If executing in reverse, r2 will have been stored to the stack.  */
      CORE_ADDR sp = get_frame_register_unsigned (frame,
                                                  tdep->ppc_gp0_regnum + 1);
      unsigned int sp_off = tdep->elf_abi == POWERPC_ELF_V1 ? 40 : 24;

      tocp = read_memory_unsigned_integer (sp + sp_off, 8, byte_order);
    }
  else
    tocp = get_frame_register_unsigned (frame, tdep->ppc_gp0_regnum + 2);

  /* The first word of the PLT entry is the function entry point.  */
  return read_memory_unsigned_integer (tocp + plt_off, 8, byte_order);
}
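
/* Background note: under the ELFv1 ABI a PLT entry is a function
   descriptor whose first doubleword is the code entry point, while under
   ELFv2 the PLT entry holds the target address directly; in both cases
   the first doubleword read above is the code address to return.  */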
/* Patterns for the standard linkage functions.  These are built by
   build_plt_stub in bfd/elf64-ppc.c.  */
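
/* Each pattern entry below is a { mask, data, optional } triplet: an
   instruction matches the entry when (insn & mask) == data, and entries
   whose optional flag is set may be absent from the stub.  Using -1 as
   the mask therefore requires an exact instruction match.  */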
/* Old ELFv1 PLT call stub.  */

static struct ppc_insn_pattern ppc64_standard_linkage1[] =
  {
    /* addis r12, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },

    /* std r2, 40(r1) */
    { -1, insn_ds (62, 2, 1, 40, 0), 0 },

    /* ld r11, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 },

    /* addis r12, r12, 1 <optional> */
    { insn_d (-1, -1, -1, -1), insn_d (15, 12, 12, 1), 1 },

    /* ld r2, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 12, 0, 0), 0 },

    /* addis r12, r12, 1 <optional> */
    { insn_d (-1, -1, -1, -1), insn_d (15, 12, 12, 1), 1 },

    /* mtctr r11 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), 0 },

    /* ld r11, <any>(r12) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 1 },

    /* bctr */
    { -1, 0x4e800420, 0 },

    { 0, 0, 0 }
  };
/* ELFv1 PLT call stub to access PLT entries more than +/- 32k from r2.
   Also supports older stub with different placement of std 2,40(1),
   a stub that omits the std 2,40(1), and both versions of power7
   thread safety read barriers.  Note that there are actually two more
   instructions following "cmpldi r2, 0", "bnectr+" and "b <glink_i>",
   but there isn't any need to match them.  */
static struct ppc_insn_pattern ppc64_standard_linkage2[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* addis r12, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },

    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* ld r11, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 },

    /* addi r12, r12, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 12, 12, 0), 1 },

    /* mtctr r11 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), 0 },

    /* xor r11, r11, r11 <optional> */
    { -1, 0x7d6b5a78, 1 },

    /* add r12, r12, r11 <optional> */
    { -1, 0x7d8c5a14, 1 },

    /* ld r2, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 12, 0, 0), 0 },

    /* ld r11, <any>(r12) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 1 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };
/* ELFv1 PLT call stub to access PLT entries within +/- 32k of r2.  */
static struct ppc_insn_pattern ppc64_standard_linkage3[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* ld r11, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 2, 0, 0), 0 },

    /* addi r2, r2, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 2, 2, 0), 1 },

    /* mtctr r11 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), 0 },

    /* xor r11, r11, r11 <optional> */
    { -1, 0x7d6b5a78, 1 },

    /* add r2, r2, r11 <optional> */
    { -1, 0x7c425a14, 1 },

    /* ld r11, <any>(r2) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 2, 0, 0), 1 },

    /* ld r2, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 2, 0, 0), 0 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };
/* ELFv1 PLT call stub to access PLT entries more than +/- 32k from r2.
   A more modern variant of ppc64_standard_linkage2 differing in
   register usage.  */
static struct ppc_insn_pattern ppc64_standard_linkage4[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* addis r11, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 11, 2, 0), 0 },

    /* ld r12, <any>(r11) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 11, 0, 0), 0 },

    /* addi r11, r11, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 11, 11, 0), 1 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* xor r2, r12, r12 <optional> */
    { -1, 0x7d826278, 1 },

    /* add r11, r11, r2 <optional> */
    { -1, 0x7d6b1214, 1 },

    /* ld r2, <any>(r11) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 11, 0, 0), 0 },

    /* ld r11, <any>(r11) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 11, 0, 0), 1 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };
/* ELFv1 PLT call stub to access PLT entries within +/- 32k of r2.
   A more modern variant of ppc64_standard_linkage3 differing in
   register usage.  */
static struct ppc_insn_pattern ppc64_standard_linkage5[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* ld r12, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 2, 0, 0), 0 },

    /* addi r2, r2, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 2, 2, 0), 1 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* xor r11, r12, r12 <optional> */
    { -1, 0x7d8b6278, 1 },

    /* add r2, r2, r11 <optional> */
    { -1, 0x7c425a14, 1 },

    /* ld r11, <any>(r2) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 2, 0, 0), 1 },

    /* ld r2, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 2, 0, 0), 0 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };
/* ELFv2 PLT call stub to access PLT entries more than +/- 32k from r2.  */

static struct ppc_insn_pattern ppc64_standard_linkage6[] =
  {
    /* std r2, 24(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 24, 0), 1 },

    /* addis r11, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 11, 2, 0), 0 },

    /* ld r12, <any>(r11) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 11, 0, 0), 0 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* bctr */
    { -1, 0x4e800420, 0 },
    { 0, 0, 0 }
  };
/* ELFv2 PLT call stub to access PLT entries within +/- 32k of r2.  */
static struct ppc_insn_pattern ppc64_standard_linkage7[] =
  {
    /* std r2, 24(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 24, 0), 1 },

    /* ld r12, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 2, 0, 0), 0 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* bctr */
    { -1, 0x4e800420, 0 },
    { 0, 0, 0 }
  };
/* ELFv2 PLT call stub to access PLT entries more than +/- 32k from r2,
   supporting fusion.  */

static struct ppc_insn_pattern ppc64_standard_linkage8[] =
  {
    /* std r2, 24(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 24, 0), 1 },

    /* addis r12, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },

    /* ld r12, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 12, 0, 0), 0 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* bctr */
    { -1, 0x4e800420, 0 },
    { 0, 0, 0 }
  };
/* When the dynamic linker is doing lazy symbol resolution, the first
   call to a function in another object will go like this:

   - The user's function calls the linkage function:

     100003d4:  4b ff ff ad   bl      10000380 <nnnn.plt_call.printf>
     100003d8:  e8 41 00 28   ld      r2,40(r1)

   - The linkage function loads the entry point and toc pointer from
     the function descriptor in the PLT, and jumps to it:

     <nnnn.plt_call.printf>:
     10000380:  f8 41 00 28   std     r2,40(r1)
     10000384:  e9 62 80 78   ld      r11,-32648(r2)
     10000388:  7d 69 03 a6   mtctr   r11
     1000038c:  e8 42 80 80   ld      r2,-32640(r2)
     10000390:  28 22 00 00   cmpldi  r2,0
     10000394:  4c e2 04 20   bnectr+
     10000398:  48 00 03 a0   b       10000738 <printf@plt>

   - But since this is the first time that PLT entry has been used, it
     sends control to its glink entry.  That loads the number of the
     PLT entry and jumps to the common glink0 code:

     <printf@plt>:
     10000738:  38 00 00 01   li      r0,1
     1000073c:  4b ff ff bc   b       100006f8 <__glink_PLTresolve>

   - The common glink0 code then transfers control to the dynamic
     linker's fixup code:

     100006f0:  0000000000010440 .quad plt0 - (. + 16)
     <__glink_PLTresolve>:
     100006f8:  7d 88 02 a6   mflr    r12
     100006fc:  42 9f 00 05   bcl     20,4*cr7+so,10000700
     10000700:  7d 68 02 a6   mflr    r11
     10000704:  e8 4b ff f0   ld      r2,-16(r11)
     10000708:  7d 88 03 a6   mtlr    r12
     1000070c:  7d 82 5a 14   add     r12,r2,r11
     10000710:  e9 6c 00 00   ld      r11,0(r12)
     10000714:  e8 4c 00 08   ld      r2,8(r12)
     10000718:  7d 69 03 a6   mtctr   r11
     1000071c:  e9 6c 00 10   ld      r11,16(r12)
     10000720:  4e 80 04 20   bctr

   Eventually, this code will figure out how to skip all of this,
   including the dynamic linker.  At the moment, we just get through
   the linkage function.  */
/* If the current thread is about to execute a series of instructions
   matching the ppc64_standard_linkage pattern, and INSN is the result
   from that pattern match, return the code address to which the
   standard linkage function will send them.  (This doesn't deal with
   dynamic linker lazy symbol resolution stubs.)  */
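
/* A note on how the PLT offset is recovered below: stubs that address
   the PLT entry with an addis/ld pair have the addis D field supply the
   high 16 bits and the ld DS field supply the low bits; assuming the
   ppc_insn_d_field and ppc_insn_ds_field helpers sign-extend their
   fields (matching the linker's @ha/@l convention), (d_field << 16)
   + ds_field reconstructs the full TOC-relative offset.  The short-form
   stubs that reach the PLT within +/- 32k of r2 need only the ld
   displacement.  */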
static CORE_ADDR
ppc64_standard_linkage1_target (struct frame_info *frame, unsigned int *insn)
{
  CORE_ADDR plt_off = ((ppc_insn_d_field (insn[0]) << 16)
                       + ppc_insn_ds_field (insn[2]));

  return ppc64_plt_entry_point (frame, plt_off);
}
static CORE_ADDR
ppc64_standard_linkage2_target (struct frame_info *frame, unsigned int *insn)
{
  CORE_ADDR plt_off = ((ppc_insn_d_field (insn[1]) << 16)
                       + ppc_insn_ds_field (insn[3]));

  return ppc64_plt_entry_point (frame, plt_off);
}
static CORE_ADDR
ppc64_standard_linkage3_target (struct frame_info *frame, unsigned int *insn)
{
  CORE_ADDR plt_off = ppc_insn_ds_field (insn[1]);
  return ppc64_plt_entry_point (frame, plt_off);
}
static CORE_ADDR
ppc64_standard_linkage4_target (struct frame_info *frame, unsigned int *insn)
{
  CORE_ADDR plt_off = ((ppc_insn_d_field (insn[1]) << 16)
                       + ppc_insn_ds_field (insn[2]));

  return ppc64_plt_entry_point (frame, plt_off);
}
/* Given that we've begun executing a call trampoline at PC, return
   the entry point of the function the trampoline will go to.

   When the execution direction is EXEC_REVERSE, scan backward to
   check whether we are in the middle of a PLT stub.  */
static CORE_ADDR
ppc64_skip_trampoline_code_1 (struct frame_info *frame, CORE_ADDR pc)
{
#define MAX(a,b) ((a) > (b) ? (a) : (b))
  unsigned int insns[MAX (MAX (MAX (ARRAY_SIZE (ppc64_standard_linkage1),
                                    ARRAY_SIZE (ppc64_standard_linkage2)),
                               MAX (ARRAY_SIZE (ppc64_standard_linkage3),
                                    ARRAY_SIZE (ppc64_standard_linkage4))),
                          MAX (MAX (ARRAY_SIZE (ppc64_standard_linkage5),
                                    ARRAY_SIZE (ppc64_standard_linkage6)),
                               MAX (ARRAY_SIZE (ppc64_standard_linkage7),
                                    ARRAY_SIZE (ppc64_standard_linkage8))))
                     - 1];
  CORE_ADDR target;
  int scan_limit, i;

  scan_limit = 1;
  /* When reverse-debugging, scan backward to check whether we are
     in the middle of trampoline code.  */
  if (execution_direction == EXEC_REVERSE)
    scan_limit = ARRAY_SIZE (insns) - 1;
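
  /* The candidate stub layouts are tried in order, ppc64_standard_linkage8
     down to ppc64_standard_linkage1.  Where an "insns[N] != 0" condition
     appears below, it requires that at least one of that pattern's
     optional trailing bctr / cmpldi instructions was actually matched.  */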
  for (i = 0; i < scan_limit; i++)
    {
      if (i < ARRAY_SIZE (ppc64_standard_linkage8) - 1
          && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage8, insns))
        pc = ppc64_standard_linkage4_target (frame, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage7) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage7,
                                           insns))
        pc = ppc64_standard_linkage3_target (frame, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage6) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage6,
                                           insns))
        pc = ppc64_standard_linkage4_target (frame, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage5) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage5,
                                           insns)
               && (insns[8] != 0 || insns[9] != 0))
        pc = ppc64_standard_linkage3_target (frame, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage4) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage4,
                                           insns)
               && (insns[9] != 0 || insns[10] != 0))
        pc = ppc64_standard_linkage4_target (frame, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage3) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage3,
                                           insns)
               && (insns[8] != 0 || insns[9] != 0))
        pc = ppc64_standard_linkage3_target (frame, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage2) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage2,
                                           insns)
               && (insns[10] != 0 || insns[11] != 0))
        pc = ppc64_standard_linkage2_target (frame, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage1) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage1,
                                           insns))
        pc = ppc64_standard_linkage1_target (frame, insns);
      else
        {
          /* Scan backward one more instruction if it doesn't match.  */
          pc -= 4;
          continue;
        }

      break;
    }
  /* The PLT descriptor will either point to the already resolved target
     address, or else to a glink stub.  As the latter carry synthetic @plt
     symbols, find_solib_trampoline_target should be able to resolve them.  */
  target = find_solib_trampoline_target (frame, pc);
  return target ? target : pc;
}
/* Wrapper around ppc64_skip_trampoline_code_1 that also applies
   ppc_elfv2_skip_entrypoint via the gdbarch skip_entrypoint hook.  */
CORE_ADDR
ppc64_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);

  pc = ppc64_skip_trampoline_code_1 (frame, pc);
  if (pc != 0 && gdbarch_skip_entrypoint_p (gdbarch))
    pc = gdbarch_skip_entrypoint (gdbarch, pc);
  return pc;
}
/* Support for convert_from_func_ptr_addr (ARCH, ADDR, TARG) on PPC64
   GNU/Linux.

   Usually a function pointer's representation is simply the address
   of the function.  On GNU/Linux on the PowerPC however, a function
   pointer may be a pointer to a function descriptor.

   For PPC64, a function descriptor is a TOC entry, in a data section,
   which contains three words: the first word is the address of the
   function, the second word is the TOC pointer (r2), and the third word
   is the static chain value.

   Throughout GDB it is currently assumed that a function pointer contains
   the address of the function, which is not easy to fix.  In addition, the
   conversion of a function address to a function pointer would require
   allocation of a TOC entry in the inferior's memory space, with all its
   drawbacks.  To be able to call C++ virtual methods in the inferior
   (which are called via function pointers), find_function_addr uses this
   function to get the function address from a function pointer.

   If ADDR points at what is clearly a function descriptor, transform
   it into the address of the corresponding function, if needed.  Be
   conservative, otherwise GDB will do the transformation on any
   random addresses such as occur when there is no symbol table.  */
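
/* For example (illustrative addresses only): if a function's code starts
   at 0x10000500 and its TOC base is 0x10018100, its .opd entry holds the
   three doublewords { 0x10000500, 0x10018100, 0x0 }.  Given ADDR pointing
   at that entry, the conversion below returns 0x10000500.  */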
CORE_ADDR
ppc64_convert_from_func_ptr_addr (struct gdbarch *gdbarch,
                                  CORE_ADDR addr,
                                  struct target_ops *targ)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct target_section *s = target_section_by_addr (targ, addr);

  /* Check if ADDR points to a function descriptor.  */
  if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
    {
      /* There may be relocations that need to be applied to the .opd
         section.  Unfortunately, this function may be called at a time
         where these relocations have not yet been performed -- this can
         happen for example shortly after a library has been loaded with
         dlopen, but ld.so has not yet applied the relocations.

         To cope with both the case where the relocation has been applied,
         and the case where it has not yet been applied, we do *not* read
         the (maybe) relocated value from target memory, but we instead
         read the non-relocated value from the BFD, and apply the relocation
         offset manually.

         This makes the assumption that all .opd entries are always relocated
         by the same offset by which the section itself was relocated.  This
         should always be the case for GNU/Linux executables and shared
         libraries.  Note that other kinds of object files (e.g. those added
         via add-symbol-file) will currently never end up here anyway, as
         this function accesses *target* sections only; only the main exec
         and shared libraries are ever added to the target.  */

      gdb_byte buf[8];
      int res;

      res = bfd_get_section_contents (s->the_bfd_section->owner,
                                      s->the_bfd_section,
                                      &buf, addr - s->addr, 8);
      if (res != 0)
        return extract_unsigned_integer (buf, 8, byte_order)
          - bfd_section_vma (s->bfd, s->the_bfd_section) + s->addr;
    }

  return addr;
}
/* A synthetic 'dot' symbol on ppc64 has its udata.p entry pointing back
   to the original ELF symbol it was derived from.  Get the size from
   that symbol.  */
void
ppc64_elf_make_msymbol_special (asymbol *sym, struct minimal_symbol *msym)
{
  if ((sym->flags & BSF_SYNTHETIC) != 0 && sym->udata.p != NULL)
    {
      elf_symbol_type *elf_sym = (elf_symbol_type *) sym->udata.p;
      SET_MSYMBOL_SIZE (msym, elf_sym->internal_elf_sym.st_size);
    }
}