1 /* libunwind - a platform-independent unwind library
2 Copyright (c) 2003, 2005 Hewlett-Packard Development Company, L.P.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
5 This file is part of libunwind.
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
15 The above copyright notice and this permission notice shall be
16 included in all copies or substantial portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
27 #include "libunwind_i.h"
/* Allocate/release a stackable register state node from the global
   dwarf reg-state mempool (used by DW_CFA_remember_state/restore_state). */
31 #define alloc_reg_state() (mempool_alloc (&dwarf_reg_state_pool))
32 #define free_reg_state(rs) (mempool_free (&dwarf_reg_state_pool, rs))
/* Cache geometry: 2^log_size buckets; the hash table is twice that size
   (log_size + 1) to keep collision chains short. */
34 #define DWARF_UNW_CACHE_SIZE(log_size) (1 << log_size)
35 #define DWARF_UNW_HASH_SIZE(log_size) (1 << (log_size + 1))
/* Read a ULEB128-encoded DWARF register number at *addr (advancing *addr)
   into *valp and validate it against DWARF_NUM_PRESERVED_REGS so callers
   can index the per-register state arrays safely.
   NOTE(review): error-return lines are elided in this listing. */
38 read_regnum (unw_addr_space_t as, unw_accessors_t *a, unw_word_t *addr,
39 unw_word_t *valp, void *arg)
43 if ((ret = dwarf_read_uleb128 (as, a, addr, valp, arg)) < 0)
/* Reject out-of-range register numbers before they are used as indices. */
46 if (*valp >= DWARF_NUM_PRESERVED_REGS)
48 Debug (1, "Invalid register number %u\n", (unsigned int) *valp);
/* Record in the *current* row of the state record that register 'regnum'
   is recovered by rule 'where', with the rule's operand stored in val[].
   NOTE(review): the trailing 'val' parameter line is elided in this listing. */
55 set_reg (dwarf_state_record_t *sr, unw_word_t regnum, dwarf_where_t where,
58 sr->rs_current.reg.where[regnum] = where;
59 sr->rs_current.reg.val[regnum] = val;
63 push_rstate_stack(dwarf_stackable_reg_state_t **rs_stack)
65 dwarf_stackable_reg_state_t *old_rs = *rs_stack;
66 if (NULL == (*rs_stack = alloc_reg_state ()))
71 (*rs_stack)->next = old_rs;
/* Pop the top node off the remember-state stack and return it to the
   mempool.  Caller must ensure the stack is non-empty. */
76 pop_rstate_stack(dwarf_stackable_reg_state_t **rs_stack)
78 dwarf_stackable_reg_state_t *old_rs = *rs_stack;
79 *rs_stack = old_rs->next;
80 free_reg_state (old_rs);
/* Drain the remember-state stack, freeing every node — used to clean up
   after running a CFI program.  NOTE(review): the loop condition (while
   the stack is non-empty, presumably) is elided in this listing. */
84 empty_rstate_stack(dwarf_stackable_reg_state_t **rs_stack)
87 pop_rstate_stack(rs_stack);
90 /* Run a CFI program to update the register state. */
/* Interprets DWARF Call Frame Instructions starting at *addr (bounded by
   end_addr) while the location counter *ip has not passed end_ip, updating
   sr->rs_current.  rs_stack backs DW_CFA_remember_state/restore_state;
   dci supplies code/data alignment factors and the FDE pointer encoding.
   FIX(review): the listing's "®num" tokens are mojibake for "&regnum"
   (the byte sequence "&reg" was mis-decoded as "®"); restored below.
   Also made the CFA_expression/CFA_val_expression Debug lines print the
   CFI address *addr, matching the CFA_def_cfa_expr case, instead of the
   host pointer 'addr'. */
92 run_cfi_program (struct dwarf_cursor *c, dwarf_state_record_t *sr,
93 unw_word_t *ip, unw_word_t end_ip,
94 unw_word_t *addr, unw_word_t end_addr,
95 dwarf_stackable_reg_state_t **rs_stack,
96 struct dwarf_cie_info *dci)
101 if (c->pi.flags & UNW_PI_FLAG_DEBUG_FRAME)
103 /* .debug_frame CFI is stored in local address space. */
104 as = unw_local_addr_space;
112 unw_accessors_t *a = unw_get_accessors_int (as);
/* Main decode loop: one opcode byte per iteration; stop at the target ip,
   the end of the instruction stream, or on the first error. */
115 while (*ip <= end_ip && *addr < end_addr && ret >= 0)
117 unw_word_t operand = 0, regnum, val, len;
122 if ((ret = dwarf_readu8 (as, a, addr, &op, arg)) < 0)
/* High-2-bit opcodes embed their operand in the low 6 bits. */
125 if (op & DWARF_CFA_OPCODE_MASK)
127 operand = op & DWARF_CFA_OPERAND_MASK;
128 op &= ~DWARF_CFA_OPERAND_MASK;
130 switch ((dwarf_cfa_t) op)
132 case DW_CFA_advance_loc:
133 *ip += operand * dci->code_align;
134 Debug (15, "CFA_advance_loc to 0x%lx\n", (long) *ip);
137 case DW_CFA_advance_loc1:
138 if ((ret = dwarf_readu8 (as, a, addr, &u8, arg)) < 0)
140 *ip += u8 * dci->code_align;
141 Debug (15, "CFA_advance_loc1 to 0x%lx\n", (long) *ip);
144 case DW_CFA_advance_loc2:
145 if ((ret = dwarf_readu16 (as, a, addr, &u16, arg)) < 0)
147 *ip += u16 * dci->code_align;
148 Debug (15, "CFA_advance_loc2 to 0x%lx\n", (long) *ip);
151 case DW_CFA_advance_loc4:
152 if ((ret = dwarf_readu32 (as, a, addr, &u32, arg)) < 0)
154 *ip += u32 * dci->code_align;
155 Debug (15, "CFA_advance_loc4 to 0x%lx\n", (long) *ip);
158 case DW_CFA_MIPS_advance_loc8:
159 #ifdef UNW_TARGET_MIPS
163 if ((ret = dwarf_readu64 (as, a, addr, &u64, arg)) < 0)
165 *ip += u64 * dci->code_align;
166 Debug (15, "CFA_MIPS_advance_loc8\n");
170 Debug (1, "DW_CFA_MIPS_advance_loc8 on non-MIPS target\n");
/* DW_CFA_offset: register number embedded in the opcode (operand). */
177 if (regnum >= DWARF_NUM_PRESERVED_REGS)
179 Debug (1, "Invalid register number %u in DW_cfa_OFFSET\n",
180 (unsigned int) regnum);
184 if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)
186 set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align);
187 Debug (15, "CFA_offset r%lu at cfa+0x%lx\n",
188 (long) regnum, (long) (val * dci->data_align));
191 case DW_CFA_offset_extended:
192 if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
193 || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
195 set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align);
196 Debug (15, "CFA_offset_extended r%lu at cf+0x%lx\n",
197 (long) regnum, (long) (val * dci->data_align));
200 case DW_CFA_offset_extended_sf:
201 if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
202 || ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0))
204 set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align);
205 Debug (15, "CFA_offset_extended_sf r%lu at cf+0x%lx\n",
206 (long) regnum, (long) (val * dci->data_align));
/* DW_CFA_restore: revert one register to its CIE-initial rule. */
211 if (regnum >= DWARF_NUM_PRESERVED_REGS)
213 Debug (1, "Invalid register number %u in DW_CFA_restore\n",
214 (unsigned int) regnum);
218 sr->rs_current.reg.where[regnum] = sr->rs_initial.reg.where[regnum];
219 sr->rs_current.reg.val[regnum] = sr->rs_initial.reg.val[regnum];
220 Debug (15, "CFA_restore r%lu\n", (long) regnum);
223 case DW_CFA_restore_extended:
224 if ((ret = dwarf_read_uleb128 (as, a, addr, &regnum, arg)) < 0)
226 if (regnum >= DWARF_NUM_PRESERVED_REGS)
228 Debug (1, "Invalid register number %u in "
229 "DW_CFA_restore_extended\n", (unsigned int) regnum);
233 sr->rs_current.reg.where[regnum] = sr->rs_initial.reg.where[regnum];
234 sr->rs_current.reg.val[regnum] = sr->rs_initial.reg.val[regnum];
235 Debug (15, "CFA_restore_extended r%lu\n", (long) regnum);
/* DW_CFA_set_loc: absolute location encoded per the CIE's FDE encoding. */
242 if ((ret = dwarf_read_encoded_pointer (as, a, addr, dci->fde_encoding,
246 Debug (15, "CFA_set_loc to 0x%lx\n", (long) *ip);
249 case DW_CFA_undefined:
250 if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
252 set_reg (sr, regnum, DWARF_WHERE_UNDEF, 0);
253 Debug (15, "CFA_undefined r%lu\n", (long) regnum);
256 case DW_CFA_same_value:
257 if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
259 set_reg (sr, regnum, DWARF_WHERE_SAME, 0);
260 Debug (15, "CFA_same_value r%lu\n", (long) regnum);
263 case DW_CFA_register:
264 if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
265 || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
267 set_reg (sr, regnum, DWARF_WHERE_REG, val);
268 Debug (15, "CFA_register r%lu to r%lu\n", (long) regnum, (long) val);
271 case DW_CFA_remember_state:
272 if (push_rstate_stack(rs_stack) < 0)
274 Debug (1, "Out of memory in DW_CFA_remember_state\n");
/* Snapshot the whole current row so restore_state can bring it back. */
278 (*rs_stack)->state = sr->rs_current;
279 Debug (15, "CFA_remember_state\n");
282 case DW_CFA_restore_state:
285 Debug (1, "register-state stack underflow\n");
289 sr->rs_current = (*rs_stack)->state;
290 pop_rstate_stack(rs_stack);
291 Debug (15, "CFA_restore_state\n");
/* DW_CFA_def_cfa: CFA = reg + unfactored offset, stored in the two
   pseudo-columns DWARF_CFA_REG_COLUMN / DWARF_CFA_OFF_COLUMN. */
295 if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
296 || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
298 set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum);
299 set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, val); /* NOT factored! */
300 Debug (15, "CFA_def_cfa r%lu+0x%lx\n", (long) regnum, (long) val);
303 case DW_CFA_def_cfa_sf:
304 if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
305 || ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0))
307 set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum);
308 set_reg (sr, DWARF_CFA_OFF_COLUMN, 0,
309 val * dci->data_align); /* factored! */
310 Debug (15, "CFA_def_cfa_sf r%lu+0x%lx\n",
311 (long) regnum, (long) (val * dci->data_align));
314 case DW_CFA_def_cfa_register:
315 if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
317 set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum);
318 Debug (15, "CFA_def_cfa_register r%lu\n", (long) regnum);
321 case DW_CFA_def_cfa_offset:
322 if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)
324 set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, val); /* NOT factored! */
325 Debug (15, "CFA_def_cfa_offset 0x%lx\n", (long) val);
328 case DW_CFA_def_cfa_offset_sf:
329 if ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0)
331 set_reg (sr, DWARF_CFA_OFF_COLUMN, 0,
332 val * dci->data_align); /* factored! */
333 Debug (15, "CFA_def_cfa_offset_sf 0x%lx\n",
334 (long) (val * dci->data_align));
337 case DW_CFA_def_cfa_expression:
338 /* Save the address of the DW_FORM_block for later evaluation. */
339 set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_EXPR, *addr);
341 if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0)
344 Debug (15, "CFA_def_cfa_expr @ 0x%lx [%lu bytes]\n",
345 (long) *addr, (long) len);
349 case DW_CFA_expression:
350 if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
353 /* Save the address of the DW_FORM_block for later evaluation. */
354 set_reg (sr, regnum, DWARF_WHERE_EXPR, *addr);
356 if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0)
359 Debug (15, "CFA_expression r%lu @ 0x%lx [%lu bytes]\n",
360 (long) regnum, (long) *addr, (long) len);
364 case DW_CFA_val_expression:
365 if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
368 /* Save the address of the DW_FORM_block for later evaluation. */
369 set_reg (sr, regnum, DWARF_WHERE_VAL_EXPR, *addr);
371 if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0)
374 Debug (15, "CFA_val_expression r%lu @ 0x%lx [%lu bytes]\n",
375 (long) regnum, (long) *addr, (long) len);
379 case DW_CFA_GNU_args_size:
380 if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)
383 Debug (15, "CFA_GNU_args_size %lu\n", (long) val);
386 case DW_CFA_GNU_negative_offset_extended:
387 /* A comment in GCC says that this is obsoleted by
388 DW_CFA_offset_extended_sf, but that it's used by older
390 if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
391 || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
393 set_reg (sr, regnum, DWARF_WHERE_CFAREL, -(val * dci->data_align));
394 Debug (15, "CFA_GNU_negative_offset_extended cfa+0x%lx\n",
395 (long) -(val * dci->data_align));
398 case DW_CFA_GNU_window_save:
399 #ifdef UNW_TARGET_SPARC
400 /* This is a special CFA to handle all 16 windowed registers
402 for (regnum = 16; regnum < 32; ++regnum)
403 set_reg (sr, regnum, DWARF_WHERE_CFAREL,
404 (regnum - 16) * sizeof (unw_word_t));
405 Debug (15, "CFA_GNU_window_save\n");
412 Debug (1, "Unexpected CFA opcode 0x%x\n", op);
/* Populate c->pi with procedure (unwind) info for 'ip'.  Dynamically
   registered info is consulted first; tdep_find_proc_info() is the static
   fallback.  Also records whether the info came from the dynamic path
   (c->pi_is_dynamic) and lets target code inspect the frame.
   NOTE(review): the ip adjustment under use_prev_instr and the error
   returns are elided in this listing. */
424 fetch_proc_info (struct dwarf_cursor *c, unw_word_t ip)
426 int ret, dynamic = 1;
428 /* The 'ip' can point either to the previous or next instruction
429 depending on what type of frame we have: normal call or a place
430 to resume execution (e.g. after signal frame).
432 For a normal call frame we need to back up so we point within the
433 call itself; this is important because a) the call might be the
434 very last instruction of the function and the edge of the FDE,
435 and b) so that run_cfi_program() runs locations up to the call
438 For signal frame, we need to do the exact opposite and look
439 up using the current 'ip' value. That is where execution will
440 continue, and it's important we get this right, as 'ip' could be
441 right at the function entry and hence FDE edge, or at instruction
442 that manipulates CFA (push/pop). */
443 if (c->use_prev_instr)
446 memset (&c->pi, 0, sizeof (c->pi));
448 /* check dynamic info first --- it overrides everything else */
449 ret = unwi_find_dynamic_proc_info (c->as, ip, &c->pi, 1,
/* Fall back to the target-dependent (static) lookup only when no
   dynamic info covers this ip. */
451 if (ret == -UNW_ENOINFO)
454 if ((ret = tdep_find_proc_info (c, ip, 1)) < 0)
/* Any format other than the table/remote-table/dynamic ones is treated
   as non-dynamic for the pi_is_dynamic flag below. */
458 if (c->pi.format != UNW_INFO_FORMAT_DYNAMIC
459 && c->pi.format != UNW_INFO_FORMAT_TABLE
460 && c->pi.format != UNW_INFO_FORMAT_REMOTE_TABLE)
464 c->pi_is_dynamic = dynamic;
466 /* Let system/machine-dependent code determine frame-specific attributes. */
468 tdep_fetch_frame (c, ip, 1);
/* Build a state record from dynamically-registered unwind info.
   Currently a stub (the error return is elided in this listing). */
474 parse_dynamic (struct dwarf_cursor *c, unw_word_t ip, dwarf_state_record_t *sr)
476 Debug (1, "Not yet implemented\n");
/* Release the unwind info held in 'pi': dynamic info is handed back to
   the dynamic-registration layer; table-format info frees the CIE-info
   node allocated from dwarf_cie_info_pool.  Clearing unwind_info guards
   against double-free. */
481 put_unwind_info (struct dwarf_cursor *c, unw_proc_info_t *pi)
483 if (c->pi_is_dynamic)
484 unwi_put_dynamic_unwind_info (c->as, pi, c->as_arg);
485 else if (pi->unwind_info && pi->format == UNW_INFO_FORMAT_TABLE)
487 mempool_free (&dwarf_cie_info_pool, pi->unwind_info);
488 pi->unwind_info = NULL;
/* Initialize the state record for the current FDE: default every column
   (including the two CFA pseudo-columns, hence the "+ 2") to SAME, run
   the CIE's initial instructions, then snapshot the result as rs_initial
   so DW_CFA_restore can revert to it.
   NOTE(review): error handling around run_cfi_program is elided here. */
494 setup_fde (struct dwarf_cursor *c, dwarf_state_record_t *sr)
498 assert (c->pi_valid);
500 memset (sr, 0, sizeof (*sr));
501 for (i = 0; i < DWARF_NUM_PRESERVED_REGS + 2; ++i)
502 set_reg (sr, i, DWARF_WHERE_SAME, 0);
504 struct dwarf_cie_info *dci = c->pi.unwind_info;
505 sr->rs_current.ret_addr_column = dci->ret_addr_column;
506 unw_word_t addr = dci->cie_instr_start;
507 unw_word_t curr_ip = 0;
508 dwarf_stackable_reg_state_t *rs_stack = NULL;
/* end_ip = ~0: the CIE program always runs to the end of its stream. */
509 ret = run_cfi_program (c, sr, &curr_ip, ~(unw_word_t) 0, &addr,
512 empty_rstate_stack(&rs_stack);
516 memcpy (&sr->rs_initial, &sr->rs_current, sizeof (sr->rs_initial));
/* Run the FDE's CFI program, starting from the CIE-initialized state in
   'sr', up to the target ip (exclusive for normal call frames via the
   'ip - c->use_prev_instr' bound). */
521 parse_fde (struct dwarf_cursor *c, unw_word_t ip, dwarf_state_record_t *sr)
524 struct dwarf_cie_info *dci = c->pi.unwind_info;
525 unw_word_t addr = dci->fde_instr_start;
526 unw_word_t curr_ip = c->pi.start_ip;
527 dwarf_stackable_reg_state_t *rs_stack = NULL;
528 /* Process up to current `ip` for signal frame and `ip - 1` for normal call frame
529 See `c->use_prev_instr` use in `fetch_proc_info` for details. */
530 ret = run_cfi_program (c, sr, &curr_ip, ip - c->use_prev_instr, &addr, dci->fde_instr_end,
/* Always drain the remember-state stack, even on error. */
532 empty_rstate_stack(&rs_stack);
/* (Re)build the register-state cache arrays for the configured log_size:
   small caches use the statically allocated default_* arrays; larger ones
   munmap any previous oversized mappings and GET_MEMORY fresh ones.  All
   bucket links and hash heads are then reset to empty.
   NOTE(review): several lines (the size-check condition, error return,
   and hash-reset body) are elided in this listing. */
540 dwarf_flush_rs_cache (struct dwarf_rs_cache *cache)
544 if (cache->log_size == DWARF_DEFAULT_LOG_UNW_CACHE_SIZE
546 cache->hash = cache->default_hash;
547 cache->buckets = cache->default_buckets;
548 cache->links = cache->default_links;
549 cache->log_size = DWARF_DEFAULT_LOG_UNW_CACHE_SIZE;
/* Unmap previous dynamically-allocated arrays (sized by prev_log_size)
   before allocating at the new size. */
551 if (cache->hash && cache->hash != cache->default_hash)
552 munmap(cache->hash, DWARF_UNW_HASH_SIZE(cache->prev_log_size)
553 * sizeof (cache->hash[0]));
554 if (cache->buckets && cache->buckets != cache->default_buckets)
555 munmap(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->prev_log_size)
556 * sizeof (cache->buckets[0]));
557 if (cache->links && cache->links != cache->default_links)
558 munmap(cache->links, DWARF_UNW_CACHE_SIZE(cache->prev_log_size)
559 * sizeof (cache->links[0]));
560 GET_MEMORY(cache->hash, DWARF_UNW_HASH_SIZE(cache->log_size)
561 * sizeof (cache->hash[0]));
562 GET_MEMORY(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->log_size)
563 * sizeof (cache->buckets[0]));
564 GET_MEMORY(cache->links, DWARF_UNW_CACHE_SIZE(cache->log_size)
565 * sizeof (cache->links[0]));
566 if (!cache->hash || !cache->buckets || !cache->links)
568 Debug (1, "Unable to allocate cache memory");
571 cache->prev_log_size = cache->log_size;
/* Mark every cache slot invalid and unlinked. */
576 for (i = 0; i < DWARF_UNW_CACHE_SIZE(cache->log_size); ++i)
578 cache->links[i].coll_chain = -1;
579 cache->links[i].ip = 0;
580 cache->links[i].valid = 0;
582 for (i = 0; i< DWARF_UNW_HASH_SIZE(cache->log_size); ++i)
588 static inline struct dwarf_rs_cache *
/* Return the register-state cache for 'as' (NULL when caching is off).
   Per-thread policy uses a __thread-local cache; global policy takes the
   cache lock (mask saved into *saved_maskp for put_rs_cache).  A stale
   generation triggers a flush so invalidations elsewhere take effect.
   NOTE(review): return statements and some lines are elided here. */
589 get_rs_cache (unw_addr_space_t as, intrmask_t *saved_maskp)
591 struct dwarf_rs_cache *cache = &as->global_cache;
592 unw_caching_policy_t caching = as->caching_policy;
594 if (caching == UNW_CACHE_NONE)
597 #if defined(HAVE___THREAD) && HAVE___THREAD
598 if (likely (caching == UNW_CACHE_PER_THREAD))
600 static __thread struct dwarf_rs_cache tls_cache __attribute__((tls_model("initial-exec")));
601 Debug (16, "using TLS cache\n");
606 if (likely (caching == UNW_CACHE_GLOBAL))
609 Debug (16, "acquiring lock\n");
610 lock_acquire (&cache->lock, *saved_maskp)
613 if ((atomic_read (&as->cache_generation) != atomic_read (&cache->generation))
616 /* cache_size is only set in the global_cache, copy it over before flushing */
617 cache->log_size = as->global_cache.log_size;
618 if (dwarf_flush_rs_cache (cache) < 0)
620 cache->generation = as->cache_generation;
/* Counterpart of get_rs_cache: release the global-cache lock (restoring
   the signal mask saved at acquisition).  Per-thread caches need no
   unlock. */
627 put_rs_cache (unw_addr_space_t as, struct dwarf_rs_cache *cache,
628 intrmask_t *saved_maskp)
630 assert (as->caching_policy != UNW_CACHE_NONE);
632 Debug (16, "unmasking signals/interrupts and releasing lock\n");
633 if (likely (as->caching_policy == UNW_CACHE_GLOBAL))
634 lock_release (&cache->lock, *saved_maskp);
637 static inline unw_hash_index_t CONST_ATTR
/* Fibonacci (multiplicative) hash of an ip: multiply by a golden-ratio
   derived constant and keep the top (log_size + 1) bits, matching
   DWARF_UNW_HASH_SIZE. */
638 hash (unw_word_t ip, unsigned short log_size)
640 /* based on (sqrt(5)/2-1)*2^64 */
641 # define magic ((unw_word_t) 0x9e3779b97f4a7c16ULL)
643 return ip * magic >> ((sizeof(unw_word_t) * 8) - (log_size + 1));
/* True when cache slot 'index' is valid and caches state for 'ip'. */
647 cache_match (struct dwarf_rs_cache *cache, unsigned short index, unw_word_t ip)
649 return (cache->links[index].valid && (ip == cache->links[index].ip));
652 static dwarf_reg_state_t *
/* Look up a cached register state for c->ip: first try the cursor's hint
   slot, then walk the collision chain rooted at the hash bucket.  Chain
   indices >= cache size act as the end-of-chain sentinel.  Returns NULL
   on a miss (the miss return is elided in this listing). */
653 rs_lookup (struct dwarf_rs_cache *cache, struct dwarf_cursor *c)
655 unsigned short index;
656 unw_word_t ip = c->ip;
/* NOTE(review): initialization of 'index' from the cursor hint is
   elided in this listing. */
661 if (cache_match (cache, index, ip))
662 return &cache->buckets[index];
665 for (index = cache->hash[hash (ip, cache->log_size)];
666 index < DWARF_UNW_CACHE_SIZE(cache->log_size);
667 index = cache->links[index].coll_chain)
669 if (cache_match (cache, index, ip))
670 return &cache->buckets[index];
675 static inline dwarf_reg_state_t *
/* Claim a cache slot for c->ip using round-robin eviction: advance
   rr_head (mod cache size), unlink the evicted entry from its old hash
   chain, then insert the slot at the head of c->ip's chain. */
676 rs_new (struct dwarf_rs_cache *cache, struct dwarf_cursor * c)
678 unw_hash_index_t index;
681 head = cache->rr_head;
682 cache->rr_head = (head + 1) & (DWARF_UNW_CACHE_SIZE(cache->log_size) - 1);
684 /* remove the old rs from the hash table (if it's there): */
685 if (cache->links[head].ip)
687 unsigned short *pindex;
688 for (pindex = &cache->hash[hash (cache->links[head].ip, cache->log_size)];
689 *pindex < DWARF_UNW_CACHE_SIZE(cache->log_size);
690 pindex = &cache->links[*pindex].coll_chain)
/* NOTE(review): the chain-walk match test is elided in this listing;
   the line below splices the found entry out of the chain. */
694 *pindex = cache->links[*pindex].coll_chain;
700 /* enter new rs in the hash table */
701 index = hash (c->ip, cache->log_size);
702 cache->links[head].coll_chain = cache->hash[index];
703 cache->hash[index] = head;
705 cache->links[head].ip = c->ip;
706 cache->links[head].valid = 1;
707 cache->links[head].signal_frame = tdep_cache_frame(c);
708 return cache->buckets + head;
/* Build the state record for 'ip' by dispatching on the unwind-info
   format: table formats run setup_fde + parse_fde; dynamic info goes to
   parse_dynamic; anything else is an error. */
712 create_state_record_for (struct dwarf_cursor *c, dwarf_state_record_t *sr,
716 switch (c->pi.format)
718 case UNW_INFO_FORMAT_TABLE:
719 case UNW_INFO_FORMAT_REMOTE_TABLE:
720 if ((ret = setup_fde(c, sr)) < 0)
722 ret = parse_fde (c, ip, sr);
725 case UNW_INFO_FORMAT_DYNAMIC:
726 ret = parse_dynamic (c, ip, sr);
730 Debug (1, "Unexpected unwind-info format %d\n", c->pi.format);
/* Evaluate the DWARF location expression (length-prefixed block) at
   'addr' and store the resulting location in *locp: a register location
   when the expression yields a register, otherwise a memory location. */
737 eval_location_expr (struct dwarf_cursor *c, unw_addr_space_t as,
738 unw_accessors_t *a, unw_word_t addr,
739 dwarf_loc_t *locp, void *arg)
741 int ret, is_register;
744 /* read the length of the expression: */
745 if ((ret = dwarf_read_uleb128 (as, a, &addr, &len, arg)) < 0)
748 /* evaluate the expression: */
749 if ((ret = dwarf_eval_expr (c, &addr, len, &val, &is_register)) < 0)
753 *locp = DWARF_REG_LOC (c, dwarf_to_unw_regnum (val));
755 *locp = DWARF_MEM_LOC (c, val);
/* Apply the register-state row 'rs' to cursor 'c': compute the CFA,
   derive each preserved register's new location, pop to the caller's ip,
   and detect end-of-stack / stuck-frame conditions.
   NOTE(review): numerous lines (declarations, breaks, returns) are
   elided in this listing. */
761 apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs)
763 unw_word_t regnum, addr, cfa, ip;
764 unw_word_t prev_ip, prev_cfa;
776 a = unw_get_accessors_int (as);
778 /* Evaluate the CFA first, because it may be referred to by other
781 if (rs->reg.where[DWARF_CFA_REG_COLUMN] == DWARF_WHERE_REG)
783 /* CFA is equal to [reg] + offset: */
785 /* As a special-case, if the stack-pointer is the CFA and the
786 stack-pointer wasn't saved, popping the CFA implicitly pops
787 the stack-pointer as well. */
788 if ((rs->reg.val[DWARF_CFA_REG_COLUMN] == UNW_TDEP_SP)
789 && (UNW_TDEP_SP < ARRAY_SIZE(rs->reg.val))
790 && (rs->reg.where[UNW_TDEP_SP] == DWARF_WHERE_SAME))
794 regnum = dwarf_to_unw_regnum (rs->reg.val[DWARF_CFA_REG_COLUMN]);
795 if ((ret = unw_get_reg ((unw_cursor_t *) c, regnum, &cfa)) < 0)
798 cfa += rs->reg.val[DWARF_CFA_OFF_COLUMN];
802 /* CFA is equal to EXPR: */
804 assert (rs->reg.where[DWARF_CFA_REG_COLUMN] == DWARF_WHERE_EXPR);
806 addr = rs->reg.val[DWARF_CFA_REG_COLUMN];
807 if ((ret = eval_location_expr (c, as, a, addr, &cfa_loc, arg)) < 0)
809 /* the returned location better be a memory location... */
810 if (DWARF_IS_REG_LOC (cfa_loc))
811 return -UNW_EBADFRAME;
812 cfa = DWARF_GET_LOC (cfa_loc);
/* Compute all new locations into a scratch array first so expression
   evaluation still sees the *old* c->loc values. */
815 dwarf_loc_t new_loc[DWARF_NUM_PRESERVED_REGS];
816 memcpy(new_loc, c->loc, sizeof(new_loc));
818 for (i = 0; i < DWARF_NUM_PRESERVED_REGS; ++i)
820 switch ((dwarf_where_t) rs->reg.where[i])
822 case DWARF_WHERE_UNDEF:
823 new_loc[i] = DWARF_NULL_LOC;
826 case DWARF_WHERE_SAME:
829 case DWARF_WHERE_CFAREL:
830 new_loc[i] = DWARF_MEM_LOC (c, cfa + rs->reg.val[i]);
833 case DWARF_WHERE_REG:
834 new_loc[i] = DWARF_REG_LOC (c, dwarf_to_unw_regnum (rs->reg.val[i]));
837 case DWARF_WHERE_EXPR:
838 addr = rs->reg.val[i];
839 if ((ret = eval_location_expr (c, as, a, addr, new_loc + i, arg)) < 0)
843 case DWARF_WHERE_VAL_EXPR:
844 addr = rs->reg.val[i];
845 if ((ret = eval_location_expr (c, as, a, addr, new_loc + i, arg)) < 0)
/* VAL_EXPR: the expression yields the register's value itself,
   not an address holding it. */
847 new_loc[i] = DWARF_VAL_LOC (c, DWARF_GET_LOC (new_loc[i]));
852 memcpy(c->loc, new_loc, sizeof(new_loc));
855 /* DWARF spec says undefined return address location means end of stack. */
856 if (DWARF_IS_NULL_LOC (c->loc[rs->ret_addr_column]))
863 ret = dwarf_get (c, c->loc[rs->ret_addr_column], &ip);
870 /* XXX: check for ip to be code_aligned */
/* Guard against infinite unwind loops: a frame that changes neither ip
   nor cfa would repeat forever. */
871 if (c->ip == prev_ip && c->cfa == prev_cfa)
873 Dprintf ("%s: ip and cfa unchanged; stopping here (ip=0x%lx)\n",
874 __FUNCTION__, (long) c->ip);
875 return -UNW_EBADFRAME;
879 tdep_stash_frame (c, rs);
884 /* Find the saved locations. */
/* Fill sr->rs_current for the current frame: fast path copies a cached
   row (rs_lookup); the slow path fetches proc info, runs the CFI program
   (create_state_record_for), and inserts the result into the cache.
   Also maintains the cursor's cache hint and use_prev_instr flag.
   NOTE(review): several control-flow lines are elided in this listing. */
886 find_reg_state (struct dwarf_cursor *c, dwarf_state_record_t *sr)
888 dwarf_reg_state_t *rs;
889 struct dwarf_rs_cache *cache;
891 intrmask_t saved_mask;
893 if ((cache = get_rs_cache(c->as, &saved_mask)) &&
894 (rs = rs_lookup(cache, c)))
896 /* update hint; no locking needed: single-word writes are atomic */
897 unsigned short index = rs - cache->buckets;
898 c->use_prev_instr = ! cache->links[index].signal_frame;
899 memcpy (&sr->rs_current, rs, sizeof (*rs));
/* Slow path: compute the state record from the unwind info. */
903 ret = fetch_proc_info (c, c->ip);
904 int next_use_prev_instr = c->use_prev_instr;
907 /* Update use_prev_instr for the next frame. */
908 assert(c->pi.unwind_info);
909 struct dwarf_cie_info *dci = c->pi.unwind_info;
910 next_use_prev_instr = ! dci->signal_frame;
911 ret = create_state_record_for (c, sr, c->ip);
913 put_unwind_info (c, &c->pi);
914 c->use_prev_instr = next_use_prev_instr;
/* Publish the freshly computed row into the cache. */
916 if (cache && ret >= 0)
918 rs = rs_new (cache, c);
919 cache->links[rs - cache->buckets].hint = 0;
920 memcpy(rs, &sr->rs_current, sizeof(*rs));
924 unsigned short index = -1;
927 put_rs_cache (c->as, cache, &saved_mask);
/* Hint bookkeeping: remember where this ip's row lives so the next
   lookup for the following frame can try it first. */
930 index = rs - cache->buckets;
931 c->hint = cache->links[index].hint;
932 cache->links[c->prev_rs].hint = index + 1;
939 tdep_reuse_frame (c, cache->links[index].signal_frame);
943 /* The function finds the saved locations and applies the register
/* Step the cursor up one frame: compute the state record for the current
   frame, then apply it (new CFA, register locations, caller ip). */
946 dwarf_step (struct dwarf_cursor *c)
949 dwarf_state_record_t sr;
950 if ((ret = find_reg_state (c, &sr)) < 0)
952 return apply_reg_state (c, &sr.rs_current);
/* Populate c->pi (and c->args_size) for the current ip.  Tries the
   proc-info cache first; on a miss (or with caching disabled) it does
   the full fetch + CFI run, which is needed to recover args_size for
   unw_resume.  NOTE(review): error-path lines are elided here. */
956 dwarf_make_proc_info (struct dwarf_cursor *c)
959 if (c->as->caching_policy == UNW_CACHE_NONE
960 || get_cached_proc_info (c) < 0)
962 /* Need to check if current frame contains
963 args_size, and set cursor appropriately. Only
964 needed for unw_resume */
965 dwarf_state_record_t sr;
968 /* Lookup it up the slow way... */
969 ret = fetch_proc_info (c, c->ip);
971 ret = create_state_record_for (c, &sr, c->ip);
972 put_unwind_info (c, &c->pi);
975 c->args_size = sr.args_size;
/* Iterate register states for dynamically-registered unwind info.
   Currently a stub (the error return is elided in this listing). */
981 dwarf_reg_states_dynamic_iterate(struct dwarf_cursor *c,
982 unw_reg_states_callback cb,
985 Debug (1, "Not yet implemented\n");
/* Walk the FDE's CFI program and invoke 'cb' once per [prev_ip, curr_ip)
   range with the register state valid over that range; finally reports
   any dead zone / remainder up to the procedure end. */
990 dwarf_reg_states_table_iterate(struct dwarf_cursor *c,
991 unw_reg_states_callback cb,
994 dwarf_state_record_t sr;
995 int ret = setup_fde(c, &sr);
996 struct dwarf_cie_info *dci = c->pi.unwind_info;
997 unw_word_t addr = dci->fde_instr_start;
998 unw_word_t curr_ip = c->pi.start_ip;
999 dwarf_stackable_reg_state_t *rs_stack = NULL;
1000 while (ret >= 0 && curr_ip < c->pi.end_ip && addr < dci->fde_instr_end)
1002 unw_word_t prev_ip = curr_ip;
/* run_cfi_program with end_ip == prev_ip advances exactly one
   location-change step, leaving curr_ip at the next row boundary. */
1003 ret = run_cfi_program (c, &sr, &curr_ip, prev_ip, &addr, dci->fde_instr_end,
1005 if (ret >= 0 && prev_ip < curr_ip)
1006 ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), prev_ip, curr_ip);
1008 empty_rstate_stack(&rs_stack);
1009 #if defined(NEED_LAST_IP)
1010 if (ret >= 0 && curr_ip < c->pi.last_ip)
1011 /* report the dead zone after the procedure ends */
1012 ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), curr_ip, c->pi.last_ip);
1014 if (ret >= 0 && curr_ip < c->pi.end_ip)
1015 /* report for whatever is left before procedure end */
1016 ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), curr_ip, c->pi.end_ip);
/* Public entry point for iterating this frame's register states: fetch
   proc info, dispatch on format to the table or dynamic iterator, then
   release the info and update use_prev_instr for the next frame. */
1022 dwarf_reg_states_iterate(struct dwarf_cursor *c,
1023 unw_reg_states_callback cb,
1026 int ret = fetch_proc_info (c, c->ip);
1027 int next_use_prev_instr = c->use_prev_instr;
1030 /* Update use_prev_instr for the next frame. */
1031 assert(c->pi.unwind_info);
1032 struct dwarf_cie_info *dci = c->pi.unwind_info;
1033 next_use_prev_instr = ! dci->signal_frame;
1034 switch (c->pi.format)
1036 case UNW_INFO_FORMAT_TABLE:
1037 case UNW_INFO_FORMAT_REMOTE_TABLE:
1038 ret = dwarf_reg_states_table_iterate(c, cb, token);
1041 case UNW_INFO_FORMAT_DYNAMIC:
1042 ret = dwarf_reg_states_dynamic_iterate (c, cb, token);
1046 Debug (1, "Unexpected unwind-info format %d\n", c->pi.format);
1050 put_unwind_info (c, &c->pi);
1051 c->use_prev_instr = next_use_prev_instr;
/* Public wrapper: apply a previously captured register-state row to the
   cursor (same semantics as the internal apply_reg_state). */
1056 dwarf_apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs)
1058 return apply_reg_state(c, rs);