1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer *record_btrace_thread_observer;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
57 /* The currently allowed replay memory access type.
   Defaults to read-only; compared by pointer identity against the two
   string constants above (see record_btrace_xfer_partial).  */
58 static const char *replay_memory_access = replay_memory_access_read_only;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element *set_record_btrace_cmdlist;
62 static struct cmd_list_element *show_record_btrace_cmdlist;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile;
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf;
76 /* Command list for "record btrace". */
77 static struct cmd_list_element *record_btrace_cmdlist;
/* NOTE(review): the two declarations below duplicate the ones at
   internal lines 61/62 above.  Duplicate file-scope tentative
   definitions are legal C, but one pair could be removed.  */
79 /* Command lists for "set/show record btrace". */
80 static struct cmd_list_element *set_record_btrace_cmdlist;
81 static struct cmd_list_element *show_record_btrace_cmdlist;
83 /* Command lists for "set/show record btrace bts". */
84 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
85 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
87 /* Print a record-btrace debug message. Use do ... while (0) to avoid
88 ambiguities when used in if statements. */
/* Output goes to gdb_stdlog, gated on the global record_debug flag,
   with a "[record-btrace] " prefix and trailing newline.  Uses the GCC
   named-variadic-macro extension (args...) with ##args so the macro
   also works when no arguments are supplied.  */
90 #define DEBUG(msg, args...) \
93 if (record_debug != 0) \
94 fprintf_unfiltered (gdb_stdlog, \
95 "[record-btrace] " msg "\n", ##args); \
100 /* Update the branch trace for the current thread and return a pointer to its
103 Throws an error if there is no thread or no trace. This function never
106 static struct thread_info *
107 require_btrace_thread (void)
109 struct thread_info *tp;
/* Look up the thread selected via inferior_ptid; error out otherwise.  */
113 tp = find_thread_ptid (inferior_ptid);
115 error (_("No thread."));
/* An empty trace is treated the same as no trace at all.  */
119 if (btrace_is_empty (tp))
120 error (_("No trace."));
125 /* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
128 Throws an error if there is no thread or no trace. This function never
131 static struct btrace_thread_info *
132 require_btrace (void)
134 struct thread_info *tp;
/* Delegates the thread/trace validation to require_btrace_thread and
   returns that thread's btrace info.  */
136 tp = require_btrace_thread ();
141 /* Enable branch tracing for one thread. Warn on errors. */
144 record_btrace_enable_warn (struct thread_info *tp)
146 volatile struct gdb_exception error;
/* Errors from btrace_enable are demoted to warnings so that a failure
   on one new thread does not abort whatever triggered the observer.  */
148 TRY_CATCH (error, RETURN_MASK_ERROR)
149 btrace_enable (tp, &record_btrace_conf);
151 if (error.message != NULL)
152 warning ("%s", error.message);
155 /* Callback function to disable branch tracing for one thread.
    ARG is the thread_info pointer; the void * signature matches the
    cleanup-callback convention (see make_cleanup in record_btrace_open).  */
158 record_btrace_disable_callback (void *arg)
160 struct thread_info *tp;
167 /* Enable automatic tracing of new threads. */
170 record_btrace_auto_enable (void)
172 DEBUG ("attach thread observer");
/* Record the observer so record_btrace_auto_disable can detach it.  */
174 record_btrace_thread_observer
175 = observer_attach_new_thread (record_btrace_enable_warn);
178 /* Disable automatic tracing of new threads. */
181 record_btrace_auto_disable (void)
183 /* The observer may have been detached, already. */
184 if (record_btrace_thread_observer == NULL)
187 DEBUG ("detach thread observer");
189 observer_detach_new_thread (record_btrace_thread_observer);
190 record_btrace_thread_observer = NULL;
193 /* The record-btrace async event handler function. */
196 record_btrace_handle_async_inferior_event (gdb_client_data data)
198 inferior_event_handler (INF_REG_EVENT, NULL);
201 /* The to_open method of target record-btrace.
    ARGS optionally names the threads to trace (a number list); with no
    ARGS every non-exited thread is traced.  */
204 record_btrace_open (const char *args, int from_tty)
206 struct cleanup *disable_chain;
207 struct thread_info *tp;
213 if (!target_has_execution)
214 error (_("The program is not being run."));
217 error (_("Record btrace can't debug inferior in non-stop mode."));
219 gdb_assert (record_btrace_thread_observer == NULL);
/* If enabling fails part-way, the cleanup chain disables tracing for
   every thread enabled so far; it is discarded on success below.  */
221 disable_chain = make_cleanup (null_cleanup, NULL);
222 ALL_NON_EXITED_THREADS (tp)
223 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
225 btrace_enable (tp, &record_btrace_conf);
227 make_cleanup (record_btrace_disable_callback, tp);
230 record_btrace_auto_enable ();
232 push_target (&record_btrace_ops);
234 record_btrace_async_inferior_event_handler
235 = create_async_event_handler (record_btrace_handle_async_inferior_event,
237 record_btrace_generating_corefile = 0;
239 observer_notify_record_changed (current_inferior (), 1);
/* Success: keep tracing enabled.  */
241 discard_cleanups (disable_chain);
244 /* The to_stop_recording method of target record-btrace. */
247 record_btrace_stop_recording (struct target_ops *self)
249 struct thread_info *tp;
251 DEBUG ("stop recording");
253 record_btrace_auto_disable ();
/* Only threads that actually have tracing enabled are disabled.  */
255 ALL_NON_EXITED_THREADS (tp)
256 if (tp->btrace.target != NULL)
260 /* The to_close method of target record-btrace. */
263 record_btrace_close (struct target_ops *self)
265 struct thread_info *tp;
267 if (record_btrace_async_inferior_event_handler != NULL)
268 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
270 /* Make sure automatic recording gets disabled even if we did not stop
271 recording before closing the record-btrace target. */
272 record_btrace_auto_disable ();
274 /* We should have already stopped recording.
275 Tear down btrace in case we have not. */
276 ALL_NON_EXITED_THREADS (tp)
277 btrace_teardown (tp);
280 /* The to_async method of target record-btrace.
    Marks or clears our async event handler depending on whether async
    mode is being turned on (CALLBACK != NULL) or off, then forwards to
    the target beneath.  */
283 record_btrace_async (struct target_ops *ops,
284 void (*callback) (enum inferior_event_type event_type,
288 if (callback != NULL)
289 mark_async_event_handler (record_btrace_async_inferior_event_handler);
291 clear_async_event_handler (record_btrace_async_inferior_event_handler);
293 ops->beneath->to_async (ops->beneath, callback, context);
296 /* Adjusts the size and returns a human readable size suffix.
    Scales *SIZE down by whole powers of 1024 when it is an exact
    multiple (GiB, then MiB, then KiB).  */
299 record_btrace_adjust_size (unsigned int *size)
305 if ((sz & ((1u << 30) - 1)) == 0)
310 else if ((sz & ((1u << 20) - 1)) == 0)
315 else if ((sz & ((1u << 10) - 1)) == 0)
324 /* Print a BTS configuration. */
327 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
335 suffix = record_btrace_adjust_size (&size);
336 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
340 /* Print a branch tracing configuration. */
343 record_btrace_print_conf (const struct btrace_config *conf)
345 printf_unfiltered (_("Recording format: %s.\n"),
346 btrace_format_string (conf->format));
348 switch (conf->format)
350 case BTRACE_FORMAT_NONE:
353 case BTRACE_FORMAT_BTS:
354 record_btrace_print_bts_conf (&conf->bts);
358 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
361 /* The to_info_record method of target record-btrace. */
364 record_btrace_info (struct target_ops *self)
366 struct btrace_thread_info *btinfo;
367 const struct btrace_config *conf;
368 struct thread_info *tp;
369 unsigned int insns, calls;
373 tp = find_thread_ptid (inferior_ptid);
375 error (_("No thread."));
377 btinfo = &tp->btrace;
379 conf = btrace_conf (btinfo);
381 record_btrace_print_conf (conf);
/* NOTE(review): insns/calls are presumably zero-initialized in elided
   lines before this check -- verify, otherwise they are read
   uninitialized for an empty trace.  */
388 if (!btrace_is_empty (tp))
390 struct btrace_call_iterator call;
391 struct btrace_insn_iterator insn;
/* Step back from the one-past-the-end position to the last recorded
   call/instruction to obtain the highest used numbers.  */
393 btrace_call_end (&call, btinfo);
394 btrace_call_prev (&call, 1);
395 calls = btrace_call_number (&call);
397 btrace_insn_end (&insn, btinfo);
398 btrace_insn_prev (&insn, 1);
399 insns = btrace_insn_number (&insn);
402 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
403 "%d (%s).\n"), insns, calls, tp->num,
404 target_pid_to_str (tp->ptid));
406 if (btrace_is_replaying (tp))
407 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
408 btrace_insn_number (btinfo->replay));
411 /* Print an unsigned int. */
414 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
416 ui_out_field_fmt (uiout, fld, "%u", val);
419 /* Disassemble a section of the recorded instruction trace.
    Iterates over the half-open range [BEGIN; END), printing each
    instruction's trace index followed by its disassembly.  */
422 btrace_insn_history (struct ui_out *uiout,
423 const struct btrace_insn_iterator *begin,
424 const struct btrace_insn_iterator *end, int flags)
426 struct gdbarch *gdbarch;
427 struct btrace_insn_iterator it;
429 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
430 btrace_insn_number (end));
432 gdbarch = target_gdbarch ();
434 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
436 const struct btrace_insn *insn;
438 insn = btrace_insn_get (&it);
440 /* Print the instruction index. */
441 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
442 ui_out_text (uiout, "\t");
444 /* Disassembly with '/m' flag may not produce the expected result.
/* Disassemble exactly one instruction at INSN->pc.  */
446 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
450 /* The to_insn_history method of target record-btrace.
    SIZE is the requested number of instructions; its sign selects the
    direction (negative = backwards).  */
453 record_btrace_insn_history (struct target_ops *self, int size, int flags)
455 struct btrace_thread_info *btinfo;
456 struct btrace_insn_history *history;
457 struct btrace_insn_iterator begin, end;
458 struct cleanup *uiout_cleanup;
459 struct ui_out *uiout;
460 unsigned int context, covered;
462 uiout = current_uiout;
463 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
465 context = abs (size);
467 error (_("Bad record instruction-history-size."));
469 btinfo = require_btrace ();
470 history = btinfo->insn_history;
/* First request (no previous history window): pick a starting point.  */
473 struct btrace_insn_iterator *replay;
475 DEBUG ("insn-history (0x%x): %d", flags, size);
477 /* If we're replaying, we start at the replay position. Otherwise, we
478 start at the tail of the trace. */
479 replay = btinfo->replay;
483 btrace_insn_end (&begin, btinfo);
485 /* We start from here and expand in the requested direction. Then we
486 expand in the other direction, as well, to fill up any remaining
491 /* We want the current position covered, as well. */
492 covered = btrace_insn_next (&end, 1);
493 covered += btrace_insn_prev (&begin, context - covered);
494 covered += btrace_insn_next (&end, context - covered);
498 covered = btrace_insn_next (&end, context);
499 covered += btrace_insn_prev (&begin, context - covered);
/* Subsequent request: continue from the previous window.  */
504 begin = history->begin;
507 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
508 btrace_insn_number (&begin), btrace_insn_number (&end));
513 covered = btrace_insn_prev (&begin, context);
518 covered = btrace_insn_next (&end, context);
523 btrace_insn_history (uiout, &begin, &end, flags);
/* Nothing covered means we bumped into one end of the trace.  */
527 printf_unfiltered (_("At the start of the branch trace record.\n"));
529 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the window so the next request can continue from it.  */
532 btrace_set_insn_history (btinfo, &begin, &end);
533 do_cleanups (uiout_cleanup);
536 /* The to_insn_history_range method of target record-btrace.
    Prints the recorded instructions numbered FROM..TO inclusive.  */
539 record_btrace_insn_history_range (struct target_ops *self,
540 ULONGEST from, ULONGEST to, int flags)
542 struct btrace_thread_info *btinfo;
543 struct btrace_insn_history *history;
544 struct btrace_insn_iterator begin, end;
545 struct cleanup *uiout_cleanup;
546 struct ui_out *uiout;
547 unsigned int low, high;
550 uiout = current_uiout;
551 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
556 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
558 /* Check for wrap-arounds. */
/* LOW/HIGH are the ULONGEST arguments narrowed to unsigned int; a
   mismatch means the value did not fit.  */
559 if (low != from || high != to)
560 error (_("Bad range."));
563 error (_("Bad range."));
565 btinfo = require_btrace ();
567 found = btrace_find_insn_by_number (&begin, btinfo, low);
569 error (_("Range out of bounds."));
571 found = btrace_find_insn_by_number (&end, btinfo, high);
574 /* Silently truncate the range. */
575 btrace_insn_end (&end, btinfo);
579 /* We want both begin and end to be inclusive. */
580 btrace_insn_next (&end, 1);
583 btrace_insn_history (uiout, &begin, &end, flags);
584 btrace_set_insn_history (btinfo, &begin, &end);
586 do_cleanups (uiout_cleanup);
589 /* The to_insn_history_from method of target record-btrace.
    Computes an inclusive [BEGIN; END] window of |SIZE| instructions
    around FROM (before FROM for negative SIZE) and delegates to
    record_btrace_insn_history_range.  */
592 record_btrace_insn_history_from (struct target_ops *self,
593 ULONGEST from, int size, int flags)
595 ULONGEST begin, end, context;
597 context = abs (size);
599 error (_("Bad record instruction-history-size."));
608 begin = from - context + 1;
613 end = from + context - 1;
615 /* Check for wrap-around. */
620 record_btrace_insn_history_range (self, begin, end, flags);
623 /* Print the instruction number range for a function call history line.
    The range is [BFUN->insn_offset; BFUN->insn_offset + #insns - 1],
    both ends inclusive.  */
626 btrace_call_history_insn_range (struct ui_out *uiout,
627 const struct btrace_function *bfun)
629 unsigned int begin, end, size;
631 size = VEC_length (btrace_insn_s, bfun->insn);
632 gdb_assert (size > 0);
634 begin = bfun->insn_offset;
635 end = begin + size - 1;
637 ui_out_field_uint (uiout, "insn begin", begin);
638 ui_out_text (uiout, ",");
639 ui_out_field_uint (uiout, "insn end", end);
642 /* Print the source line information for a function call history line.
    Emits "file:min line,max line" fields for BFUN's symbol, when one is
    available.  */
645 btrace_call_history_src_line (struct ui_out *uiout,
646 const struct btrace_function *bfun)
655 ui_out_field_string (uiout, "file",
656 symtab_to_filename_for_display (symbol_symtab (sym)));
658 begin = bfun->lbegin;
664 ui_out_text (uiout, ":");
665 ui_out_field_int (uiout, "min line", begin);
670 ui_out_text (uiout, ",");
671 ui_out_field_int (uiout, "max line", end);
674 /* Get the name of a branch trace function.
    Prefers the full symbol's print name, falls back to the minimal
    symbol's print name.  */
677 btrace_get_bfun_name (const struct btrace_function *bfun)
679 struct minimal_symbol *msym;
689 return SYMBOL_PRINT_NAME (sym);
690 else if (msym != NULL)
691 return MSYMBOL_PRINT_NAME (msym);
696 /* Disassemble a section of the recorded function trace.
    Iterates over the half-open range [BEGIN; END), printing one line
    per function segment: index, optional call-depth indentation,
    function name, and optional instruction-range and source-line
    fields controlled by FLAGS.  */
699 btrace_call_history (struct ui_out *uiout,
700 const struct btrace_thread_info *btinfo,
701 const struct btrace_call_iterator *begin,
702 const struct btrace_call_iterator *end,
703 enum record_print_flag flags)
705 struct btrace_call_iterator it;
707 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
708 btrace_call_number (end));
710 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
712 const struct btrace_function *bfun;
713 struct minimal_symbol *msym;
716 bfun = btrace_call_get (&it);
720 /* Print the function index. */
721 ui_out_field_uint (uiout, "index", bfun->number);
722 ui_out_text (uiout, "\t");
724 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
/* Indent by the function's call depth, normalized by the trace's
   base level.  */
726 int level = bfun->level + btinfo->level, i;
728 for (i = 0; i < level; ++i)
729 ui_out_text (uiout, " ");
733 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
734 else if (msym != NULL)
735 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
736 else if (!ui_out_is_mi_like_p (uiout))
737 ui_out_field_string (uiout, "function", "??");
739 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
741 ui_out_text (uiout, _("\tinst "));
742 btrace_call_history_insn_range (uiout, bfun);
745 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
747 ui_out_text (uiout, _("\tat "));
748 btrace_call_history_src_line (uiout, bfun);
751 ui_out_text (uiout, "\n");
755 /* The to_call_history method of target record-btrace.
    Mirrors record_btrace_insn_history, but iterates over function-call
    segments instead of instructions.  SIZE's sign selects direction.  */
758 record_btrace_call_history (struct target_ops *self, int size, int flags)
760 struct btrace_thread_info *btinfo;
761 struct btrace_call_history *history;
762 struct btrace_call_iterator begin, end;
763 struct cleanup *uiout_cleanup;
764 struct ui_out *uiout;
765 unsigned int context, covered;
767 uiout = current_uiout;
768 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
770 context = abs (size);
772 error (_("Bad record function-call-history-size."));
774 btinfo = require_btrace ();
775 history = btinfo->call_history;
/* First request (no previous history window): pick a starting point.  */
778 struct btrace_insn_iterator *replay;
780 DEBUG ("call-history (0x%x): %d", flags, size);
782 /* If we're replaying, we start at the replay position. Otherwise, we
783 start at the tail of the trace. */
784 replay = btinfo->replay;
787 begin.function = replay->function;
788 begin.btinfo = btinfo;
791 btrace_call_end (&begin, btinfo);
793 /* We start from here and expand in the requested direction. Then we
794 expand in the other direction, as well, to fill up any remaining
799 /* We want the current position covered, as well. */
800 covered = btrace_call_next (&end, 1);
801 covered += btrace_call_prev (&begin, context - covered);
802 covered += btrace_call_next (&end, context - covered);
806 covered = btrace_call_next (&end, context);
807 covered += btrace_call_prev (&begin, context- covered);
/* Subsequent request: continue from the previous window.  */
812 begin = history->begin;
815 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
816 btrace_call_number (&begin), btrace_call_number (&end));
821 covered = btrace_call_prev (&begin, context);
826 covered = btrace_call_next (&end, context);
831 btrace_call_history (uiout, btinfo, &begin, &end, flags);
/* Nothing covered means we bumped into one end of the trace.  */
835 printf_unfiltered (_("At the start of the branch trace record.\n"));
837 printf_unfiltered (_("At the end of the branch trace record.\n"));
840 btrace_set_call_history (btinfo, &begin, &end);
841 do_cleanups (uiout_cleanup);
844 /* The to_call_history_range method of target record-btrace.
    Prints the recorded function segments numbered FROM..TO inclusive.  */
847 record_btrace_call_history_range (struct target_ops *self,
848 ULONGEST from, ULONGEST to, int flags)
850 struct btrace_thread_info *btinfo;
851 struct btrace_call_history *history;
852 struct btrace_call_iterator begin, end;
853 struct cleanup *uiout_cleanup;
854 struct ui_out *uiout;
855 unsigned int low, high;
858 uiout = current_uiout;
859 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
864 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
866 /* Check for wrap-arounds. */
/* LOW/HIGH are the ULONGEST arguments narrowed to unsigned int; a
   mismatch means the value did not fit.  */
867 if (low != from || high != to)
868 error (_("Bad range."));
871 error (_("Bad range."));
873 btinfo = require_btrace ();
875 found = btrace_find_call_by_number (&begin, btinfo, low);
877 error (_("Range out of bounds."));
879 found = btrace_find_call_by_number (&end, btinfo, high);
882 /* Silently truncate the range. */
883 btrace_call_end (&end, btinfo);
887 /* We want both begin and end to be inclusive. */
888 btrace_call_next (&end, 1);
891 btrace_call_history (uiout, btinfo, &begin, &end, flags);
892 btrace_set_call_history (btinfo, &begin, &end);
894 do_cleanups (uiout_cleanup);
897 /* The to_call_history_from method of target record-btrace.
    Computes an inclusive [BEGIN; END] window of |SIZE| call segments
    around FROM and delegates to record_btrace_call_history_range.  */
900 record_btrace_call_history_from (struct target_ops *self,
901 ULONGEST from, int size, int flags)
903 ULONGEST begin, end, context;
905 context = abs (size);
907 error (_("Bad record function-call-history-size."));
916 begin = from - context + 1;
921 end = from + context - 1;
923 /* Check for wrap-around. */
928 record_btrace_call_history_range (self, begin, end, flags);
931 /* The to_record_is_replaying method of target record-btrace.
    True iff any live thread is currently replaying its trace.  */
934 record_btrace_is_replaying (struct target_ops *self)
936 struct thread_info *tp;
938 ALL_NON_EXITED_THREADS (tp)
939 if (btrace_is_replaying (tp))
945 /* The to_xfer_partial method of target record-btrace.
    While replaying in the default read-only mode, memory writes are
    refused and reads are only served from read-only sections; anything
    else is reported as unavailable.  Outside replay (or while
    generating a core file) requests pass straight through to the
    target beneath.  */
947 static enum target_xfer_status
948 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
949 const char *annex, gdb_byte *readbuf,
950 const gdb_byte *writebuf, ULONGEST offset,
951 ULONGEST len, ULONGEST *xfered_len)
953 struct target_ops *t;
955 /* Filter out requests that don't make sense during replay. */
956 if (replay_memory_access == replay_memory_access_read_only
957 && !record_btrace_generating_corefile
958 && record_btrace_is_replaying (ops))
962 case TARGET_OBJECT_MEMORY:
964 struct target_section *section;
966 /* We do not allow writing memory in general. */
967 if (writebuf != NULL)
970 return TARGET_XFER_UNAVAILABLE;
973 /* We allow reading readonly memory. */
974 section = target_section_by_addr (ops, offset);
977 /* Check if the section we found is readonly. */
978 if ((bfd_get_section_flags (section->the_bfd_section->owner,
979 section->the_bfd_section)
980 & SEC_READONLY) != 0)
982 /* Truncate the request to fit into this section. */
983 len = min (len, section->endaddr - offset);
989 return TARGET_XFER_UNAVAILABLE;
994 /* Forward the request. */
996 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
997 offset, len, xfered_len);
1000 /* The to_insert_breakpoint method of target record-btrace. */
1003 record_btrace_insert_breakpoint (struct target_ops *ops,
1004 struct gdbarch *gdbarch,
1005 struct bp_target_info *bp_tgt)
1007 volatile struct gdb_exception except;
1011 /* Inserting breakpoints requires accessing memory. Allow it for the
1012 duration of this function. */
1013 old = replay_memory_access;
1014 replay_memory_access = replay_memory_access_read_write;
/* Catch everything so the access mode is restored even on error.  */
1017 TRY_CATCH (except, RETURN_MASK_ALL)
1018 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1020 replay_memory_access = old;
/* Re-throw after restoring the previous access mode.  */
1022 if (except.reason < 0)
1023 throw_exception (except);
1028 /* The to_remove_breakpoint method of target record-btrace.
    Symmetric to record_btrace_insert_breakpoint above.  */
1031 record_btrace_remove_breakpoint (struct target_ops *ops,
1032 struct gdbarch *gdbarch,
1033 struct bp_target_info *bp_tgt)
1035 volatile struct gdb_exception except;
1039 /* Removing breakpoints requires accessing memory. Allow it for the
1040 duration of this function. */
1041 old = replay_memory_access;
1042 replay_memory_access = replay_memory_access_read_write;
1045 TRY_CATCH (except, RETURN_MASK_ALL)
1046 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1048 replay_memory_access = old;
1050 if (except.reason < 0)
1051 throw_exception (except);
1056 /* The to_fetch_registers method of target record-btrace.
    While replaying, only the PC register can be provided; it comes from
    the current replay instruction rather than the target beneath.  */
1059 record_btrace_fetch_registers (struct target_ops *ops,
1060 struct regcache *regcache, int regno)
1062 struct btrace_insn_iterator *replay;
1063 struct thread_info *tp;
1065 tp = find_thread_ptid (inferior_ptid);
1066 gdb_assert (tp != NULL);
1068 replay = tp->btrace.replay;
1069 if (replay != NULL && !record_btrace_generating_corefile)
1071 const struct btrace_insn *insn;
1072 struct gdbarch *gdbarch;
1075 gdbarch = get_regcache_arch (regcache);
1076 pcreg = gdbarch_pc_regnum (gdbarch);
1080 /* We can only provide the PC register. */
1081 if (regno >= 0 && regno != pcreg)
1084 insn = btrace_insn_get (replay);
1085 gdb_assert (insn != NULL);
1087 regcache_raw_supply (regcache, regno, &insn->pc);
/* Not replaying: forward to the target beneath.  */
1091 struct target_ops *t = ops->beneath;
1093 t->to_fetch_registers (t, regcache, regno);
1097 /* The to_store_registers method of target record-btrace.
    Register writes are refused entirely while replaying.  */
1100 record_btrace_store_registers (struct target_ops *ops,
1101 struct regcache *regcache, int regno)
1103 struct target_ops *t;
1105 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1106 error (_("This record target does not allow writing registers."));
1108 gdb_assert (may_write_registers != 0);
1111 t->to_store_registers (t, regcache, regno);
1114 /* The to_prepare_to_store method of target record-btrace.
    A no-op while replaying; otherwise forwarded beneath.  */
1117 record_btrace_prepare_to_store (struct target_ops *ops,
1118 struct regcache *regcache)
1120 struct target_ops *t;
1122 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1126 t->to_prepare_to_store (t, regcache);
1129 /* The branch trace frame cache.
    One instance per replay frame; instances live on the frame obstack
    and are indexed by their FRAME pointer in the BFCACHE hash table.  */
1131 struct btrace_frame_cache
1134 struct thread_info *tp;
1136 /* The frame info. */
1137 struct frame_info *frame;
1139 /* The branch trace function segment. */
1140 const struct btrace_function *bfun;
1143 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1145 static htab_t bfcache;
1147 /* hash_f for htab_create_alloc of bfcache.
    Hashes a cache entry by its frame pointer.  */
1150 bfcache_hash (const void *arg)
1152 const struct btrace_frame_cache *cache = arg;
1154 return htab_hash_pointer (cache->frame);
1157 /* eq_f for htab_create_alloc of bfcache.
    Two entries are equal iff they describe the same frame.  */
1160 bfcache_eq (const void *arg1, const void *arg2)
1162 const struct btrace_frame_cache *cache1 = arg1;
1163 const struct btrace_frame_cache *cache2 = arg2;
1165 return cache1->frame == cache2->frame;
1168 /* Create a new btrace frame cache.
    Allocates on FRAME's obstack and registers the entry in BFCACHE;
    asserts the frame was not cached before.  */
1170 static struct btrace_frame_cache *
1171 bfcache_new (struct frame_info *frame)
1173 struct btrace_frame_cache *cache;
1176 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1177 cache->frame = frame;
1179 slot = htab_find_slot (bfcache, cache, INSERT);
1180 gdb_assert (*slot == NULL);
1186 /* Extract the branch trace function from a branch trace frame.
    Looks FRAME up in BFCACHE; a probe entry with only the frame pointer
    set is enough for the hash/eq functions above.  */
1188 static const struct btrace_function *
1189 btrace_get_frame_function (struct frame_info *frame)
1191 const struct btrace_frame_cache *cache;
1192 const struct btrace_function *bfun;
1193 struct btrace_frame_cache pattern;
1196 pattern.frame = frame;
1198 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1206 /* Implement stop_reason method for record_btrace_frame_unwind.
    Unwinding stops where the trace has no caller information.  */
1208 static enum unwind_stop_reason
1209 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1212 const struct btrace_frame_cache *cache;
1213 const struct btrace_function *bfun;
1215 cache = *this_cache;
1217 gdb_assert (bfun != NULL);
1219 if (bfun->up == NULL)
1220 return UNWIND_UNAVAILABLE;
1222 return UNWIND_NO_REASON;
1225 /* Implement this_id method for record_btrace_frame_unwind.
    The id uses the frame's function address as the code address and the
    first segment's number as the special address; the stack address is
    unavailable since btrace does not record stack contents.  */
1228 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1229 struct frame_id *this_id)
1231 const struct btrace_frame_cache *cache;
1232 const struct btrace_function *bfun;
1233 CORE_ADDR code, special;
1235 cache = *this_cache;
1238 gdb_assert (bfun != NULL);
/* Walk back to the first segment of this function instance so the id
   stays stable across segment boundaries.  */
1240 while (bfun->segment.prev != NULL)
1241 bfun = bfun->segment.prev;
1243 code = get_frame_func (this_frame);
1244 special = bfun->number;
1246 *this_id = frame_id_build_unavailable_stack_special (code, special);
1248 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1249 btrace_get_bfun_name (cache->bfun),
1250 core_addr_to_string_nz (this_id->code_addr),
1251 core_addr_to_string_nz (this_id->special_addr));
1254 /* Implement prev_register method for record_btrace_frame_unwind.
    Only the PC can be unwound: for a call it is the instruction after
    the caller's last (call) instruction; for a return it is the
    caller's first instruction.  All other registers are unavailable.  */
1256 static struct value *
1257 record_btrace_frame_prev_register (struct frame_info *this_frame,
1261 const struct btrace_frame_cache *cache;
1262 const struct btrace_function *bfun, *caller;
1263 const struct btrace_insn *insn;
1264 struct gdbarch *gdbarch;
1268 gdbarch = get_frame_arch (this_frame);
1269 pcreg = gdbarch_pc_regnum (gdbarch);
1270 if (pcreg < 0 || regnum != pcreg)
1271 throw_error (NOT_AVAILABLE_ERROR,
1272 _("Registers are not available in btrace record history"));
1274 cache = *this_cache;
1276 gdb_assert (bfun != NULL);
1280 throw_error (NOT_AVAILABLE_ERROR,
1281 _("No caller in btrace record history"));
/* The up-link is a return: resume at the caller's first instruction.  */
1283 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1285 insn = VEC_index (btrace_insn_s, caller->insn, 0);
/* The up-link is a call: resume after the call instruction.  */
1290 insn = VEC_last (btrace_insn_s, caller->insn);
1293 pc += gdb_insn_length (gdbarch, pc);
1296 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1297 btrace_get_bfun_name (bfun), bfun->level,
1298 core_addr_to_string_nz (pc));
1300 return frame_unwind_got_address (this_frame, regnum, pc);
1303 /* Implement sniffer method for record_btrace_frame_unwind.
    Claims the frame only while the thread is replaying: the innermost
    frame maps to the replay position's function segment; outer frames
    map to the caller of the (non-tailcall) callee frame below.  */
1306 record_btrace_frame_sniffer (const struct frame_unwind *self,
1307 struct frame_info *this_frame,
1310 const struct btrace_function *bfun;
1311 struct btrace_frame_cache *cache;
1312 struct thread_info *tp;
1313 struct frame_info *next;
1315 /* THIS_FRAME does not contain a reference to its thread. */
1316 tp = find_thread_ptid (inferior_ptid);
1317 gdb_assert (tp != NULL);
1320 next = get_next_frame (this_frame);
/* No next frame: THIS_FRAME is innermost; use the replay position.  */
1323 const struct btrace_insn_iterator *replay;
1325 replay = tp->btrace.replay;
1327 bfun = replay->function;
/* Otherwise derive our segment from the callee frame below.  */
1331 const struct btrace_function *callee;
1333 callee = btrace_get_frame_function (next);
1334 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1341 DEBUG ("[frame] sniffed frame for %s on level %d",
1342 btrace_get_bfun_name (bfun), bfun->level);
1344 /* This is our frame. Initialize the frame cache. */
1345 cache = bfcache_new (this_frame);
1349 *this_cache = cache;
1353 /* Implement sniffer method for record_btrace_tailcall_frame_unwind.
    Claims only frames whose callee's up-link is marked as a tailcall.  */
1356 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1357 struct frame_info *this_frame,
1360 const struct btrace_function *bfun, *callee;
1361 struct btrace_frame_cache *cache;
1362 struct frame_info *next;
1364 next = get_next_frame (this_frame);
1368 callee = btrace_get_frame_function (next);
1372 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1379 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1380 btrace_get_bfun_name (bfun), bfun->level);
1382 /* This is our frame. Initialize the frame cache. */
1383 cache = bfcache_new (this_frame);
1384 cache->tp = find_thread_ptid (inferior_ptid);
1387 *this_cache = cache;
/* dealloc_cache method: drop the frame's entry from BFCACHE when the
   frame is destroyed.  */
1392 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1394 struct btrace_frame_cache *cache;
1399 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1400 gdb_assert (slot != NULL);
1402 htab_remove_elt (bfcache, cache);
1405 /* btrace recording does not store previous memory content, neither the stack
1406 frames content. Any unwinding would return erroneous results as the stack
1407 contents no longer match the changed PC value restored from history.
1408 Therefore this unwinder reports any possibly unwound registers as
1411 const struct frame_unwind record_btrace_frame_unwind =
1414 record_btrace_frame_unwind_stop_reason,
1415 record_btrace_frame_this_id,
1416 record_btrace_frame_prev_register,
1418 record_btrace_frame_sniffer,
1419 record_btrace_frame_dealloc_cache
/* Same vtable but sniffed via the tailcall-aware sniffer.  */
1422 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1425 record_btrace_frame_unwind_stop_reason,
1426 record_btrace_frame_this_id,
1427 record_btrace_frame_prev_register,
1429 record_btrace_tailcall_frame_sniffer,
1430 record_btrace_frame_dealloc_cache
1433 /* Implement the to_get_unwinder method. */
1435 static const struct frame_unwind *
1436 record_btrace_to_get_unwinder (struct target_ops *self)
1438 return &record_btrace_frame_unwind;
1441 /* Implement the to_get_tailcall_unwinder method. */
1443 static const struct frame_unwind *
1444 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1446 return &record_btrace_tailcall_frame_unwind;
1449 /* Indicate that TP should be resumed according to FLAG.
    Records the request in TP's btrace flags; errors out if a move is
    already pending for this thread.  */
1452 record_btrace_resume_thread (struct thread_info *tp,
1453 enum btrace_thread_flag flag)
1455 struct btrace_thread_info *btinfo;
1457 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1459 btinfo = &tp->btrace;
1461 if ((btinfo->flags & BTHR_MOVE) != 0)
1462 error (_("Thread already moving."));
1464 /* Fetch the latest branch trace. */
1467 btinfo->flags |= flag;
1470 /* Find the thread to resume given a PTID.
    A wildcard or process-wide PTID resolves to the current thread.  */
1472 static struct thread_info *
1473 record_btrace_find_resume_thread (ptid_t ptid)
1475 struct thread_info *tp;
1477 /* When asked to resume everything, we pick the current thread. */
1478 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1479 ptid = inferior_ptid;
1481 return find_thread_ptid (ptid);
1484 /* Start replaying a thread. */
1486 static struct btrace_insn_iterator *
1487 record_btrace_start_replaying (struct thread_info *tp)
1489 volatile struct gdb_exception except;
1490 struct btrace_insn_iterator *replay;
1491 struct btrace_thread_info *btinfo;
1494 btinfo = &tp->btrace;
1497 /* We can't start replaying without trace. */
1498 if (btinfo->begin == NULL)
1501 /* Clear the executing flag to allow changes to the current frame.
1502 We are not actually running, yet. We just started a reverse execution
1503 command or a record goto command.
1504 For the latter, EXECUTING is false and this has no effect.
1505 For the former, EXECUTING is true and we're in to_wait, about to
1506 move the thread. Since we need to recompute the stack, we temporarily
1507 set EXECUTING to flase. */
1508 executing = is_executing (tp->ptid);
1509 set_executing (tp->ptid, 0);
1511 /* GDB stores the current frame_id when stepping in order to detects steps
1513 Since frames are computed differently when we're replaying, we need to
1514 recompute those stored frames and fix them up so we can still detect
1515 subroutines after we started replaying. */
1516 TRY_CATCH (except, RETURN_MASK_ALL)
1518 struct frame_info *frame;
1519 struct frame_id frame_id;
1520 int upd_step_frame_id, upd_step_stack_frame_id;
1522 /* The current frame without replaying - computed via normal unwind. */
1523 frame = get_current_frame ();
1524 frame_id = get_frame_id (frame);
1526 /* Check if we need to update any stepping-related frame id's. */
1527 upd_step_frame_id = frame_id_eq (frame_id,
1528 tp->control.step_frame_id);
1529 upd_step_stack_frame_id = frame_id_eq (frame_id,
1530 tp->control.step_stack_frame_id);
1532 /* We start replaying at the end of the branch trace. This corresponds
1533 to the current instruction. */
1534 replay = xmalloc (sizeof (*replay));
1535 btrace_insn_end (replay, btinfo);
1537 /* We're not replaying, yet. */
1538 gdb_assert (btinfo->replay == NULL);
1539 btinfo->replay = replay;
1541 /* Make sure we're not using any stale registers. */
1542 registers_changed_ptid (tp->ptid);
1544 /* The current frame with replaying - computed via btrace unwind. */
1545 frame = get_current_frame ();
1546 frame_id = get_frame_id (frame);
1548 /* Replace stepping related frames where necessary. */
1549 if (upd_step_frame_id)
1550 tp->control.step_frame_id = frame_id;
1551 if (upd_step_stack_frame_id)
1552 tp->control.step_stack_frame_id = frame_id;
1555 /* Restore the previous execution state. */
1556 set_executing (tp->ptid, executing);
1558 if (except.reason < 0)
1560 xfree (btinfo->replay);
1561 btinfo->replay = NULL;
1563 registers_changed_ptid (tp->ptid);
1565 throw_exception (except);
1571 /* Stop replaying a thread. */
1574 record_btrace_stop_replaying (struct thread_info *tp)
1576 struct btrace_thread_info *btinfo;
1578 btinfo = &tp->btrace;
1580 xfree (btinfo->replay);
1581 btinfo->replay = NULL;
1583 /* Make sure we're not leaving any stale registers. */
1584 registers_changed_ptid (tp->ptid);
1587 /* The to_resume method of target record-btrace. */
1590 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1591 enum gdb_signal signal)
1593 struct thread_info *tp, *other;
1594 enum btrace_thread_flag flag;
1596 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1598 /* Store the execution direction of the last resume. */
1599 record_btrace_resume_exec_dir = execution_direction;
1601 tp = record_btrace_find_resume_thread (ptid);
1603 error (_("Cannot find thread to resume."));
1605 /* Stop replaying other threads if the thread to resume is not replaying. */
1606 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1607 ALL_NON_EXITED_THREADS (other)
1608 record_btrace_stop_replaying (other);
1610 /* As long as we're not replaying, just forward the request. */
1611 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1614 return ops->to_resume (ops, ptid, step, signal);
1617 /* Compute the btrace thread flag for the requested move. */
1619 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1621 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1623 /* At the moment, we only move a single thread. We could also move
1624 all threads in parallel by single-stepping each resumed thread
1625 until the first runs into an event.
1626 When we do that, we would want to continue all other threads.
1627 For now, just resume one thread to not confuse to_wait. */
1628 record_btrace_resume_thread (tp, flag);
1630 /* We just indicate the resume intent here. The actual stepping happens in
1631 record_btrace_wait below. */
1633 /* Async support. */
1634 if (target_can_async_p ())
1636 target_async (inferior_event_handler, 0);
1637 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1641 /* Find a thread to move. */
1643 static struct thread_info *
1644 record_btrace_find_thread_to_move (ptid_t ptid)
1646 struct thread_info *tp;
1648 /* First check the parameter thread. */
1649 tp = find_thread_ptid (ptid);
1650 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1653 /* Otherwise, find one other thread that has been resumed. */
1654 ALL_NON_EXITED_THREADS (tp)
1655 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1661 /* Return a target_waitstatus indicating that we ran out of history. */
1663 static struct target_waitstatus
1664 btrace_step_no_history (void)
1666 struct target_waitstatus status;
1668 status.kind = TARGET_WAITKIND_NO_HISTORY;
1673 /* Return a target_waitstatus indicating that a step finished. */
1675 static struct target_waitstatus
1676 btrace_step_stopped (void)
1678 struct target_waitstatus status;
1680 status.kind = TARGET_WAITKIND_STOPPED;
1681 status.value.sig = GDB_SIGNAL_TRAP;
1686 /* Clear the record histories. */
1689 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1691 xfree (btinfo->insn_history);
1692 xfree (btinfo->call_history);
1694 btinfo->insn_history = NULL;
1695 btinfo->call_history = NULL;
1698 /* Step a single thread. */
1700 static struct target_waitstatus
1701 record_btrace_step_thread (struct thread_info *tp)
1703 struct btrace_insn_iterator *replay, end;
1704 struct btrace_thread_info *btinfo;
1705 struct address_space *aspace;
1706 struct inferior *inf;
1707 enum btrace_thread_flag flags;
1710 /* We can't step without an execution history. */
1711 if (btrace_is_empty (tp))
1712 return btrace_step_no_history ();
1714 btinfo = &tp->btrace;
1715 replay = btinfo->replay;
1717 flags = btinfo->flags & BTHR_MOVE;
1718 btinfo->flags &= ~BTHR_MOVE;
1720 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1725 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1728 /* We're done if we're not replaying. */
1730 return btrace_step_no_history ();
1732 /* We are always able to step at least once. */
1733 steps = btrace_insn_next (replay, 1);
1734 gdb_assert (steps == 1);
1736 /* Determine the end of the instruction trace. */
1737 btrace_insn_end (&end, btinfo);
1739 /* We stop replaying if we reached the end of the trace. */
1740 if (btrace_insn_cmp (replay, &end) == 0)
1741 record_btrace_stop_replaying (tp);
1743 return btrace_step_stopped ();
1746 /* Start replaying if we're not already doing so. */
1748 replay = record_btrace_start_replaying (tp);
1750 /* If we can't step any further, we reached the end of the history. */
1751 steps = btrace_insn_prev (replay, 1);
1753 return btrace_step_no_history ();
1755 return btrace_step_stopped ();
1758 /* We're done if we're not replaying. */
1760 return btrace_step_no_history ();
1762 inf = find_inferior_ptid (tp->ptid);
1763 aspace = inf->aspace;
1765 /* Determine the end of the instruction trace. */
1766 btrace_insn_end (&end, btinfo);
1770 const struct btrace_insn *insn;
1772 /* We are always able to step at least once. */
1773 steps = btrace_insn_next (replay, 1);
1774 gdb_assert (steps == 1);
1776 /* We stop replaying if we reached the end of the trace. */
1777 if (btrace_insn_cmp (replay, &end) == 0)
1779 record_btrace_stop_replaying (tp);
1780 return btrace_step_no_history ();
1783 insn = btrace_insn_get (replay);
1786 DEBUG ("stepping %d (%s) ... %s", tp->num,
1787 target_pid_to_str (tp->ptid),
1788 core_addr_to_string_nz (insn->pc));
1790 if (breakpoint_here_p (aspace, insn->pc))
1791 return btrace_step_stopped ();
1795 /* Start replaying if we're not already doing so. */
1797 replay = record_btrace_start_replaying (tp);
1799 inf = find_inferior_ptid (tp->ptid);
1800 aspace = inf->aspace;
1804 const struct btrace_insn *insn;
1806 /* If we can't step any further, we're done. */
1807 steps = btrace_insn_prev (replay, 1);
1809 return btrace_step_no_history ();
1811 insn = btrace_insn_get (replay);
1814 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1815 target_pid_to_str (tp->ptid),
1816 core_addr_to_string_nz (insn->pc));
1818 if (breakpoint_here_p (aspace, insn->pc))
1819 return btrace_step_stopped ();
1824 /* The to_wait method of target record-btrace. */
1827 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1828 struct target_waitstatus *status, int options)
1830 struct thread_info *tp, *other;
1832 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1834 /* As long as we're not replaying, just forward the request. */
1835 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1838 return ops->to_wait (ops, ptid, status, options);
1841 /* Let's find a thread to move. */
1842 tp = record_btrace_find_thread_to_move (ptid);
1845 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1847 status->kind = TARGET_WAITKIND_IGNORE;
1848 return minus_one_ptid;
1851 /* We only move a single thread. We're not able to correlate threads. */
1852 *status = record_btrace_step_thread (tp);
1854 /* Stop all other threads. */
1856 ALL_NON_EXITED_THREADS (other)
1857 other->btrace.flags &= ~BTHR_MOVE;
1859 /* Start record histories anew from the current position. */
1860 record_btrace_clear_histories (&tp->btrace);
1862 /* We moved the replay position but did not update registers. */
1863 registers_changed_ptid (tp->ptid);
/* The to_can_execute_reverse method of target record-btrace.
   Branch tracing always supports reverse execution over the recorded
   history.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1876 /* The to_decr_pc_after_break method of target record-btrace. */
1879 record_btrace_decr_pc_after_break (struct target_ops *ops,
1880 struct gdbarch *gdbarch)
1882 /* When replaying, we do not actually execute the breakpoint instruction
1883 so there is no need to adjust the PC after hitting a breakpoint. */
1884 if (record_btrace_is_replaying (ops))
1887 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1890 /* The to_update_thread_list method of target record-btrace. */
1893 record_btrace_update_thread_list (struct target_ops *ops)
1895 /* We don't add or remove threads during replay. */
1896 if (record_btrace_is_replaying (ops))
1899 /* Forward the request. */
1901 ops->to_update_thread_list (ops);
1904 /* The to_thread_alive method of target record-btrace. */
1907 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1909 /* We don't add or remove threads during replay. */
1910 if (record_btrace_is_replaying (ops))
1911 return find_thread_ptid (ptid) != NULL;
1913 /* Forward the request. */
1915 return ops->to_thread_alive (ops, ptid);
1918 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1922 record_btrace_set_replay (struct thread_info *tp,
1923 const struct btrace_insn_iterator *it)
1925 struct btrace_thread_info *btinfo;
1927 btinfo = &tp->btrace;
1929 if (it == NULL || it->function == NULL)
1930 record_btrace_stop_replaying (tp);
1933 if (btinfo->replay == NULL)
1934 record_btrace_start_replaying (tp);
1935 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1938 *btinfo->replay = *it;
1939 registers_changed_ptid (tp->ptid);
1942 /* Start anew from the new replay position. */
1943 record_btrace_clear_histories (btinfo);
1946 /* The to_goto_record_begin method of target record-btrace. */
1949 record_btrace_goto_begin (struct target_ops *self)
1951 struct thread_info *tp;
1952 struct btrace_insn_iterator begin;
1954 tp = require_btrace_thread ();
1956 btrace_insn_begin (&begin, &tp->btrace);
1957 record_btrace_set_replay (tp, &begin);
1959 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1962 /* The to_goto_record_end method of target record-btrace. */
1965 record_btrace_goto_end (struct target_ops *ops)
1967 struct thread_info *tp;
1969 tp = require_btrace_thread ();
1971 record_btrace_set_replay (tp, NULL);
1973 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1976 /* The to_goto_record method of target record-btrace. */
1979 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1981 struct thread_info *tp;
1982 struct btrace_insn_iterator it;
1983 unsigned int number;
1988 /* Check for wrap-arounds. */
1990 error (_("Instruction number out of range."));
1992 tp = require_btrace_thread ();
1994 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1996 error (_("No such instruction."));
1998 record_btrace_set_replay (tp, &it);
2000 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2003 /* The to_execution_direction target method. */
2005 static enum exec_direction_kind
2006 record_btrace_execution_direction (struct target_ops *self)
2008 return record_btrace_resume_exec_dir;
2011 /* The to_prepare_to_generate_core target method. */
2014 record_btrace_prepare_to_generate_core (struct target_ops *self)
2016 record_btrace_generating_corefile = 1;
2019 /* The to_done_generating_core target method. */
2022 record_btrace_done_generating_core (struct target_ops *self)
2024 record_btrace_generating_corefile = 0;
2027 /* Initialize the record-btrace target ops. */
2030 init_record_btrace_ops (void)
2032 struct target_ops *ops;
2034 ops = &record_btrace_ops;
2035 ops->to_shortname = "record-btrace";
2036 ops->to_longname = "Branch tracing target";
2037 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2038 ops->to_open = record_btrace_open;
2039 ops->to_close = record_btrace_close;
2040 ops->to_async = record_btrace_async;
2041 ops->to_detach = record_detach;
2042 ops->to_disconnect = record_disconnect;
2043 ops->to_mourn_inferior = record_mourn_inferior;
2044 ops->to_kill = record_kill;
2045 ops->to_stop_recording = record_btrace_stop_recording;
2046 ops->to_info_record = record_btrace_info;
2047 ops->to_insn_history = record_btrace_insn_history;
2048 ops->to_insn_history_from = record_btrace_insn_history_from;
2049 ops->to_insn_history_range = record_btrace_insn_history_range;
2050 ops->to_call_history = record_btrace_call_history;
2051 ops->to_call_history_from = record_btrace_call_history_from;
2052 ops->to_call_history_range = record_btrace_call_history_range;
2053 ops->to_record_is_replaying = record_btrace_is_replaying;
2054 ops->to_xfer_partial = record_btrace_xfer_partial;
2055 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2056 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2057 ops->to_fetch_registers = record_btrace_fetch_registers;
2058 ops->to_store_registers = record_btrace_store_registers;
2059 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2060 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2061 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2062 ops->to_resume = record_btrace_resume;
2063 ops->to_wait = record_btrace_wait;
2064 ops->to_update_thread_list = record_btrace_update_thread_list;
2065 ops->to_thread_alive = record_btrace_thread_alive;
2066 ops->to_goto_record_begin = record_btrace_goto_begin;
2067 ops->to_goto_record_end = record_btrace_goto_end;
2068 ops->to_goto_record = record_btrace_goto;
2069 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2070 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
2071 ops->to_execution_direction = record_btrace_execution_direction;
2072 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2073 ops->to_done_generating_core = record_btrace_done_generating_core;
2074 ops->to_stratum = record_stratum;
2075 ops->to_magic = OPS_MAGIC;
2078 /* Start recording in BTS format. */
2081 cmd_record_btrace_bts_start (char *args, int from_tty)
2083 volatile struct gdb_exception exception;
2085 if (args != NULL && *args != 0)
2086 error (_("Invalid argument."));
2088 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2090 TRY_CATCH (exception, RETURN_MASK_ALL)
2091 execute_command ("target record-btrace", from_tty);
2093 if (exception.error != 0)
2095 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2096 throw_exception (exception);
2100 /* Alias for "target record". */
2103 cmd_record_btrace_start (char *args, int from_tty)
2105 volatile struct gdb_exception exception;
2107 if (args != NULL && *args != 0)
2108 error (_("Invalid argument."));
2110 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2112 TRY_CATCH (exception, RETURN_MASK_ALL)
2113 execute_command ("target record-btrace", from_tty);
2115 if (exception.error == 0)
2118 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2119 throw_exception (exception);
2122 /* The "set record btrace" command. */
2125 cmd_set_record_btrace (char *args, int from_tty)
2127 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2130 /* The "show record btrace" command. */
2133 cmd_show_record_btrace (char *args, int from_tty)
2135 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2138 /* The "show record btrace replay-memory-access" command. */
2141 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2142 struct cmd_list_element *c, const char *value)
2144 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2145 replay_memory_access);
2148 /* The "set record btrace bts" command. */
2151 cmd_set_record_btrace_bts (char *args, int from_tty)
2153 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2154 "by an apporpriate subcommand.\n"));
2155 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2156 all_commands, gdb_stdout);
2159 /* The "show record btrace bts" command. */
2162 cmd_show_record_btrace_bts (char *args, int from_tty)
2164 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2167 void _initialize_record_btrace (void);
2169 /* Initialize btrace commands. */
2172 _initialize_record_btrace (void)
2174 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2175 _("Start branch trace recording."), &record_btrace_cmdlist,
2176 "record btrace ", 0, &record_cmdlist);
2177 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2179 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2181 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2182 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2183 This format may not be available on all processors."),
2184 &record_btrace_cmdlist);
2185 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2187 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2188 _("Set record options"), &set_record_btrace_cmdlist,
2189 "set record btrace ", 0, &set_record_cmdlist);
2191 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2192 _("Show record options"), &show_record_btrace_cmdlist,
2193 "show record btrace ", 0, &show_record_cmdlist);
2195 add_setshow_enum_cmd ("replay-memory-access", no_class,
2196 replay_memory_access_types, &replay_memory_access, _("\
2197 Set what memory accesses are allowed during replay."), _("\
2198 Show what memory accesses are allowed during replay."),
2199 _("Default is READ-ONLY.\n\n\
2200 The btrace record target does not trace data.\n\
2201 The memory therefore corresponds to the live target and not \
2202 to the current replay position.\n\n\
2203 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2204 When READ-WRITE, allow accesses to read-only and read-write memory during \
2206 NULL, cmd_show_replay_memory_access,
2207 &set_record_btrace_cmdlist,
2208 &show_record_btrace_cmdlist);
2210 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2211 _("Set record btrace bts options"),
2212 &set_record_btrace_bts_cmdlist,
2213 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2215 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2216 _("Show record btrace bts options"),
2217 &show_record_btrace_bts_cmdlist,
2218 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2220 add_setshow_uinteger_cmd ("buffer-size", no_class,
2221 &record_btrace_conf.bts.size,
2222 _("Set the record/replay bts buffer size."),
2223 _("Show the record/replay bts buffer size."), _("\
2224 When starting recording request a trace buffer of this size. \
2225 The actual buffer size may differ from the requested size. \
2226 Use \"info record\" to see the actual buffer size.\n\n\
2227 Bigger buffers allow longer recording but also take more time to process \
2228 the recorded execution trace.\n\n\
2229 The trace buffer size may not be changed while recording."), NULL, NULL,
2230 &set_record_btrace_bts_cmdlist,
2231 &show_record_btrace_bts_cmdlist);
2233 init_record_btrace_ops ();
2234 add_target (&record_btrace_ops);
2236 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2239 record_btrace_conf.bts.size = 64 * 1024;