1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
  replay_memory_access_read_only,
  replay_memory_access_read_write,

/* The currently allowed replay memory access type.  Compared by pointer
   identity against the two constants above (see record_btrace_xfer_partial).  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.
   While set, replay-mode restrictions on memory/register access are
   bypassed (see the xfer/fetch/store methods below).  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.
   Only prints when the "set debug record" variable RECORD_DEBUG is
   non-zero.  */

#define DEBUG(msg, args...) \
    if (record_debug != 0) \
      fprintf_unfiltered (gdb_stdlog, \
			  "[record-btrace] " msg "\n", ##args); \
/* Update the branch trace for the current thread and return a pointer to its

   Throws an error if there is no thread or no trace.  This function never

static struct thread_info *
require_btrace_thread (void)
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  /* NOTE(review): the NULL check guarding this error is not visible in this
     listing; presumably "if (tp == NULL)" precedes it.  */
  error (_("No thread."));

  validate_registers_access ();

  if (btrace_is_empty (tp))
    error (_("No trace."));

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never

static struct btrace_thread_info *
require_btrace (void)
  struct thread_info *tp;

  /* Delegates thread lookup and trace validation; errors propagate.  */
  tp = require_btrace_thread ();
/* Enable branch tracing for one thread.  Warn on errors.  */

record_btrace_enable_warn (struct thread_info *tp)
  btrace_enable (tp, &record_btrace_conf);
  CATCH (error, RETURN_MASK_ERROR)
    /* Demote the error to a warning so a failing thread does not abort
       enabling the remaining threads.  */
    warning ("%s", error.message);

/* Callback function to disable branch tracing for one thread.  */

record_btrace_disable_callback (void *arg)
  struct thread_info *tp = (struct thread_info *) arg;

/* Enable automatic tracing of new threads.  */

record_btrace_auto_enable (void)
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);

/* Disable automatic tracing of new threads.  */

record_btrace_auto_disable (void)
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
/* The record-btrace async event handler function.  */

record_btrace_handle_async_inferior_event (gdb_client_data data)
  inferior_event_handler (INF_REG_EVENT, NULL);

/* See record-btrace.h.  */

record_btrace_push_target (void)
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
  record_btrace_generating_corefile = 0;

  /* Notify observers (e.g. the MI interpreter) that btrace recording
     started.  */
  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
/* The to_open method of target record-btrace.  */

record_btrace_open (const char *args, int from_tty)
  struct cleanup *disable_chain;
  struct thread_info *tp;

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* If enabling tracing fails for any thread, the cleanup chain disables
     tracing for all threads already enabled.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    /* An empty ARGS means "all threads"; otherwise ARGS is a thread number
       list.  */
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);

  record_btrace_push_target ();

  /* Success - keep tracing enabled.  */
  discard_cleanups (disable_chain);
/* The to_stop_recording method of target record-btrace.  */

record_btrace_stop_recording (struct target_ops *self)
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    /* Only disable threads we actually enabled tracing for.  */
    if (tp->btrace.target != NULL)

/* The to_disconnect method of target record-btrace.  */

record_btrace_disconnect (struct target_ops *self, const char *args,
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);

/* The to_close method of target record-btrace.  */

record_btrace_close (struct target_ops *self)
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);

/* The to_async method of target record-btrace.  */

record_btrace_async (struct target_ops *ops, int enable)
  mark_async_event_handler (record_btrace_async_inferior_event_handler);
  clear_async_event_handler (record_btrace_async_inferior_event_handler);

  /* Forward the request to the target beneath.  */
  ops->beneath->to_async (ops->beneath, enable);
/* Adjusts the size and returns a human readable size suffix.
   Divides *SIZE down by the largest power-of-two unit (GB/MB/KB) that
   divides it evenly.  */

record_btrace_adjust_size (unsigned int *size)
  if ((sz & ((1u << 30) - 1)) == 0)
  else if ((sz & ((1u << 20) - 1)) == 0)
  else if ((sz & ((1u << 10) - 1)) == 0)

/* Print a BTS configuration.  */

record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
  suffix = record_btrace_adjust_size (&size);
  printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);

/* Print an Intel Processor Trace configuration.  */

record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
  suffix = record_btrace_adjust_size (&size);
  printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
381 /* Print a branch tracing configuration. */
384 record_btrace_print_conf (const struct btrace_config *conf)
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
389 switch (conf->format)
391 case BTRACE_FORMAT_NONE:
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* The to_info_record method of target record-btrace.
   Prints the recording configuration plus instruction/function/gap counts
   and, if replaying, the current replay position.  */

record_btrace_info (struct target_ops *self)
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  tp = find_thread_ptid (inferior_ptid);
  /* NOTE(review): the "tp == NULL" guard for this error is elided here.  */
  error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  record_btrace_print_conf (conf);

  if (!btrace_is_empty (tp))
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)

      gaps = btinfo->ngaps;

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
/* Print a decode error.  */

btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  Those are
     printed without the "decode error (N): " prefix.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
  uiout->text (errstr);
  uiout->text (_("]\n"));

/* Print an unsigned int.  Helper: ui_out has no native unsigned field
   printer.  */

ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
  uiout->field_fmt (fld, "%u", val);
/* A range of source lines.  */

struct btrace_line_range
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */

  /* The last line (exclusive).  */

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
  struct btrace_line_range range;

  range.symtab = symtab;

/* Add a line to a line range.  The range grows to include LINE; an empty
   range becomes the single-line range [LINE, LINE + 1).  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
  if (range.end <= range.begin)
      /* This is the first entry.  */
      range.end = line + 1;
  else if (line < range.begin)
  else if (range.end < line)
540 /* Return non-zero if RANGE is empty, zero otherwise. */
543 btrace_line_range_is_empty (struct btrace_line_range range)
545 return range.end <= range.begin;
548 /* Return non-zero if LHS contains RHS, zero otherwise. */
551 btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
559 /* Find the line range associated with PC. */
561 static struct btrace_line_range
562 btrace_find_line_range (CORE_ADDR pc)
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
570 symtab = find_pc_line_symtab (pc);
572 return btrace_mk_line_range (NULL, 0, 0);
574 ltable = SYMTAB_LINETABLE (symtab);
576 return btrace_mk_line_range (symtab, 0, 0);
578 nlines = ltable->nitems;
579 lines = ltable->item;
581 return btrace_mk_line_range (symtab, 0, 0);
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines - 1; i++)
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
      /* Close the tuple for the previous source line before starting a new
	 one.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
/* Disassemble a section of the recorded instruction trace.
   Prints instructions in [BEGIN, END), interleaving source lines when
   DISASSEMBLY_SOURCE is set and rendering trace gaps as decode errors.  */

btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      /* Only print the lines if we have not printed them for a
		 previous instruction already.  */
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
	  else if (ui_item_chain == NULL)
	      = make_cleanup_ui_out_tuple_begin_end (uiout,
	      /* No source information.  */
	      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");

      gdb_assert (ui_item_chain != NULL);

      memset (&dinsn, 0, sizeof (dinsn));
      dinsn.number = btrace_insn_number (&it);
      dinsn.addr = insn->pc;

      if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	dinsn.is_speculative = 1;

      disasm.pretty_print_insn (uiout, &dinsn, flags);

  do_cleanups (cleanups);
/* The to_insn_history method of target record-btrace.
   SIZE's sign selects the direction: positive expands forward from the
   current position, negative backward.  */

record_btrace_insn_history (struct target_ops *self, int size,
			    gdb_disassembly_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining

	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);

	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);

      begin = history->begin;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

	  covered = btrace_insn_prev (&begin, context);

	  covered = btrace_insn_next (&end, context);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
      /* COVERED == 0 here: nothing further in the chosen direction.  */
      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the printed window so a subsequent bare command continues
     from it.  */
  btrace_set_insn_history (btinfo, &begin, &end);
/* The to_insn_history_range method of target record-btrace.
   Prints instructions numbered [FROM, TO], both inclusive.  */

record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  gdb_disassembly_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
/* The to_insn_history_from method of target record-btrace.
   Computes an inclusive [begin, end] window of |SIZE| instructions anchored
   at FROM (direction given by SIZE's sign) and delegates to the range
   method.  */

record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 gdb_disassembly_flags flags)
  ULONGEST begin, end, context;

  context = abs (size);
  error (_("Bad record instruction-history-size."));

  begin = from - context + 1;

  end = from + context - 1;

  /* Check for wrap-around.  */

  record_btrace_insn_history_range (self, begin, end, flags);
/* Print the instruction number range for a function call history line.  */

btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  /* END is inclusive: the number of the last instruction in BFUN.  */
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_field_uint (uiout, "insn end", end);

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
  struct symtab *symtab;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      /* Skip lines from other symtabs (inlining/macros) or without line
	 info.  */
      if (sal.symtab != symtab || sal.line == 0)

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
/* Print the source line information for a function call history line.  */

btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);

  uiout->field_int ("min line", begin);

  uiout->field_int ("max line", end);

/* Get the name of a branch trace function.
   Prefers the full symbol's name over the minimal symbol's.  */

btrace_get_bfun_name (const struct btrace_function *bfun)
  struct minimal_symbol *msym;

  return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
/* Disassemble a section of the recorded function trace.
   Prints one line per call segment in [BEGIN, END), optionally with call
   indentation, instruction ranges, and source lines per FLAGS.  */

btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;

      bfun = btrace_call_get (&it);

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	  /* Indent by the call nesting depth, normalized by BTINFO's base
	     level.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)

	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
/* The to_call_history method of target record-btrace.
   SIZE's sign selects the direction, mirroring record_btrace_insn_history
   at function-call granularity.  */

record_btrace_call_history (struct target_ops *self, int size, int int_flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining

	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);

	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);

      begin = history->begin;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

	  covered = btrace_call_prev (&begin, context);

	  covered = btrace_call_next (&end, context);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
      /* COVERED == 0 here: nothing further in the chosen direction.  */
      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  btrace_set_call_history (btinfo, &begin, &end);
/* The to_call_history_range method of target record-btrace.
   Prints call segments numbered [FROM, TO], both inclusive.  */

record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;

  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

/* The to_call_history_from method of target record-btrace.
   Computes an inclusive window of |SIZE| call segments anchored at FROM and
   delegates to the range method.  */

record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  error (_("Bad record function-call-history-size."));

  begin = from - context + 1;

  end = from + context - 1;

  /* Check for wrap-around.  */

  record_btrace_call_history_range (self, begin, end, flags);
/* The to_record_method method of target record-btrace.  */

static enum record_method
record_btrace_record_method (struct target_ops *self, ptid_t ptid)
  const struct btrace_config *config;
  struct thread_info * const tp = find_thread_ptid (ptid);

  error (_("No thread."));

  /* A thread without a btrace target attached is not being recorded.  */
  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;

/* The to_record_is_replaying method of target record-btrace.
   Returns non-zero if any thread matching PTID is replaying.  */

record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1280 /* The to_record_will_replay method of target record-btrace. */
1283 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1285 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
/* The to_xfer_partial method of target record-btrace.
   While replaying with "replay-memory-access read-only", memory writes are
   refused and reads are restricted to read-only sections; otherwise the
   request is forwarded to the target beneath.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
	case TARGET_OBJECT_MEMORY:
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
		return TARGET_XFER_UNAVAILABLE;

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);

	    return TARGET_XFER_UNAVAILABLE;

  /* Forward the request.  */
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
/* The to_insert_breakpoint method of target record-btrace.  */

record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
  CATCH (except, RETURN_MASK_ALL)
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;

/* The to_remove_breakpoint method of target record-btrace.  */

record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
  CATCH (except, RETURN_MASK_ALL)
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The to_fetch_registers method of target record-btrace.
   While replaying, only the PC register is available; it is supplied from
   the replay iterator's current instruction.  */

record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);

      /* Not replaying: forward to the target beneath.  */
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);

/* The to_store_registers method of target record-btrace.  */

record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t->to_store_registers (t, regcache, regno);

/* The to_prepare_to_store method of target record-btrace.  */

record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
  struct target_ops *t;

  /* Silently ignore while replaying; storing is refused later anyway.  */
  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))

  t->to_prepare_to_store (t, regcache);
/* The branch trace frame cache.  */

struct btrace_frame_cache
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  Hashes on the frame pointer,
   matching bfcache_eq below.  */

bfcache_hash (const void *arg)
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);

/* eq_f for htab_create_alloc of bfcache.  */

bfcache_eq (const void *arg1, const void *arg2)
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
1524 /* Create a new btrace frame cache. */
1526 static struct btrace_frame_cache *
1527 bfcache_new (struct frame_info *frame)
1529 struct btrace_frame_cache *cache;
1532 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1533 cache->frame = frame;
1535 slot = htab_find_slot (bfcache, cache, INSERT);
1536 gdb_assert (*slot == NULL);
1542 /* Extract the branch trace function from a branch trace frame. */
1544 static const struct btrace_function *
1545 btrace_get_frame_function (struct frame_info *frame)
1547 const struct btrace_frame_cache *cache;
1548 const struct btrace_function *bfun;
1549 struct btrace_frame_cache pattern;
1552 pattern.frame = frame;
1554 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1558 cache = (const struct btrace_frame_cache *) *slot;
1562 /* Implement stop_reason method for record_btrace_frame_unwind. */
/* NOTE(review): this extract omits intermediate source lines (including the
   condition that selects UNWIND_UNAVAILABLE); comments describe only the
   visible code.  */
1564 static enum unwind_stop_reason
1565 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1568 const struct btrace_frame_cache *cache;
1569 const struct btrace_function *bfun;
1571 cache = (const struct btrace_frame_cache *) *this_cache;
1573 gdb_assert (bfun != NULL);
/* Presumably unavailable when the function segment has no caller link —
   confirm against the full source.  */
1576 return UNWIND_UNAVAILABLE;
1578 return UNWIND_NO_REASON;
1581 /* Implement this_id method for record_btrace_frame_unwind. */
1584 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1585 struct frame_id *this_id)
1587 const struct btrace_frame_cache *cache;
1588 const struct btrace_function *bfun;
1589 struct btrace_call_iterator it;
1590 CORE_ADDR code, special;
1592 cache = (const struct btrace_frame_cache *) *this_cache;
1595 gdb_assert (bfun != NULL);
/* Walk PREV links back to the first segment of this function instance so the
   frame id stays stable across segment splits.  */
1597 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1598 bfun = btrace_call_get (&it);
/* The stack is not available during replay; use an unavailable-stack id with
   the function start address and the segment number as discriminator.  */
1600 code = get_frame_func (this_frame);
1601 special = bfun->number;
1603 *this_id = frame_id_build_unavailable_stack_special (code, special);
1605 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1606 btrace_get_bfun_name (cache->bfun),
1607 core_addr_to_string_nz (this_id->code_addr),
1608 core_addr_to_string_nz (this_id->special_addr));
1611 /* Implement prev_register method for record_btrace_frame_unwind. */
1613 static struct value *
1614 record_btrace_frame_prev_register (struct frame_info *this_frame,
1618 const struct btrace_frame_cache *cache;
1619 const struct btrace_function *bfun, *caller;
1620 struct btrace_call_iterator it;
1621 struct gdbarch *gdbarch;
/* Only the PC can be reconstructed from the branch trace.  */
1625 gdbarch = get_frame_arch (this_frame);
1626 pcreg = gdbarch_pc_regnum (gdbarch);
1627 if (pcreg < 0 || regnum != pcreg)
1628 throw_error (NOT_AVAILABLE_ERROR,
1629 _("Registers are not available in btrace record history"));
1631 cache = (const struct btrace_frame_cache *) *this_cache;
1633 gdb_assert (bfun != NULL);
/* Find the caller segment via the UP link; fail if the trace has none.  */
1635 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1636 throw_error (NOT_AVAILABLE_ERROR,
1637 _("No caller in btrace record history"));
1639 caller = btrace_call_get (&it);
/* If the UP link was created by a return, the caller resumes at its first
   recorded instruction; otherwise at the instruction after its last recorded
   (call) instruction.  */
1641 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1642 pc = caller->insn.front ().pc;
1645 pc = caller->insn.back ().pc;
1646 pc += gdb_insn_length (gdbarch, pc);
1649 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1650 btrace_get_bfun_name (bfun), bfun->level,
1651 core_addr_to_string_nz (pc));
1653 return frame_unwind_got_address (this_frame, regnum, pc);
1656 /* Implement sniffer method for record_btrace_frame_unwind. */
/* NOTE(review): this extract omits intermediate source lines (braces,
   early-return conditions, return values); comments describe only the
   visible code.  */
1659 record_btrace_frame_sniffer (const struct frame_unwind *self,
1660 struct frame_info *this_frame,
1663 const struct btrace_function *bfun;
1664 struct btrace_frame_cache *cache;
1665 struct thread_info *tp;
1666 struct frame_info *next;
1668 /* THIS_FRAME does not contain a reference to its thread. */
1669 tp = find_thread_ptid (inferior_ptid);
1670 gdb_assert (tp != NULL);
1673 next = get_next_frame (this_frame);
/* With no NEXT frame this is the sentinel: take the current replay
   position as the function segment.  */
1676 const struct btrace_insn_iterator *replay;
1678 replay = tp->btrace.replay;
1680 bfun = &replay->btinfo->functions[replay->call_index];
/* Otherwise derive this frame from the callee's UP link, unless the callee
   was reached by a tail call (handled by the tailcall sniffer below).  */
1684 const struct btrace_function *callee;
1685 struct btrace_call_iterator it;
1687 callee = btrace_get_frame_function (next);
1688 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1691 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1694 bfun = btrace_call_get (&it);
1700 DEBUG ("[frame] sniffed frame for %s on level %d",
1701 btrace_get_bfun_name (bfun), bfun->level);
1703 /* This is our frame. Initialize the frame cache. */
1704 cache = bfcache_new (this_frame);
1708 *this_cache = cache;
1712 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1715 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1716 struct frame_info *this_frame,
1719 const struct btrace_function *bfun, *callee;
1720 struct btrace_frame_cache *cache;
1721 struct btrace_call_iterator it;
1722 struct frame_info *next;
1723 struct thread_info *tinfo;
1725 next = get_next_frame (this_frame);
1729 callee = btrace_get_frame_function (next);
/* Only claim the frame when the callee was entered via a tail call.  */
1733 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1736 tinfo = find_thread_ptid (inferior_ptid);
1737 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1740 bfun = btrace_call_get (&it)
1742 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1743 btrace_get_bfun_name (bfun), bfun->level);
1745 /* This is our frame. Initialize the frame cache. */
1746 cache = bfcache_new (this_frame);
1750 *this_cache = cache;
/* dealloc_cache callback: drop the frame's entry from BFCACHE when the
   frame is destroyed.  */
1755 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1757 struct btrace_frame_cache *cache;
1760 cache = (struct btrace_frame_cache *) this_cache;
1762 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1763 gdb_assert (slot != NULL);
1765 htab_remove_elt (bfcache, cache);
1768 /* btrace recording does not store previous memory content, nor the stack
1769 frames' content. Any unwinding would return erroneous results as the stack
1770 contents no longer match the changed PC value restored from history.
1771 Therefore this unwinder reports any possibly unwound registers as
/* The unwinder used for normal frames during btrace replay.
   NOTE(review): this extract omits some struct fields (e.g. frame type,
   unwind_data) between the visible initializers.  */
1774 const struct frame_unwind record_btrace_frame_unwind =
1777 record_btrace_frame_unwind_stop_reason,
1778 record_btrace_frame_this_id,
1779 record_btrace_frame_prev_register,
1781 record_btrace_frame_sniffer,
1782 record_btrace_frame_dealloc_cache
/* The unwinder used for tail-call frames during btrace replay; it shares
   all callbacks with the normal unwinder except the sniffer.  */
1785 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1788 record_btrace_frame_unwind_stop_reason,
1789 record_btrace_frame_this_id,
1790 record_btrace_frame_prev_register,
1792 record_btrace_tailcall_frame_sniffer,
1793 record_btrace_frame_dealloc_cache
1796 /* Implement the to_get_unwinder method. */
1798 static const struct frame_unwind *
1799 record_btrace_to_get_unwinder (struct target_ops *self)
1801 return &record_btrace_frame_unwind;
1804 /* Implement the to_get_tailcall_unwinder method. */
1806 static const struct frame_unwind *
1807 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1809 return &record_btrace_tailcall_frame_unwind;
1812 /* Return a human-readable string for FLAG. */
/* NOTE(review): the switch scaffolding and the remaining cases are omitted
   from this extract; only two of the case bodies are visible.  */
1815 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1823 return "reverse-step";
1829 return "reverse-cont";
1838 /* Indicate that TP should be resumed according to FLAG. */
1841 record_btrace_resume_thread (struct thread_info *tp,
1842 enum btrace_thread_flag flag)
1844 struct btrace_thread_info *btinfo;
1846 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1847 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1849 btinfo = &tp->btrace;
1851 /* Fetch the latest branch trace. */
1854 /* A resume request overwrites a preceding resume or stop request. */
1855 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1856 btinfo->flags |= flag;
1859 /* Get the current frame for TP. */
/* NOTE(review): this extract omits some lines (declaration of EXECUTING,
   the TRY block opener, the final return); comments describe only the
   visible code.  */
1861 static struct frame_info *
1862 get_thread_current_frame (struct thread_info *tp)
1864 struct frame_info *frame;
1865 ptid_t old_inferior_ptid;
1868 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1869 old_inferior_ptid = inferior_ptid;
1870 inferior_ptid = tp->ptid;
1872 /* Clear the executing flag to allow changes to the current frame.
1873 We are not actually running, yet. We just started a reverse execution
1874 command or a record goto command.
1875 For the latter, EXECUTING is false and this has no effect.
1876 For the former, EXECUTING is true and we're in to_wait, about to
1877 move the thread. Since we need to recompute the stack, we temporarily
1878 set EXECUTING to false. */
1879 executing = is_executing (inferior_ptid);
1880 set_executing (inferior_ptid, 0);
1885 frame = get_current_frame ();
/* On error, undo both temporary state changes before re-throwing so the
   caller sees consistent global state.  */
1887 CATCH (except, RETURN_MASK_ALL)
1889 /* Restore the previous execution state. */
1890 set_executing (inferior_ptid, executing);
1892 /* Restore the previous inferior_ptid. */
1893 inferior_ptid = old_inferior_ptid;
1895 throw_exception (except);
1899 /* Restore the previous execution state. */
1900 set_executing (inferior_ptid, executing);
1902 /* Restore the previous inferior_ptid. */
1903 inferior_ptid = old_inferior_ptid;
1908 /* Start replaying a thread. */
/* NOTE(review): this extract omits intermediate source lines (braces, the
   TRY opener, some declarations and returns); comments describe only the
   visible code.  */
1910 static struct btrace_insn_iterator *
1911 record_btrace_start_replaying (struct thread_info *tp)
1913 struct btrace_insn_iterator *replay;
1914 struct btrace_thread_info *btinfo;
1916 btinfo = &tp->btrace;
1919 /* We can't start replaying without trace. */
1920 if (btinfo->functions.empty ())
1923 /* GDB stores the current frame_id when stepping in order to detect steps
1925 Since frames are computed differently when we're replaying, we need to
1926 recompute those stored frames and fix them up so we can still detect
1927 subroutines after we started replaying. */
1930 struct frame_info *frame;
1931 struct frame_id frame_id;
1932 int upd_step_frame_id, upd_step_stack_frame_id;
1934 /* The current frame without replaying - computed via normal unwind. */
1935 frame = get_thread_current_frame (tp);
1936 frame_id = get_frame_id (frame);
1938 /* Check if we need to update any stepping-related frame id's. */
1939 upd_step_frame_id = frame_id_eq (frame_id,
1940 tp->control.step_frame_id);
1941 upd_step_stack_frame_id = frame_id_eq (frame_id,
1942 tp->control.step_stack_frame_id);
1944 /* We start replaying at the end of the branch trace. This corresponds
1945 to the current instruction. */
1946 replay = XNEW (struct btrace_insn_iterator);
1947 btrace_insn_end (replay, btinfo);
1949 /* Skip gaps at the end of the trace. */
1950 while (btrace_insn_get (replay) == NULL)
1954 steps = btrace_insn_prev (replay, 1);
/* If we cannot step back past the gaps, there is no usable trace.  */
1956 error (_("No trace."));
1959 /* We're not replaying, yet. */
1960 gdb_assert (btinfo->replay == NULL);
1961 btinfo->replay = replay;
1963 /* Make sure we're not using any stale registers. */
1964 registers_changed_ptid (tp->ptid);
1966 /* The current frame with replaying - computed via btrace unwind. */
1967 frame = get_thread_current_frame (tp);
1968 frame_id = get_frame_id (frame);
1970 /* Replace stepping related frames where necessary. */
1971 if (upd_step_frame_id)
1972 tp->control.step_frame_id = frame_id;
1973 if (upd_step_stack_frame_id)
1974 tp->control.step_stack_frame_id = frame_id;
/* On error, undo the partial replay setup before re-throwing.  */
1976 CATCH (except, RETURN_MASK_ALL)
1978 xfree (btinfo->replay);
1979 btinfo->replay = NULL;
1981 registers_changed_ptid (tp->ptid);
1983 throw_exception (except);
1990 /* Stop replaying a thread. */
1993 record_btrace_stop_replaying (struct thread_info *tp)
1995 struct btrace_thread_info *btinfo;
1997 btinfo = &tp->btrace;
/* Discard the replay iterator; a NULL REPLAY means "not replaying".  */
1999 xfree (btinfo->replay);
2000 btinfo->replay = NULL;
2002 /* Make sure we're not leaving any stale registers. */
2003 registers_changed_ptid (tp->ptid);
2006 /* Stop replaying TP if it is at the end of its execution history. */
2009 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2011 struct btrace_insn_iterator *replay, end;
2012 struct btrace_thread_info *btinfo;
2014 btinfo = &tp->btrace;
2015 replay = btinfo->replay;
/* Compare the replay position against the end of the trace; at the end,
   replaying is equivalent to live state, so stop replaying.  */
2020 btrace_insn_end (&end, btinfo);
2022 if (btrace_insn_cmp (replay, &end) == 0)
2023 record_btrace_stop_replaying (tp);
2026 /* The to_resume method of target record-btrace. */
/* NOTE(review): this extract omits intermediate source lines (braces,
   returns, the CFLAG assignments' else-branches); comments describe only the
   visible code.  */
2029 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2030 enum gdb_signal signal)
2032 struct thread_info *tp;
2033 enum btrace_thread_flag flag, cflag;
2035 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2036 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2037 step ? "step" : "cont");
2039 /* Store the execution direction of the last resume.
2041 If there is more than one to_resume call, we have to rely on infrun
2042 to not change the execution direction in-between. */
2043 record_btrace_resume_exec_dir = execution_direction;
2045 /* As long as we're not replaying, just forward the request.
2047 For non-stop targets this means that no thread is replaying. In order to
2048 make progress, we may need to explicitly move replaying threads to the end
2049 of their execution history. */
2050 if ((execution_direction != EXEC_REVERSE)
2051 && !record_btrace_is_replaying (ops, minus_one_ptid))
2054 ops->to_resume (ops, ptid, step, signal);
2058 /* Compute the btrace thread flag for the requested move. */
2059 if (execution_direction == EXEC_REVERSE)
2061 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2066 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2070 /* We just indicate the resume intent here. The actual stepping happens in
2071 record_btrace_wait below.
2073 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2074 if (!target_is_non_stop_p ())
2076 gdb_assert (ptid_match (inferior_ptid, ptid));
2078 ALL_NON_EXITED_THREADS (tp)
2079 if (ptid_match (tp->ptid, ptid))
/* The current thread gets the step/cont flag; all other matching threads
   get the continue flag (CFLAG).  */
2081 if (ptid_match (tp->ptid, inferior_ptid))
2082 record_btrace_resume_thread (tp, flag)
2084 record_btrace_resume_thread (tp, cflag);
/* Non-stop: resume exactly the requested threads with FLAG.  */
2089 ALL_NON_EXITED_THREADS (tp)
2090 if (ptid_match (tp->ptid, ptid))
2091 record_btrace_resume_thread (tp, flag);
2094 /* Async support. */
2095 if (target_can_async_p ())
2098 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2102 /* The to_commit_resume method of target record-btrace. */
/* Forward commit_resume to the target beneath only when we are not
   replaying and not executing in reverse.  */
2105 record_btrace_commit_resume (struct target_ops *ops)
2107 if ((execution_direction != EXEC_REVERSE)
2108 && !record_btrace_is_replaying (ops, minus_one_ptid))
2109 ops->beneath->to_commit_resume (ops->beneath);
2112 /* Cancel resuming TP. */
2115 record_btrace_cancel_resume (struct thread_info *tp)
2117 enum btrace_thread_flag flags;
/* Nothing to do unless a move or stop request is pending.  */
2119 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2123 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2124 print_thread_id (tp),
2125 target_pid_to_str (tp->ptid), flags,
2126 btrace_thread_flag_to_str (flags));
2128 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2129 record_btrace_stop_replaying_at_end (tp);
2132 /* Return a target_waitstatus indicating that we ran out of history. */
/* NOTE(review): the `return status;` lines of these small constructors are
   omitted from this extract.  */
2134 static struct target_waitstatus
2135 btrace_step_no_history (void)
2137 struct target_waitstatus status;
2139 status.kind = TARGET_WAITKIND_NO_HISTORY;
2144 /* Return a target_waitstatus indicating that a step finished. */
2146 static struct target_waitstatus
2147 btrace_step_stopped (void)
2149 struct target_waitstatus status;
2151 status.kind = TARGET_WAITKIND_STOPPED;
2152 status.value.sig = GDB_SIGNAL_TRAP;
2157 /* Return a target_waitstatus indicating that a thread was stopped as
2160 static struct target_waitstatus
2161 btrace_step_stopped_on_request (void)
2163 struct target_waitstatus status;
/* GDB_SIGNAL_0: the stop was requested, not caused by a signal/trap.  */
2165 status.kind = TARGET_WAITKIND_STOPPED;
2166 status.value.sig = GDB_SIGNAL_0;
2171 /* Return a target_waitstatus indicating a spurious stop. */
2173 static struct target_waitstatus
2174 btrace_step_spurious (void)
2176 struct target_waitstatus status;
2178 status.kind = TARGET_WAITKIND_SPURIOUS;
2183 /* Return a target_waitstatus indicating that the thread was not resumed. */
2185 static struct target_waitstatus
2186 btrace_step_no_resumed (void)
2188 struct target_waitstatus status;
2190 status.kind = TARGET_WAITKIND_NO_RESUMED;
2195 /* Return a target_waitstatus indicating that we should wait again. */
2197 static struct target_waitstatus
2198 btrace_step_again (void)
2200 struct target_waitstatus status;
2202 status.kind = TARGET_WAITKIND_IGNORE;
2207 /* Clear the record histories. */
/* Free and reset both the instruction and the call history so they are
   rebuilt from the current position on next use.  */
2210 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2212 xfree (btinfo->insn_history);
2213 xfree (btinfo->call_history);
2215 btinfo->insn_history = NULL;
2216 btinfo->call_history = NULL;
2219 /* Check whether TP's current replay position is at a breakpoint. */
/* NOTE(review): this extract omits intermediate source lines (braces,
   early returns, loop openers); comments describe only the visible code.  */
2222 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2224 struct btrace_insn_iterator *replay;
2225 struct btrace_thread_info *btinfo;
2226 const struct btrace_insn *insn;
2227 struct inferior *inf;
2229 btinfo = &tp->btrace;
2230 replay = btinfo->replay;
2235 insn = btrace_insn_get (replay);
2239 inf = find_inferior_ptid (tp->ptid);
/* Delegate the breakpoint check to the record layer, which also records
   the stop reason in BTINFO.  */
2243 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2244 &btinfo->stop_reason);
2247 /* Step one instruction in forward direction. */
2249 static struct target_waitstatus
2250 record_btrace_single_step_forward (struct thread_info *tp)
2252 struct btrace_insn_iterator *replay, end, start;
2253 struct btrace_thread_info *btinfo;
2255 btinfo = &tp->btrace;
2256 replay = btinfo->replay;
2258 /* We're done if we're not replaying. */
2260 return btrace_step_no_history ();
2262 /* Check if we're stepping a breakpoint. */
2263 if (record_btrace_replay_at_breakpoint (tp))
2264 return btrace_step_stopped ();
2266 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2267 jump back to the instruction at which we started. */
2273 /* We will bail out here if we continue stepping after reaching the end
2274 of the execution history. */
2275 steps = btrace_insn_next (replay, 1);
2279 return btrace_step_no_history ();
2282 while (btrace_insn_get (replay) == NULL);
2284 /* Determine the end of the instruction trace. */
2285 btrace_insn_end (&end, btinfo);
2287 /* The execution trace contains (and ends with) the current instruction.
2288 This instruction has not been executed, yet, so the trace really ends
2289 one instruction earlier. */
2290 if (btrace_insn_cmp (replay, &end) == 0)
2291 return btrace_step_no_history ();
2293 return btrace_step_spurious ();
2296 /* Step one instruction in backward direction. */
2298 static struct target_waitstatus
2299 record_btrace_single_step_backward (struct thread_info *tp)
2301 struct btrace_insn_iterator *replay, start;
2302 struct btrace_thread_info *btinfo;
2304 btinfo = &tp->btrace;
2305 replay = btinfo->replay;
2307 /* Start replaying if we're not already doing so. */
2309 replay = record_btrace_start_replaying (tp);
2311 /* If we can't step any further, we reached the end of the history.
2312 Skip gaps during replay. If we end up at a gap (at the beginning of
2313 the trace), jump back to the instruction at which we started. */
2319 steps = btrace_insn_prev (replay, 1);
2323 return btrace_step_no_history ();
2326 while (btrace_insn_get (replay) == NULL);
2328 /* Check if we're stepping a breakpoint.
2330 For reverse-stepping, this check is after the step. There is logic in
2331 infrun.c that handles reverse-stepping separately. See, for example,
2332 proceed and adjust_pc_after_break.
2334 This code assumes that for reverse-stepping, PC points to the last
2335 de-executed instruction, whereas for forward-stepping PC points to the
2336 next to-be-executed instruction. */
2337 if (record_btrace_replay_at_breakpoint (tp))
2338 return btrace_step_stopped ();
2340 return btrace_step_spurious ();
2343 /* Step a single thread. */
/* NOTE(review): this extract omits the switch scaffolding over FLAGS and
   several case labels; comments describe only the visible code.  */
2345 static struct target_waitstatus
2346 record_btrace_step_thread (struct thread_info *tp)
2348 struct btrace_thread_info *btinfo;
2349 struct target_waitstatus status;
2350 enum btrace_thread_flag flags;
2352 btinfo = &tp->btrace;
/* Consume the pending move/stop request for this thread.  */
2354 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2355 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2357 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2358 target_pid_to_str (tp->ptid), flags,
2359 btrace_thread_flag_to_str (flags));
2361 /* We can't step without an execution history. */
2362 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2363 return btrace_step_no_history ();
2368 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
/* BTHR_STOP: report a requested stop.  */
2371 return btrace_step_stopped_on_request ();
/* BTHR_STEP: a single forward step; non-spurious results are final.  */
2374 status = record_btrace_single_step_forward (tp);
2375 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2378 return btrace_step_stopped ();
/* BTHR_RSTEP: a single backward step.  */
2381 status = record_btrace_single_step_backward (tp);
2382 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2385 return btrace_step_stopped ();
/* BTHR_CONT: keep stepping forward; re-arm FLAGS and ask to be called
   again until a non-spurious status is produced.  */
2388 status = record_btrace_single_step_forward (tp);
2389 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2392 btinfo->flags |= flags;
2393 return btrace_step_again ();
/* BTHR_RCONT: same, stepping backward.  */
2396 status = record_btrace_single_step_backward (tp);
2397 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2400 btinfo->flags |= flags;
2401 return btrace_step_again ();
2404 /* We keep threads moving at the end of their execution history. The to_wait
2405 method will stop the thread for whom the event is reported. */
2406 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2407 btinfo->flags |= flags;
2412 /* A vector of threads. */
2414 typedef struct thread_info * tp_t;
2417 /* Announce further events if necessary. */
/* If any thread is still moving or has pending no-history events, keep the
   async event handler armed so to_wait gets called again.  */
2420 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2421 const VEC (tp_t) *no_history)
2423 int more_moving, more_no_history;
2425 more_moving = !VEC_empty (tp_t, moving);
2426 more_no_history = !VEC_empty (tp_t, no_history);
2428 if (!more_moving && !more_no_history)
2432 DEBUG ("movers pending");
2434 if (more_no_history)
2435 DEBUG ("no-history pending");
2437 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2440 /* The to_wait method of target record-btrace. */
/* NOTE(review): this extract omits intermediate source lines (vector
   initialization, loop/brace scaffolding, some case labels); comments
   describe only the visible code.  */
2443 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2444 struct target_waitstatus *status, int options)
2446 VEC (tp_t) *moving, *no_history;
2447 struct thread_info *tp, *eventing;
2448 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2450 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2452 /* As long as we're not replaying, just forward the request. */
2453 if ((execution_direction != EXEC_REVERSE)
2454 && !record_btrace_is_replaying (ops, minus_one_ptid))
2457 return ops->to_wait (ops, ptid, status, options);
/* Both vectors are freed via the cleanup chain on every exit path.  */
2463 make_cleanup (VEC_cleanup (tp_t), &moving);
2464 make_cleanup (VEC_cleanup (tp_t), &no_history);
2466 /* Keep a work list of moving threads. */
2467 ALL_NON_EXITED_THREADS (tp)
2468 if (ptid_match (tp->ptid, ptid)
2469 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2470 VEC_safe_push (tp_t, moving, tp);
/* No thread is resumed: report TARGET_WAITKIND_NO_RESUMED.  */
2472 if (VEC_empty (tp_t, moving))
2474 *status = btrace_step_no_resumed ();
2476 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2477 target_waitstatus_to_string (status).c_str ());
2479 do_cleanups (cleanups);
2483 /* Step moving threads one by one, one step each, until either one thread
2484 reports an event or we run out of threads to step.
2486 When stepping more than one thread, chances are that some threads reach
2487 the end of their execution history earlier than others. If we reported
2488 this immediately, all-stop on top of non-stop would stop all threads and
2489 resume the same threads next time. And we would report the same thread
2490 having reached the end of its execution history again.
2492 In the worst case, this would starve the other threads. But even if other
2493 threads would be allowed to make progress, this would result in far too
2494 many intermediate stops.
2496 We therefore delay the reporting of "no execution history" until we have
2497 nothing else to report. By this time, all threads should have moved to
2498 either the beginning or the end of their execution history. There will
2499 be a single user-visible stop. */
2501 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2506 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2508 *status = record_btrace_step_thread (tp);
2510 switch (status->kind)
2512 case TARGET_WAITKIND_IGNORE:
/* Thread keeps moving; leave it on the work list.  */
2516 case TARGET_WAITKIND_NO_HISTORY:
/* Park the thread on the no-history list; report it only if nothing
   else produces an event (see comment above).  */
2517 VEC_safe_push (tp_t, no_history,
2518 VEC_ordered_remove (tp_t, moving, ix));
/* Any other kind: this thread produced the event to report.  */
2522 eventing = VEC_unordered_remove (tp_t, moving, ix);
2528 if (eventing == NULL)
2530 /* We started with at least one moving thread. This thread must have
2531 either stopped or reached the end of its execution history.
2533 In the former case, EVENTING must not be NULL.
2534 In the latter case, NO_HISTORY must not be empty. */
2535 gdb_assert (!VEC_empty (tp_t, no_history));
2537 /* We kept threads moving at the end of their execution history. Stop
2538 EVENTING now that we are going to report its stop. */
2539 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2540 eventing->btrace.flags &= ~BTHR_MOVE;
2542 *status = btrace_step_no_history ();
2545 gdb_assert (eventing != NULL);
2547 /* We kept threads replaying at the end of their execution history. Stop
2548 replaying EVENTING now that we are going to report its stop. */
2549 record_btrace_stop_replaying_at_end (eventing);
2551 /* Stop all other threads. */
2552 if (!target_is_non_stop_p ())
2553 ALL_NON_EXITED_THREADS (tp)
2554 record_btrace_cancel_resume (tp);
2556 /* In async mode, we need to announce further events. */
2557 if (target_is_async_p ())
2558 record_btrace_maybe_mark_async_event (moving, no_history);
2560 /* Start record histories anew from the current position. */
2561 record_btrace_clear_histories (&eventing->btrace);
2563 /* We moved the replay position but did not update registers. */
2564 registers_changed_ptid (eventing->ptid);
2566 DEBUG ("wait ended by thread %s (%s): %s",
2567 print_thread_id (eventing),
2568 target_pid_to_str (eventing->ptid),
2569 target_waitstatus_to_string (status).c_str ());
2571 do_cleanups (cleanups);
2572 return eventing->ptid;
2575 /* The to_stop method of target record-btrace. */
/* NOTE(review): this extract omits intermediate source lines (braces,
   returns); comments describe only the visible code.  */
2578 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2580 DEBUG ("stop %s", target_pid_to_str (ptid));
2582 /* As long as we're not replaying, just forward the request. */
2583 if ((execution_direction != EXEC_REVERSE)
2584 && !record_btrace_is_replaying (ops, minus_one_ptid))
2587 ops->to_stop (ops, ptid);
/* Replaying: replace any pending move request with a stop request; the
   stop itself is reported by record_btrace_wait.  */
2591 struct thread_info *tp;
2593 ALL_NON_EXITED_THREADS (tp)
2594 if (ptid_match (tp->ptid, ptid))
2596 tp->btrace.flags &= ~BTHR_MOVE;
2597 tp->btrace.flags |= BTHR_STOP;
2602 /* The to_can_execute_reverse method of target record-btrace. */
/* The return value is omitted from this extract — presumably 1.  */
2605 record_btrace_can_execute_reverse (struct target_ops *self)
2610 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
/* During replay, answer from the recorded stop reason; otherwise delegate
   to the target beneath.  */
2613 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2615 if (record_btrace_is_replaying (ops, minus_one_ptid))
2617 struct thread_info *tp = inferior_thread ();
2619 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2622 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2625 /* The to_supports_stopped_by_sw_breakpoint method of target
2629 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2631 if (record_btrace_is_replaying (ops, minus_one_ptid))
2634 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2637 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2640 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2642 if (record_btrace_is_replaying (ops, minus_one_ptid))
2644 struct thread_info *tp = inferior_thread ();
2646 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2649 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2652 /* The to_supports_stopped_by_hw_breakpoint method of target
2656 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2658 if (record_btrace_is_replaying (ops, minus_one_ptid))
2661 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2664 /* The to_update_thread_list method of target record-btrace. */
/* NOTE(review): this extract omits intermediate source lines (braces,
   returns); comments describe only the visible code.  */
2667 record_btrace_update_thread_list (struct target_ops *ops)
2669 /* We don't add or remove threads during replay. */
2670 if (record_btrace_is_replaying (ops, minus_one_ptid))
2673 /* Forward the request. */
2675 ops->to_update_thread_list (ops);
2678 /* The to_thread_alive method of target record-btrace. */
2681 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2683 /* We don't add or remove threads during replay. */
2684 if (record_btrace_is_replaying (ops, minus_one_ptid))
2685 return find_thread_ptid (ptid) != NULL;
2687 /* Forward the request. */
2689 return ops->to_thread_alive (ops, ptid);
2692 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2696 record_btrace_set_replay (struct thread_info *tp,
2697 const struct btrace_insn_iterator *it)
2699 struct btrace_thread_info *btinfo;
2701 btinfo = &tp->btrace;
/* IT == NULL (condition omitted from this extract): stop replaying.  */
2704 record_btrace_stop_replaying (tp);
/* Otherwise start replaying if needed, or move the existing iterator;
   moving invalidates cached register values.  */
2707 if (btinfo->replay == NULL)
2708 record_btrace_start_replaying (tp);
2709 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2712 *btinfo->replay = *it;
2713 registers_changed_ptid (tp->ptid);
2716 /* Start anew from the new replay position. */
2717 record_btrace_clear_histories (btinfo);
2719 stop_pc = regcache_read_pc (get_current_regcache ());
2720 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2723 /* The to_goto_record_begin method of target record-btrace. */
/* NOTE(review): this extract omits intermediate source lines (braces,
   declarations, the wrap-around condition); comments describe only the
   visible code.  */
2726 record_btrace_goto_begin (struct target_ops *self)
2728 struct thread_info *tp;
2729 struct btrace_insn_iterator begin;
2731 tp = require_btrace_thread ();
2733 btrace_insn_begin (&begin, &tp->btrace);
2735 /* Skip gaps at the beginning of the trace. */
2736 while (btrace_insn_get (&begin) == NULL)
2740 steps = btrace_insn_next (&begin, 1);
/* If stepping past the gaps fails, there is no usable trace.  */
2742 error (_("No trace."));
2745 record_btrace_set_replay (tp, &begin);
2748 /* The to_goto_record_end method of target record-btrace. */
/* A NULL iterator means "stop replaying", i.e. go to the end.  */
2751 record_btrace_goto_end (struct target_ops *ops)
2753 struct thread_info *tp;
2755 tp = require_btrace_thread ();
2757 record_btrace_set_replay (tp, NULL);
2760 /* The to_goto_record method of target record-btrace. */
2763 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2765 struct thread_info *tp;
2766 struct btrace_insn_iterator it;
2767 unsigned int number;
/* INSN is narrowed to unsigned int; reject values that would wrap.  */
2772 /* Check for wrap-arounds. */
2774 error (_("Instruction number out of range."));
2776 tp = require_btrace_thread ();
2778 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2780 /* Check if the instruction could not be found or is a gap. */
2781 if (found == 0 || btrace_insn_get (&it) == NULL)
2782 error (_("No such instruction."));
2784 record_btrace_set_replay (tp, &it);
2787 /* The to_record_stop_replaying method of target record-btrace. */
/* Stop replaying every live thread.  */
2790 record_btrace_stop_replaying_all (struct target_ops *self)
2792 struct thread_info *tp;
2794 ALL_NON_EXITED_THREADS (tp)
2795 record_btrace_stop_replaying (tp);
2798 /* The to_execution_direction target method. */
/* Report the direction remembered from the last to_resume call.  */
2800 static enum exec_direction_kind
2801 record_btrace_execution_direction (struct target_ops *self)
2803 return record_btrace_resume_exec_dir;
2806 /* The to_prepare_to_generate_core target method. */
/* While this flag is set, register/memory accesses bypass the replay
   restrictions so a core file can be written.  */
2809 record_btrace_prepare_to_generate_core (struct target_ops *self)
2811 record_btrace_generating_corefile = 1;
2814 /* The to_done_generating_core target method. */
2817 record_btrace_done_generating_core (struct target_ops *self)
2819 record_btrace_generating_corefile = 0;
2822 /* Initialize the record-btrace target ops. */
2825 init_record_btrace_ops (void)
2827 struct target_ops *ops;
2829 ops = &record_btrace_ops;
2830 ops->to_shortname = "record-btrace";
2831 ops->to_longname = "Branch tracing target";
2832 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2833 ops->to_open = record_btrace_open;
2834 ops->to_close = record_btrace_close;
2835 ops->to_async = record_btrace_async;
2836 ops->to_detach = record_detach;
2837 ops->to_disconnect = record_btrace_disconnect;
2838 ops->to_mourn_inferior = record_mourn_inferior;
2839 ops->to_kill = record_kill;
2840 ops->to_stop_recording = record_btrace_stop_recording;
2841 ops->to_info_record = record_btrace_info;
2842 ops->to_insn_history = record_btrace_insn_history;
2843 ops->to_insn_history_from = record_btrace_insn_history_from;
2844 ops->to_insn_history_range = record_btrace_insn_history_range;
2845 ops->to_call_history = record_btrace_call_history;
2846 ops->to_call_history_from = record_btrace_call_history_from;
2847 ops->to_call_history_range = record_btrace_call_history_range;
2848 ops->to_record_method = record_btrace_record_method;
2849 ops->to_record_is_replaying = record_btrace_is_replaying;
2850 ops->to_record_will_replay = record_btrace_will_replay;
2851 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2852 ops->to_xfer_partial = record_btrace_xfer_partial;
2853 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2854 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2855 ops->to_fetch_registers = record_btrace_fetch_registers;
2856 ops->to_store_registers = record_btrace_store_registers;
2857 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2858 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2859 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2860 ops->to_resume = record_btrace_resume;
2861 ops->to_commit_resume = record_btrace_commit_resume;
2862 ops->to_wait = record_btrace_wait;
2863 ops->to_stop = record_btrace_stop;
2864 ops->to_update_thread_list = record_btrace_update_thread_list;
2865 ops->to_thread_alive = record_btrace_thread_alive;
2866 ops->to_goto_record_begin = record_btrace_goto_begin;
2867 ops->to_goto_record_end = record_btrace_goto_end;
2868 ops->to_goto_record = record_btrace_goto;
2869 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2870 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2871 ops->to_supports_stopped_by_sw_breakpoint
2872 = record_btrace_supports_stopped_by_sw_breakpoint;
2873 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2874 ops->to_supports_stopped_by_hw_breakpoint
2875 = record_btrace_supports_stopped_by_hw_breakpoint;
2876 ops->to_execution_direction = record_btrace_execution_direction;
2877 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2878 ops->to_done_generating_core = record_btrace_done_generating_core;
2879 ops->to_stratum = record_stratum;
2880 ops->to_magic = OPS_MAGIC;
2883 /* Start recording in BTS format. */
2886 cmd_record_btrace_bts_start (const char *args, int from_tty)
2888 if (args != NULL && *args != 0)
2889 error (_("Invalid argument."));
2891 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2895 execute_command ((char *) "target record-btrace", from_tty);
2897 CATCH (exception, RETURN_MASK_ALL)
2899 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2900 throw_exception (exception);
2905 /* Start recording in Intel Processor Trace format. */
2908 cmd_record_btrace_pt_start (const char *args, int from_tty)
2910 if (args != NULL && *args != 0)
2911 error (_("Invalid argument."));
2913 record_btrace_conf.format = BTRACE_FORMAT_PT;
2917 execute_command ((char *) "target record-btrace", from_tty);
2919 CATCH (exception, RETURN_MASK_ALL)
2921 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2922 throw_exception (exception);
2927 /* Alias for "target record". */
2930 cmd_record_btrace_start (char *args, int from_tty)
2932 if (args != NULL && *args != 0)
2933 error (_("Invalid argument."));
2935 record_btrace_conf.format = BTRACE_FORMAT_PT;
2939 execute_command ((char *) "target record-btrace", from_tty);
2941 CATCH (exception, RETURN_MASK_ALL)
2943 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2947 execute_command ((char *) "target record-btrace", from_tty);
2949 CATCH (exception, RETURN_MASK_ALL)
2951 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2952 throw_exception (exception);
2959 /* The "set record btrace" command. */
2962 cmd_set_record_btrace (char *args, int from_tty)
2964 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2967 /* The "show record btrace" command. */
2970 cmd_show_record_btrace (char *args, int from_tty)
2972 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2975 /* The "show record btrace replay-memory-access" command. */
2978 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2979 struct cmd_list_element *c, const char *value)
2981 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2982 replay_memory_access);
2985 /* The "set record btrace bts" command. */
2988 cmd_set_record_btrace_bts (char *args, int from_tty)
2990 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2991 "by an appropriate subcommand.\n"));
2992 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2993 all_commands, gdb_stdout);
2996 /* The "show record btrace bts" command. */
2999 cmd_show_record_btrace_bts (char *args, int from_tty)
3001 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3004 /* The "set record btrace pt" command. */
3007 cmd_set_record_btrace_pt (char *args, int from_tty)
3009 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3010 "by an appropriate subcommand.\n"));
3011 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3012 all_commands, gdb_stdout);
3015 /* The "show record btrace pt" command. */
3018 cmd_show_record_btrace_pt (char *args, int from_tty)
3020 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
/* The "record bts buffer-size" show value function.
   VALUE is the pre-rendered string form of the setting.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.
   VALUE is the pre-rendered string form of the setting.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3045 /* Initialize btrace commands. */
3048 _initialize_record_btrace (void)
3050 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3051 _("Start branch trace recording."), &record_btrace_cmdlist,
3052 "record btrace ", 0, &record_cmdlist);
3053 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3055 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3057 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3058 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3059 This format may not be available on all processors."),
3060 &record_btrace_cmdlist);
3061 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3063 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3065 Start branch trace recording in Intel Processor Trace format.\n\n\
3066 This format may not be available on all processors."),
3067 &record_btrace_cmdlist);
3068 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3070 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3071 _("Set record options"), &set_record_btrace_cmdlist,
3072 "set record btrace ", 0, &set_record_cmdlist);
3074 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3075 _("Show record options"), &show_record_btrace_cmdlist,
3076 "show record btrace ", 0, &show_record_cmdlist);
3078 add_setshow_enum_cmd ("replay-memory-access", no_class,
3079 replay_memory_access_types, &replay_memory_access, _("\
3080 Set what memory accesses are allowed during replay."), _("\
3081 Show what memory accesses are allowed during replay."),
3082 _("Default is READ-ONLY.\n\n\
3083 The btrace record target does not trace data.\n\
3084 The memory therefore corresponds to the live target and not \
3085 to the current replay position.\n\n\
3086 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3087 When READ-WRITE, allow accesses to read-only and read-write memory during \
3089 NULL, cmd_show_replay_memory_access,
3090 &set_record_btrace_cmdlist,
3091 &show_record_btrace_cmdlist);
3093 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3094 _("Set record btrace bts options"),
3095 &set_record_btrace_bts_cmdlist,
3096 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3098 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3099 _("Show record btrace bts options"),
3100 &show_record_btrace_bts_cmdlist,
3101 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3103 add_setshow_uinteger_cmd ("buffer-size", no_class,
3104 &record_btrace_conf.bts.size,
3105 _("Set the record/replay bts buffer size."),
3106 _("Show the record/replay bts buffer size."), _("\
3107 When starting recording request a trace buffer of this size. \
3108 The actual buffer size may differ from the requested size. \
3109 Use \"info record\" to see the actual buffer size.\n\n\
3110 Bigger buffers allow longer recording but also take more time to process \
3111 the recorded execution trace.\n\n\
3112 The trace buffer size may not be changed while recording."), NULL,
3113 show_record_bts_buffer_size_value,
3114 &set_record_btrace_bts_cmdlist,
3115 &show_record_btrace_bts_cmdlist);
3117 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3118 _("Set record btrace pt options"),
3119 &set_record_btrace_pt_cmdlist,
3120 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3122 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3123 _("Show record btrace pt options"),
3124 &show_record_btrace_pt_cmdlist,
3125 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3127 add_setshow_uinteger_cmd ("buffer-size", no_class,
3128 &record_btrace_conf.pt.size,
3129 _("Set the record/replay pt buffer size."),
3130 _("Show the record/replay pt buffer size."), _("\
3131 Bigger buffers allow longer recording but also take more time to process \
3132 the recorded execution.\n\
3133 The actual buffer size may differ from the requested size. Use \"info record\" \
3134 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3135 &set_record_btrace_pt_cmdlist,
3136 &show_record_btrace_pt_cmdlist);
3138 init_record_btrace_ops ();
3139 add_target (&record_btrace_ops);
3141 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3144 record_btrace_conf.bts.size = 64 * 1024;
3145 record_btrace_conf.pt.size = 16 * 1024;