1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
47 /* Token associated with a new-thread observer enabling branch tracing
48 for the new thread. */
49 static const gdb::observers::token record_btrace_thread_observer_token;
51 /* Memory access types used in set/show record btrace replay-memory-access. */
52 static const char replay_memory_access_read_only[] = "read-only";
53 static const char replay_memory_access_read_write[] = "read-write";
54 static const char *const replay_memory_access_types[] =
56 replay_memory_access_read_only,
57 replay_memory_access_read_write,
/* The currently allowed replay memory access type.  Compared by pointer
   identity against the two constants above (see record_btrace_xfer_partial),
   not by string content.  */
61 /* The currently allowed replay memory access type. */
62 static const char *replay_memory_access = replay_memory_access_read_only;
64 /* Command lists for "set/show record btrace". */
65 static struct cmd_list_element *set_record_btrace_cmdlist;
66 static struct cmd_list_element *show_record_btrace_cmdlist;
68 /* The execution direction of the last resume we got. See record-full.c. */
69 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
71 /* The async event handler for reverse/replay execution. */
72 static struct async_event_handler *record_btrace_async_inferior_event_handler;
74 /* A flag indicating that we are currently generating a core file. */
75 static int record_btrace_generating_corefile;
77 /* The current branch trace configuration. */
78 static struct btrace_config record_btrace_conf;
80 /* Command list for "record btrace". */
81 static struct cmd_list_element *record_btrace_cmdlist;
83 /* Command lists for "set/show record btrace bts". */
84 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
85 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
87 /* Command lists for "set/show record btrace pt". */
88 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
89 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
91 /* Print a record-btrace debug message. Use do ... while (0) to avoid
92 ambiguities when used in if statements. */
/* NOTE: relies on the GCC named-variadic-macro extension ("args..." plus
   "##args" to swallow the comma when no arguments are given).  */
94 #define DEBUG(msg, args...) \
97 if (record_debug != 0) \
98 fprintf_unfiltered (gdb_stdlog, \
99 "[record-btrace] " msg "\n", ##args); \
104 /* Update the branch trace for the current thread and return a pointer to its
107 Throws an error if there is no thread or no trace. This function never
110 static struct thread_info *
111 require_btrace_thread (void)
113 struct thread_info *tp;
/* Look up the thread for INFERIOR_PTID; a null result means there is no
   current thread to trace.  */
117 tp = find_thread_ptid (inferior_ptid)​;
119 error (_("No thread."));
/* Accessing the trace implies register access; reject the request in
   contexts where that is not permitted (e.g. while the thread is running).  */
121 validate_registers_access ();
125 if (btrace_is_empty (tp))
126 error (_("No trace."));
131 /* Update the branch trace for the current thread and return a pointer to its
132 branch trace information struct.
134 Throws an error if there is no thread or no trace. This function never
137 static struct btrace_thread_info *
138 require_btrace (void)
140 struct thread_info *tp;
/* Delegates thread lookup and trace validation to require_btrace_thread,
   then (presumably) returns &tp->btrace -- the tail of this function is not
   visible here.  */
142 tp = require_btrace_thread ();
147 /* Enable branch tracing for one thread. Warn on errors. */
150 record_btrace_enable_warn (struct thread_info *tp)
/* Errors from btrace_enable are downgraded to warnings so that a failure
   on one new thread does not abort whatever triggered the observer.  */
154 btrace_enable (tp, &record_btrace_conf);
156 CATCH (error, RETURN_MASK_ERROR)
158 warning ("%s", error.message);
163 /* Enable automatic tracing of new threads. */
166 record_btrace_auto_enable (void)
168 DEBUG ("attach thread observer");
170 gdb::observers::new_thread.attach (record_btrace_enable_warn,
171 record_btrace_thread_observer_token);
174 /* Disable automatic tracing of new threads. */
177 record_btrace_auto_disable (void)
179 DEBUG ("detach thread observer");
181 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
184 /* The record-btrace async event handler function. */
187 record_btrace_handle_async_inferior_event (gdb_client_data data)
189 inferior_event_handler (INF_REG_EVENT, NULL);
192 /* See record-btrace.h. */
195 record_btrace_push_target (void)
/* Start auto-tracing new threads before pushing the target so threads
   created from here on are covered.  */
199 record_btrace_auto_enable ();
201 push_target (&record_btrace_ops);
203 record_btrace_async_inferior_event_handler
204 = create_async_event_handler (record_btrace_handle_async_inferior_event,
206 record_btrace_generating_corefile = 0;
208 format = btrace_format_short_string (record_btrace_conf.format);
209 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
212 /* Disable btrace on a set of threads on scope exit. */
214 struct scoped_btrace_disable
216 scoped_btrace_disable () = default;
218 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
/* RAII rollback: on destruction, disable btrace for every thread that was
   registered via add_thread.  A discard member (used by record_btrace_open,
   not visible in this excerpt) presumably clears the list to commit.  */
220 ~scoped_btrace_disable ()
222 for (thread_info *tp : m_threads)
226 void add_thread (thread_info *thread)
228 m_threads.push_front (thread);
237 std::forward_list<thread_info *> m_threads;
240 /* The to_open method of target record-btrace. */
243 record_btrace_open (const char *args, int from_tty)
245 /* If we fail to enable btrace for one thread, disable it for the threads for
246 which it was successfully enabled. */
247 scoped_btrace_disable btrace_disable;
248 struct thread_info *tp;
254 if (!target_has_execution)
255 error (_("The program is not being run."));
/* ARGS is an optional thread-number list; with no argument, trace every
   non-exited thread.  */
257 ALL_NON_EXITED_THREADS (tp)
258 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
260 btrace_enable (tp, &record_btrace_conf);
262 btrace_disable.add_thread (tp);
265 record_btrace_push_target ();
/* All threads enabled and the target pushed: commit, so the scoped guard
   does not disable tracing again on exit.  */
267 btrace_disable.discard ();
270 /* The to_stop_recording method of target record-btrace. */
273 record_btrace_stop_recording (struct target_ops *self)
275 struct thread_info *tp;
277 DEBUG ("stop recording");
279 record_btrace_auto_disable ();
/* Only threads with an active btrace target need disabling.  */
281 ALL_NON_EXITED_THREADS (tp)
282 if (tp->btrace.target != NULL)
286 /* The to_disconnect method of target record-btrace. */
289 record_btrace_disconnect (struct target_ops *self, const char *args,
/* Capture the target beneath before unpushing SELF, since unpush may
   invalidate the target stack linkage.  */
292 struct target_ops *beneath = self->beneath;
294 /* Do not stop recording, just clean up GDB side. */
295 unpush_target (self);
297 /* Forward disconnect. */
298 beneath->to_disconnect (beneath, args, from_tty);
301 /* The to_close method of target record-btrace. */
304 record_btrace_close (struct target_ops *self)
306 struct thread_info *tp;
308 if (record_btrace_async_inferior_event_handler != NULL)
309 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
311 /* Make sure automatic recording gets disabled even if we did not stop
312 recording before closing the record-btrace target. */
313 record_btrace_auto_disable ();
315 /* We should have already stopped recording.
316 Tear down btrace in case we have not. */
317 ALL_NON_EXITED_THREADS (tp)
318 btrace_teardown (tp);
321 /* The to_async method of target record-btrace. */
324 record_btrace_async (struct target_ops *ops, int enable)
327 mark_async_event_handler (record_btrace_async_inferior_event_handler);
329 clear_async_event_handler (record_btrace_async_inferior_event_handler);
/* Forward the async request to the target beneath.  */
331 ops->beneath->to_async (ops->beneath, enable);
334 /* Adjusts the size and returns a human readable size suffix. */
337 record_btrace_adjust_size (unsigned int *size)
/* Pick the largest power-of-1024 unit that divides SIZE exactly, scaling
   *SIZE down accordingly (GB, then MB, then KB; suffix strings are on the
   lines elided from this excerpt).  */
343 if ((sz & ((1u << 30) - 1)) == 0)
348 else if ((sz & ((1u << 20) - 1)) == 0)
353 else if ((sz & ((1u << 10) - 1)) == 0)
362 /* Print a BTS configuration. */
365 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
373 suffix = record_btrace_adjust_size (&size);
374 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 /* Print an Intel Processor Trace configuration. */
381 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
389 suffix = record_btrace_adjust_size (&size);
390 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
394 /* Print a branch tracing configuration. */
397 record_btrace_print_conf (const struct btrace_config *conf)
399 printf_unfiltered (_("Recording format: %s.\n"),
400 btrace_format_string (conf->format));
402 switch (conf->format)
404 case BTRACE_FORMAT_NONE:
407 case BTRACE_FORMAT_BTS:
408 record_btrace_print_bts_conf (&conf->bts);
411 case BTRACE_FORMAT_PT:
412 record_btrace_print_pt_conf (&conf->pt);
416 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
419 /* The to_info_record method of target record-btrace. */
422 record_btrace_info (struct target_ops *self)
424 struct btrace_thread_info *btinfo;
425 const struct btrace_config *conf;
426 struct thread_info *tp;
427 unsigned int insns, calls, gaps;
431 tp = find_thread_ptid (inferior_ptid);
433 error (_("No thread."));
435 validate_registers_access ();
437 btinfo = &tp->btrace;
439 conf = btrace_conf (btinfo);
441 record_btrace_print_conf (conf);
449 if (!btrace_is_empty (tp))
451 struct btrace_call_iterator call;
452 struct btrace_insn_iterator insn;
/* Count calls: the number of the last call segment.  */
454 btrace_call_end (&call, btinfo);
455 btrace_call_prev (&call, 1);
456 calls = btrace_call_number (&call);
/* Count instructions: the number of the one-past-the-end iterator.  */
458 btrace_insn_end (&insn, btinfo);
459 insns = btrace_insn_number (&insn);
461 /* If the last instruction is not a gap, it is the current instruction
462 that is not actually part of the record. */
463 if (btrace_insn_get (&insn) != NULL)
466 gaps = btinfo->ngaps;
469 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
470 "for thread %s (%s).\n"), insns, calls, gaps,
471 print_thread_id (tp), target_pid_to_str (tp->ptid));
473 if (btrace_is_replaying (tp))
474 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
475 btrace_insn_number (btinfo->replay));
478 /* Print a decode error. */
481 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
482 enum btrace_format format)
484 const char *errstr = btrace_decode_error (format, errcode);
486 uiout->text (_("["));
487 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
488 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
490 uiout->text (_("decode error ("));
491 uiout->field_int ("errcode", errcode);
492 uiout->text (_("): "));
494 uiout->text (errstr);
495 uiout->text (_("]\n"));
498 /* Print an unsigned int. */
/* ui_out has no native unsigned field; format VAL via "%u" instead.  */
501 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
503 uiout->field_fmt (fld, "%u", val);
506 /* A range of source lines. */
508 struct btrace_line_range
510 /* The symtab this line is from. */
511 struct symtab *symtab;
513 /* The first line (inclusive). */
516 /* The last line (exclusive). */
520 /* Construct a line range. */
522 static struct btrace_line_range
523 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
525 struct btrace_line_range range;
527 range.symtab = symtab;
534 /* Add a line to a line range. */
536 static struct btrace_line_range
537 btrace_line_range_add (struct btrace_line_range range, int line)
/* An empty range (end <= begin) is replaced by [line, line + 1); otherwise
   the range is widened just enough to include LINE.  */
539 if (range.end <= range.begin)
541 /* This is the first entry. */
543 range.end = line + 1;
545 else if (line < range.begin)
547 else if (range.end < line)
553 /* Return non-zero if RANGE is empty, zero otherwise. */
556 btrace_line_range_is_empty (struct btrace_line_range range)
558 return range.end <= range.begin;
561 /* Return non-zero if LHS contains RHS, zero otherwise. */
/* Containment requires identical symtabs; ranges from different files never
   contain each other.  */
564 btrace_line_range_contains_range (struct btrace_line_range lhs,
565 struct btrace_line_range rhs)
567 return ((lhs.symtab == rhs.symtab)
568 && (lhs.begin <= rhs.begin)
569 && (rhs.end <= lhs.end));
572 /* Find the line range associated with PC. */
574 static struct btrace_line_range
575 btrace_find_line_range (CORE_ADDR pc)
577 struct btrace_line_range range;
578 struct linetable_entry *lines;
579 struct linetable *ltable;
580 struct symtab *symtab;
583 symtab = find_pc_line_symtab (pc);
585 return btrace_mk_line_range (NULL, 0, 0);
587 ltable = SYMTAB_LINETABLE (symtab);
589 return btrace_mk_line_range (symtab, 0, 0);
591 nlines = ltable->nitems;
592 lines = ltable->item;
594 return btrace_mk_line_range (symtab, 0, 0);
596 range = btrace_mk_line_range (symtab, 0, 0);
597 for (i = 0; i < nlines - 1; i++)
599 if ((lines[i].pc == pc) && (lines[i].line != 0))
600 range = btrace_line_range_add (range, lines[i].line);
606 /* Print source lines in LINES to UIOUT.
608 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
609 instructions corresponding to that source line. When printing a new source
610 line, we do the cleanups for the open chain and open a new cleanup chain for
611 the new source line. If the source line range in LINES is not empty, this
612 function will leave the cleanup chain for the last printed source line open
613 so instructions can be added to it. */
616 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
617 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
618 gdb::optional<ui_out_emit_list> *asm_list,
619 gdb_disassembly_flags flags)
621 print_source_lines_flags psl_flags;
623 if (flags & DISASSEMBLY_FILENAME)
624 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
/* For each line, emit a fresh "src_and_asm_line" tuple holding the source
   line followed by a "line_asm_insn" list; emplacing into the optionals
   first destroys (closes) any tuple/list left open by the previous line.  */
626 for (int line = lines.begin; line < lines.end; ++line)
630 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
632 print_source_lines (lines.symtab, line, line + 1, psl_flags);
634 asm_list->emplace (uiout, "line_asm_insn");
638 /* Disassemble a section of the recorded instruction trace. */
641 btrace_insn_history (struct ui_out *uiout,
642 const struct btrace_thread_info *btinfo,
643 const struct btrace_insn_iterator *begin,
644 const struct btrace_insn_iterator *end,
645 gdb_disassembly_flags flags)
647 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
648 btrace_insn_number (begin), btrace_insn_number (end));
/* Trace instructions may be speculative; always tell the disassembler.  */
650 flags |= DISASSEMBLY_SPECULATIVE;
652 struct gdbarch *gdbarch = target_gdbarch ();
653 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
655 ui_out_emit_list list_emitter (uiout, "asm_insns");
657 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
658 gdb::optional<ui_out_emit_list> asm_list;
660 gdb_pretty_print_disassembler disasm (gdbarch);
662 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
663 btrace_insn_next (&it, 1))
665 const struct btrace_insn *insn;
667 insn = btrace_insn_get (&it);
669 /* A NULL instruction indicates a gap in the trace. */
672 const struct btrace_config *conf;
674 conf = btrace_conf (btinfo);
676 /* We have trace so we must have a configuration. */
677 gdb_assert (conf != NULL);
679 uiout->field_fmt ("insn-number", "%u",
680 btrace_insn_number (&it));
683 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
688 struct disasm_insn dinsn;
690 if ((flags & DISASSEMBLY_SOURCE) != 0)
692 struct btrace_line_range lines;
694 lines = btrace_find_line_range (insn->pc);
/* Only open a new source-line tuple when this instruction's lines are not
   already covered by the tuple printed last.  */
695 if (!btrace_line_range_is_empty (lines)
696 && !btrace_line_range_contains_range (last_lines, lines))
698 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
702 else if (!src_and_asm_tuple.has_value ())
704 gdb_assert (!asm_list.has_value ());
706 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
708 /* No source information. */
709 asm_list.emplace (uiout, "line_asm_insn");
712 gdb_assert (src_and_asm_tuple.has_value ());
713 gdb_assert (asm_list.has_value ());
716 memset (&dinsn, 0, sizeof (dinsn));
717 dinsn.number = btrace_insn_number (&it);
718 dinsn.addr = insn->pc;
720 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
721 dinsn.is_speculative = 1;
723 disasm.pretty_print_insn (uiout, &dinsn, flags);
728 /* The to_insn_history method of target record-btrace. */
731 record_btrace_insn_history (struct target_ops *self, int size,
732 gdb_disassembly_flags flags)
734 struct btrace_thread_info *btinfo;
735 struct btrace_insn_history *history;
736 struct btrace_insn_iterator begin, end;
737 struct ui_out *uiout;
738 unsigned int context, covered;
740 uiout = current_uiout;
741 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
/* SIZE's sign selects the direction; its magnitude is the window size.  */
742 context = abs (size);
744 error (_("Bad record instruction-history-size."));
746 btinfo = require_btrace ();
747 history = btinfo->insn_history;
750 struct btrace_insn_iterator *replay;
752 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
754 /* If we're replaying, we start at the replay position. Otherwise, we
755 start at the tail of the trace. */
756 replay = btinfo->replay;
760 btrace_insn_end (&begin, btinfo);
762 /* We start from here and expand in the requested direction. Then we
763 expand in the other direction, as well, to fill up any remaining
768 /* We want the current position covered, as well. */
769 covered = btrace_insn_next (&end, 1);
770 covered += btrace_insn_prev (&begin, context - covered);
771 covered += btrace_insn_next (&end, context - covered);
775 covered = btrace_insn_next (&end, context);
776 covered += btrace_insn_prev (&begin, context - covered);
/* A previous history exists: continue from its boundary in the requested
   direction (the elided branches pick prev/next accordingly).  */
781 begin = history->begin;
784 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
785 btrace_insn_number (&begin), btrace_insn_number (&end));
790 covered = btrace_insn_prev (&begin, context);
795 covered = btrace_insn_next (&end, context);
800 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
804 printf_unfiltered (_("At the start of the branch trace record.\n"));
806 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the printed window for the next repeated command.  */
809 btrace_set_insn_history (btinfo, &begin, &end);
812 /* The to_insn_history_range method of target record-btrace. */
815 record_btrace_insn_history_range (struct target_ops *self,
816 ULONGEST from, ULONGEST to,
817 gdb_disassembly_flags flags)
819 struct btrace_thread_info *btinfo;
820 struct btrace_insn_iterator begin, end;
821 struct ui_out *uiout;
822 unsigned int low, high;
825 uiout = current_uiout;
826 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
830 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
832 /* Check for wrap-arounds. */
/* LOW/HIGH are the ULONGEST arguments narrowed to unsigned int; inequality
   means the value did not fit.  */
833 if (low != from || high != to)
834 error (_("Bad range."));
837 error (_("Bad range."));
839 btinfo = require_btrace ();
841 found = btrace_find_insn_by_number (&begin, btinfo, low);
843 error (_("Range out of bounds."));
845 found = btrace_find_insn_by_number (&end, btinfo, high);
848 /* Silently truncate the range. */
849 btrace_insn_end (&end, btinfo);
853 /* We want both begin and end to be inclusive. */
854 btrace_insn_next (&end, 1);
857 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
858 btrace_set_insn_history (btinfo, &begin, &end);
861 /* The to_insn_history_from method of target record-btrace. */
864 record_btrace_insn_history_from (struct target_ops *self,
865 ULONGEST from, int size,
866 gdb_disassembly_flags flags)
868 ULONGEST begin, end, context;
870 context = abs (size);
872 error (_("Bad record instruction-history-size."));
/* Negative SIZE looks backward from FROM, positive SIZE forward; the
   branch selecting between the two is elided here.  */
881 begin = from - context + 1;
886 end = from + context - 1;
888 /* Check for wrap-around. */
/* Delegate the actual printing to the range variant.  */
893 record_btrace_insn_history_range (self, begin, end, flags);
896 /* Print the instruction number range for a function call history line. */
899 btrace_call_history_insn_range (struct ui_out *uiout,
900 const struct btrace_function *bfun)
902 unsigned int begin, end, size;
904 size = bfun->insn.size ();
905 gdb_assert (size > 0);
/* Inclusive range: the segment covers instruction numbers
   [insn_offset, insn_offset + size - 1].  */
907 begin = bfun->insn_offset;
908 end = begin + size - 1;
910 ui_out_field_uint (uiout, "insn begin", begin);
912 ui_out_field_uint (uiout, "insn end", end);
915 /* Compute the lowest and highest source line for the instructions in BFUN
916 and return them in PBEGIN and PEND.
917 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
918 result from inlining or macro expansion. */
921 btrace_compute_src_line_range (const struct btrace_function *bfun,
922 int *pbegin, int *pend)
924 struct symtab *symtab;
935 symtab = symbol_symtab (sym);
937 for (const btrace_insn &insn : bfun->insn)
939 struct symtab_and_line sal;
941 sal = find_pc_line (insn.pc, 0);
/* Skip lines from other symtabs (inlining/macro expansion) and the
   line-0 end-of-sequence markers.  */
942 if (sal.symtab != symtab || sal.line == 0)
945 begin = std::min (begin, sal.line);
946 end = std::max (end, sal.line);
954 /* Print the source line information for a function call history line. */
957 btrace_call_history_src_line (struct ui_out *uiout,
958 const struct btrace_function *bfun)
967 uiout->field_string ("file",
968 symtab_to_filename_for_display (symbol_symtab (sym)));
970 btrace_compute_src_line_range (bfun, &begin, &end);
/* Print "min[-max]"; the max field is presumably emitted only when the
   range spans more than one line (branch elided here).  */
975 uiout->field_int ("min line", begin);
981 uiout->field_int ("max line", end);
984 /* Get the name of a branch trace function. */
987 btrace_get_bfun_name (const struct btrace_function *bfun)
989 struct minimal_symbol *msym;
/* Prefer the full symbol's print name, fall back to the minimal symbol.  */
999 return SYMBOL_PRINT_NAME (sym);
1000 else if (msym != NULL)
1001 return MSYMBOL_PRINT_NAME (msym);
1006 /* Disassemble a section of the recorded function trace. */
1009 btrace_call_history (struct ui_out *uiout,
1010 const struct btrace_thread_info *btinfo,
1011 const struct btrace_call_iterator *begin,
1012 const struct btrace_call_iterator *end,
1015 struct btrace_call_iterator it;
1016 record_print_flags flags = (enum record_print_flag) int_flags;
1018 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1019 btrace_call_number (end));
1021 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1023 const struct btrace_function *bfun;
1024 struct minimal_symbol *msym;
1027 bfun = btrace_call_get (&it);
1031 /* Print the function index. */
1032 ui_out_field_uint (uiout, "index", bfun->number);
1035 /* Indicate gaps in the trace. */
1036 if (bfun->errcode != 0)
1038 const struct btrace_config *conf;
1040 conf = btrace_conf (btinfo);
1042 /* We have trace so we must have a configuration. */
1043 gdb_assert (conf != NULL);
1045 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
/* Indent by call depth when requested; BTINFO->level normalizes the
   segment's relative level to a non-negative indentation.  */
1050 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1052 int level = bfun->level + btinfo->level, i;
1054 for (i = 0; i < level; ++i)
1059 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1060 else if (msym != NULL)
1061 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1062 else if (!uiout->is_mi_like_p ())
1063 uiout->field_string ("function", "??");
1065 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1067 uiout->text (_("\tinst "));
1068 btrace_call_history_insn_range (uiout, bfun);
1071 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1073 uiout->text (_("\tat "));
1074 btrace_call_history_src_line (uiout, bfun);
1081 /* The to_call_history method of target record-btrace. */
1084 record_btrace_call_history (struct target_ops *self, int size,
1085 record_print_flags flags)
1087 struct btrace_thread_info *btinfo;
1088 struct btrace_call_history *history;
1089 struct btrace_call_iterator begin, end;
1090 struct ui_out *uiout;
1091 unsigned int context, covered;
1093 uiout = current_uiout;
1094 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
/* SIZE's sign selects the direction; its magnitude is the window size.  */
1095 context = abs (size);
1097 error (_("Bad record function-call-history-size."));
1099 btinfo = require_btrace ();
1100 history = btinfo->call_history;
1101 if (history == NULL)
1103 struct btrace_insn_iterator *replay;
1105 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1107 /* If we're replaying, we start at the replay position. Otherwise, we
1108 start at the tail of the trace. */
1109 replay = btinfo->replay;
1112 begin.btinfo = btinfo;
1113 begin.index = replay->call_index;
1116 btrace_call_end (&begin, btinfo);
1118 /* We start from here and expand in the requested direction. Then we
1119 expand in the other direction, as well, to fill up any remaining
1124 /* We want the current position covered, as well. */
1125 covered = btrace_call_next (&end, 1);
1126 covered += btrace_call_prev (&begin, context - covered);
1127 covered += btrace_call_next (&end, context - covered);
1131 covered = btrace_call_next (&end, context);
1132 covered += btrace_call_prev (&begin, context- covered);
/* A previous history exists: continue from its boundary.  */
1137 begin = history->begin;
1140 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1141 btrace_call_number (&begin), btrace_call_number (&end));
1146 covered = btrace_call_prev (&begin, context);
1151 covered = btrace_call_next (&end, context);
1156 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1160 printf_unfiltered (_("At the start of the branch trace record.\n"));
1162 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the printed window for the next repeated command.  */
1165 btrace_set_call_history (btinfo, &begin, &end);
1168 /* The to_call_history_range method of target record-btrace. */
1171 record_btrace_call_history_range (struct target_ops *self,
1172 ULONGEST from, ULONGEST to,
1173 record_print_flags flags)
1175 struct btrace_thread_info *btinfo;
1176 struct btrace_call_iterator begin, end;
1177 struct ui_out *uiout;
1178 unsigned int low, high;
1181 uiout = current_uiout;
1182 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1186 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1188 /* Check for wrap-arounds. */
/* LOW/HIGH are FROM/TO narrowed to unsigned int; inequality means the
   value did not fit.  */
1189 if (low != from || high != to)
1190 error (_("Bad range."));
1193 error (_("Bad range."));
1195 btinfo = require_btrace ();
1197 found = btrace_find_call_by_number (&begin, btinfo, low);
1199 error (_("Range out of bounds."));
1201 found = btrace_find_call_by_number (&end, btinfo, high);
1204 /* Silently truncate the range. */
1205 btrace_call_end (&end, btinfo);
1209 /* We want both begin and end to be inclusive. */
1210 btrace_call_next (&end, 1);
1213 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1214 btrace_set_call_history (btinfo, &begin, &end);
1217 /* The to_call_history_from method of target record-btrace. */
1220 record_btrace_call_history_from (struct target_ops *self,
1221 ULONGEST from, int size,
1222 record_print_flags flags)
1224 ULONGEST begin, end, context;
1226 context = abs (size);
1228 error (_("Bad record function-call-history-size."));
/* Negative SIZE looks backward from FROM, positive forward (branch elided
   here); printing is delegated to the range variant.  */
1237 begin = from - context + 1;
1242 end = from + context - 1;
1244 /* Check for wrap-around. */
1249 record_btrace_call_history_range (self, begin, end, flags);
1252 /* The to_record_method method of target record-btrace. */
1254 static enum record_method
1255 record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1257 struct thread_info * const tp = find_thread_ptid (ptid);
1260 error (_("No thread."));
1262 if (tp->btrace.target == NULL)
1263 return RECORD_METHOD_NONE;
1265 return RECORD_METHOD_BTRACE;
1268 /* The to_record_is_replaying method of target record-btrace. */
/* PTID may be a wildcard; any matching replaying thread counts.  */
1271 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1273 struct thread_info *tp;
1275 ALL_NON_EXITED_THREADS (tp)
1276 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1282 /* The to_record_will_replay method of target record-btrace. */
/* Reverse execution always replays; forward execution replays only if we
   are already replaying.  */
1285 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1287 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1290 /* The to_xfer_partial method of target record-btrace. */
1292 static enum target_xfer_status
1293 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1294 const char *annex, gdb_byte *readbuf,
1295 const gdb_byte *writebuf, ULONGEST offset,
1296 ULONGEST len, ULONGEST *xfered_len)
1298 /* Filter out requests that don't make sense during replay. */
/* Note: pointer comparison against the read-only constant is intentional;
   replay_memory_access is only ever set to one of the two constants.  */
1299 if (replay_memory_access == replay_memory_access_read_only
1300 && !record_btrace_generating_corefile
1301 && record_btrace_is_replaying (ops, inferior_ptid))
1305 case TARGET_OBJECT_MEMORY:
1307 struct target_section *section;
1309 /* We do not allow writing memory in general. */
1310 if (writebuf != NULL)
1313 return TARGET_XFER_UNAVAILABLE;
1316 /* We allow reading readonly memory. */
1317 section = target_section_by_addr (ops, offset);
1318 if (section != NULL)
1320 /* Check if the section we found is readonly. */
1321 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1322 section->the_bfd_section)
1323 & SEC_READONLY) != 0)
1325 /* Truncate the request to fit into this section. */
1326 len = std::min (len, section->endaddr - offset);
1332 return TARGET_XFER_UNAVAILABLE;
1337 /* Forward the request. */
1339 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1340 offset, len, xfered_len);
1343 /* The to_insert_breakpoint method of target record-btrace. */
1346 record_btrace_insert_breakpoint (struct target_ops *ops,
1347 struct gdbarch *gdbarch,
1348 struct bp_target_info *bp_tgt)
1353 /* Inserting breakpoints requires accessing memory. Allow it for the
1354 duration of this function. */
1355 old = replay_memory_access;
1356 replay_memory_access = replay_memory_access_read_write;
/* Restore the saved access mode on both the success and the exception
   path before letting the exception propagate.  */
1361 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1363 CATCH (except, RETURN_MASK_ALL)
1365 replay_memory_access = old;
1366 throw_exception (except);
1369 replay_memory_access = old;
1374 /* The to_remove_breakpoint method of target record-btrace. */
1377 record_btrace_remove_breakpoint (struct target_ops *ops,
1378 struct gdbarch *gdbarch,
1379 struct bp_target_info *bp_tgt,
1380 enum remove_bp_reason reason)
1385 /* Removing breakpoints requires accessing memory. Allow it for the
1386 duration of this function. */
1387 old = replay_memory_access;
1388 replay_memory_access = replay_memory_access_read_write;
1393 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1396 CATCH (except, RETURN_MASK_ALL)
1398 replay_memory_access = old;
1399 throw_exception (except);
1402 replay_memory_access = old;
1407 /* The to_fetch_registers method of target record-btrace. */
1410 record_btrace_fetch_registers (struct target_ops *ops,
1411 struct regcache *regcache, int regno)
1413 struct btrace_insn_iterator *replay;
1414 struct thread_info *tp;
1416 tp = find_thread_ptid (regcache_get_ptid (regcache));
1417 gdb_assert (tp != NULL);
1419 replay = tp->btrace.replay;
1420 if (replay != NULL && !record_btrace_generating_corefile)
1422 const struct btrace_insn *insn;
1423 struct gdbarch *gdbarch;
1426 gdbarch = regcache->arch ();
1427 pcreg = gdbarch_pc_regnum (gdbarch);
1431 /* We can only provide the PC register. */
1432 if (regno >= 0 && regno != pcreg)
/* Supply the replayed instruction's address as the PC value.  */
1435 insn = btrace_insn_get (replay);
1436 gdb_assert (insn != NULL);
1438 regcache_raw_supply (regcache, regno, &insn->pc);
/* Not replaying (or generating a core file): forward to the target
   beneath.  */
1442 struct target_ops *t = ops->beneath;
1444 t->to_fetch_registers (t, regcache, regno);
1448 /* The to_store_registers method of target record-btrace. */
1451 record_btrace_store_registers (struct target_ops *ops,
1452 struct regcache *regcache, int regno)
1454 struct target_ops *t;
1456 if (!record_btrace_generating_corefile
1457 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1458 error (_("Cannot write registers while replaying."));
1460 gdb_assert (may_write_registers != 0);
1463 t->to_store_registers (t, regcache, regno);
1466 /* The to_prepare_to_store method of target record-btrace. */
/* Writing registers during replay is silently skipped here; the actual
   error is raised by record_btrace_store_registers.  */
1469 record_btrace_prepare_to_store (struct target_ops *ops,
1470 struct regcache *regcache)
1472 struct target_ops *t;
1474 if (!record_btrace_generating_corefile
1475 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1479 t->to_prepare_to_store (t, regcache);
1482 /* The branch trace frame cache. */
1484 struct btrace_frame_cache
1487 struct thread_info *tp;
1489 /* The frame info. */
1490 struct frame_info *frame;
1492 /* The branch trace function segment. */
1493 const struct btrace_function *bfun;
1496 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1498 static htab_t bfcache;
1500 /* hash_f for htab_create_alloc of bfcache. */
/* Entries hash and compare by the frame_info pointer identity.  */
1503 bfcache_hash (const void *arg)
1505 const struct btrace_frame_cache *cache
1506 = (const struct btrace_frame_cache *) arg;
1508 return htab_hash_pointer (cache->frame);
1511 /* eq_f for htab_create_alloc of bfcache. */
1514 bfcache_eq (const void *arg1, const void *arg2)
1516 const struct btrace_frame_cache *cache1
1517 = (const struct btrace_frame_cache *) arg1;
1518 const struct btrace_frame_cache *cache2
1519 = (const struct btrace_frame_cache *) arg2;
1521 return cache1->frame == cache2->frame;
1524 /* Create a new btrace frame cache. */
1526 static struct btrace_frame_cache *
1527 bfcache_new (struct frame_info *frame)
1529 struct btrace_frame_cache *cache;
/* Allocate on the frame obstack so the cache lives as long as FRAME.  */
1532 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1533 cache->frame = frame;
1535 slot = htab_find_slot (bfcache, cache, INSERT);
1536 gdb_assert (*slot == NULL);
1542 /* Extract the branch trace function from a branch trace frame. */
1544 static const struct btrace_function *
1545 btrace_get_frame_function (struct frame_info *frame)
1547 const struct btrace_frame_cache *cache;
1548 struct btrace_frame_cache pattern;
/* Use a stack-allocated pattern with only FRAME set for the lookup; the
   hash/eq functions above only examine the frame member.  */
1551 pattern.frame = frame;
1553 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1557 cache = (const struct btrace_frame_cache *) *slot;
1561 /* Implement stop_reason method for record_btrace_frame_unwind. */
1563 static enum unwind_stop_reason
1564 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1567 const struct btrace_frame_cache *cache;
1568 const struct btrace_function *bfun;
1570 cache = (const struct btrace_frame_cache *) *this_cache;
1572 gdb_assert (bfun != NULL);
1575 return UNWIND_UNAVAILABLE;
1577 return UNWIND_NO_REASON;
1580 /* Implement this_id method for record_btrace_frame_unwind. */
1583 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1584 struct frame_id *this_id)
1586 const struct btrace_frame_cache *cache;
1587 const struct btrace_function *bfun;
1588 struct btrace_call_iterator it;
1589 CORE_ADDR code, special;
1591 cache = (const struct btrace_frame_cache *) *this_cache;
1594 gdb_assert (bfun != NULL);
1596 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1597 bfun = btrace_call_get (&it);
1599 code = get_frame_func (this_frame);
1600 special = bfun->number;
1602 *this_id = frame_id_build_unavailable_stack_special (code, special);
1604 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1605 btrace_get_bfun_name (cache->bfun),
1606 core_addr_to_string_nz (this_id->code_addr),
1607 core_addr_to_string_nz (this_id->special_addr));
1610 /* Implement prev_register method for record_btrace_frame_unwind. */
1612 static struct value *
1613 record_btrace_frame_prev_register (struct frame_info *this_frame,
1617 const struct btrace_frame_cache *cache;
1618 const struct btrace_function *bfun, *caller;
1619 struct btrace_call_iterator it;
1620 struct gdbarch *gdbarch;
1624 gdbarch = get_frame_arch (this_frame);
1625 pcreg = gdbarch_pc_regnum (gdbarch);
1626 if (pcreg < 0 || regnum != pcreg)
1627 throw_error (NOT_AVAILABLE_ERROR,
1628 _("Registers are not available in btrace record history"));
1630 cache = (const struct btrace_frame_cache *) *this_cache;
1632 gdb_assert (bfun != NULL);
1634 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1635 throw_error (NOT_AVAILABLE_ERROR,
1636 _("No caller in btrace record history"));
1638 caller = btrace_call_get (&it);
1640 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1641 pc = caller->insn.front ().pc;
1644 pc = caller->insn.back ().pc;
1645 pc += gdb_insn_length (gdbarch, pc);
1648 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1649 btrace_get_bfun_name (bfun), bfun->level,
1650 core_addr_to_string_nz (pc));
1652 return frame_unwind_got_address (this_frame, regnum, pc);
1655 /* Implement sniffer method for record_btrace_frame_unwind. */
1658 record_btrace_frame_sniffer (const struct frame_unwind *self,
1659 struct frame_info *this_frame,
1662 const struct btrace_function *bfun;
1663 struct btrace_frame_cache *cache;
1664 struct thread_info *tp;
1665 struct frame_info *next;
1667 /* THIS_FRAME does not contain a reference to its thread. */
1668 tp = find_thread_ptid (inferior_ptid);
1669 gdb_assert (tp != NULL);
1672 next = get_next_frame (this_frame);
1675 const struct btrace_insn_iterator *replay;
1677 replay = tp->btrace.replay;
1679 bfun = &replay->btinfo->functions[replay->call_index];
1683 const struct btrace_function *callee;
1684 struct btrace_call_iterator it;
1686 callee = btrace_get_frame_function (next);
1687 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1690 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1693 bfun = btrace_call_get (&it);
1699 DEBUG ("[frame] sniffed frame for %s on level %d",
1700 btrace_get_bfun_name (bfun), bfun->level);
1702 /* This is our frame. Initialize the frame cache. */
1703 cache = bfcache_new (this_frame);
1707 *this_cache = cache;
1711 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1714 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1715 struct frame_info *this_frame,
1718 const struct btrace_function *bfun, *callee;
1719 struct btrace_frame_cache *cache;
1720 struct btrace_call_iterator it;
1721 struct frame_info *next;
1722 struct thread_info *tinfo;
1724 next = get_next_frame (this_frame);
1728 callee = btrace_get_frame_function (next);
1732 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1735 tinfo = find_thread_ptid (inferior_ptid);
1736 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1739 bfun = btrace_call_get (&it);
1741 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1742 btrace_get_bfun_name (bfun), bfun->level);
1744 /* This is our frame. Initialize the frame cache. */
1745 cache = bfcache_new (this_frame);
1749 *this_cache = cache;
1754 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1756 struct btrace_frame_cache *cache;
1759 cache = (struct btrace_frame_cache *) this_cache;
1761 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1762 gdb_assert (slot != NULL);
1764 htab_remove_elt (bfcache, cache);
1767 /* btrace recording does not store previous memory content, neither the stack
1768 frames content. Any unwinding would return errorneous results as the stack
1769 contents no longer matches the changed PC value restored from history.
1770 Therefore this unwinder reports any possibly unwound registers as
1773 const struct frame_unwind record_btrace_frame_unwind =
1776 record_btrace_frame_unwind_stop_reason,
1777 record_btrace_frame_this_id,
1778 record_btrace_frame_prev_register,
1780 record_btrace_frame_sniffer,
1781 record_btrace_frame_dealloc_cache
1784 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1787 record_btrace_frame_unwind_stop_reason,
1788 record_btrace_frame_this_id,
1789 record_btrace_frame_prev_register,
1791 record_btrace_tailcall_frame_sniffer,
1792 record_btrace_frame_dealloc_cache
1795 /* Implement the to_get_unwinder method. */
1797 static const struct frame_unwind *
1798 record_btrace_to_get_unwinder (struct target_ops *self)
1800 return &record_btrace_frame_unwind;
1803 /* Implement the to_get_tailcall_unwinder method. */
1805 static const struct frame_unwind *
1806 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1808 return &record_btrace_tailcall_frame_unwind;
/* NOTE(review): this listing is mangled -- the return type line, the switch
   statement, and most case labels were dropped (embedded numbering jumps
   1814->1822->1828); only two case bodies survive.  Presumably the missing
   cases cover BTHR_STEP/BTHR_CONT/BTHR_STOP -- restore from upstream before
   compiling.  */
1811 /* Return a human-readable string for FLAG. */
1814 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
/* Reverse single-step request.  */
1822 return "reverse-step";
/* Reverse continue request.  */
1828 return "reverse-cont";
1837 /* Indicate that TP should be resumed according to FLAG. */
/* NOTE(review): the return-type line and braces were dropped from this
   mangled listing; only the statements below survive.  */
1840 record_btrace_resume_thread (struct thread_info *tp,
1841 enum btrace_thread_flag flag)
1843 struct btrace_thread_info *btinfo;
1845 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1846 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1848 btinfo = &tp->btrace;
1850 /* Fetch the latest branch trace. */
/* NOTE(review): the btrace_fetch call itself (original line ~1851) is
   missing from this listing -- restore it from upstream; its exact
   arguments are version-dependent, so do not guess.  */
1853 /* A resume request overwrites a preceding resume or stop request. */
1854 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1855 btinfo->flags |= flag;
1858 /* Get the current frame for TP. */
/* NOTE(review): mangled listing -- the declaration of the local holding the
   executing flag (~1865), the TRY block opener (~1881-1883), END_CATCH and
   the final `return frame;` (~1904-1905) were dropped; restore from
   upstream.  */
1860 static struct frame_info *
1861 get_thread_current_frame (struct thread_info *tp)
1863 struct frame_info *frame;
1864 ptid_t old_inferior_ptid;
1867 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1868 old_inferior_ptid = inferior_ptid;
1869 inferior_ptid = tp->ptid;
1871 /* Clear the executing flag to allow changes to the current frame.
1872 We are not actually running, yet. We just started a reverse execution
1873 command or a record goto command.
1874 For the latter, EXECUTING is false and this has no effect.
1875 For the former, EXECUTING is true and we're in to_wait, about to
1876 move the thread. Since we need to recompute the stack, we temporarily
1877 set EXECUTING to false. */
1878 executing = is_executing (inferior_ptid);
1879 set_executing (inferior_ptid, 0);
1884 frame = get_current_frame ();
/* On error, undo both temporary state changes before rethrowing.  */
1886 CATCH (except, RETURN_MASK_ALL)
1888 /* Restore the previous execution state. */
1889 set_executing (inferior_ptid, executing);
1891 /* Restore the previous inferior_ptid. */
1892 inferior_ptid = old_inferior_ptid;
1894 throw_exception (except);
/* Success path: same restoration, then the frame is returned.  */
1898 /* Restore the previous execution state. */
1899 set_executing (inferior_ptid, executing);
1901 /* Restore the previous inferior_ptid. */
1902 inferior_ptid = old_inferior_ptid;
1907 /* Start replaying a thread. */
/* NOTE(review): mangled listing -- braces, the early `return NULL;` after
   the empty-trace check (~1920), the TRY opener (~1927), the `unsigned int
   steps;` declaration and the `if (steps == 0)` guard inside the gap-skip
   loop (~1951-1954), END_CATCH, and the final `return replay;` were
   dropped; restore from upstream.  */
1909 static struct btrace_insn_iterator *
1910 record_btrace_start_replaying (struct thread_info *tp)
1912 struct btrace_insn_iterator *replay;
1913 struct btrace_thread_info *btinfo;
1915 btinfo = &tp->btrace;
/* replay starts out NULL; it is only set once iteration succeeded.  */
1918 /* We can't start replaying without trace. */
1919 if (btinfo->functions.empty ())
1922 /* GDB stores the current frame_id when stepping in order to detects steps
1924 Since frames are computed differently when we're replaying, we need to
1925 recompute those stored frames and fix them up so we can still detect
1926 subroutines after we started replaying. */
1929 struct frame_info *frame;
1930 struct frame_id frame_id;
1931 int upd_step_frame_id, upd_step_stack_frame_id;
1933 /* The current frame without replaying - computed via normal unwind. */
1934 frame = get_thread_current_frame (tp);
1935 frame_id = get_frame_id (frame);
1937 /* Check if we need to update any stepping-related frame id's. */
1938 upd_step_frame_id = frame_id_eq (frame_id,
1939 tp->control.step_frame_id);
1940 upd_step_stack_frame_id = frame_id_eq (frame_id,
1941 tp->control.step_stack_frame_id);
1943 /* We start replaying at the end of the branch trace. This corresponds
1944 to the current instruction. */
1945 replay = XNEW (struct btrace_insn_iterator);
1946 btrace_insn_end (replay, btinfo);
1948 /* Skip gaps at the end of the trace. */
1949 while (btrace_insn_get (replay) == NULL)
1953 steps = btrace_insn_prev (replay, 1);
1955 error (_("No trace."));
1958 /* We're not replaying, yet. */
1959 gdb_assert (btinfo->replay == NULL);
1960 btinfo->replay = replay;
1962 /* Make sure we're not using any stale registers. */
1963 registers_changed_ptid (tp->ptid);
1965 /* The current frame with replaying - computed via btrace unwind. */
1966 frame = get_thread_current_frame (tp);
1967 frame_id = get_frame_id (frame);
1969 /* Replace stepping related frames where necessary. */
1970 if (upd_step_frame_id)
1971 tp->control.step_frame_id = frame_id;
1972 if (upd_step_stack_frame_id)
1973 tp->control.step_stack_frame_id = frame_id;
/* On error, undo the partial replay setup before rethrowing.  */
1975 CATCH (except, RETURN_MASK_ALL)
1977 xfree (btinfo->replay);
1978 btinfo->replay = NULL;
1980 registers_changed_ptid (tp->ptid);
1982 throw_exception (except);
1989 /* Stop replaying a thread. */
1992 record_btrace_stop_replaying (struct thread_info *tp)
1994 struct btrace_thread_info *btinfo;
1996 btinfo = &tp->btrace;
1998 xfree (btinfo->replay);
1999 btinfo->replay = NULL;
2001 /* Make sure we're not leaving any stale registers. */
2002 registers_changed_ptid (tp->ptid);
2005 /* Stop replaying TP if it is at the end of its execution history. */
2008 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2010 struct btrace_insn_iterator *replay, end;
2011 struct btrace_thread_info *btinfo;
2013 btinfo = &tp->btrace;
2014 replay = btinfo->replay;
2019 btrace_insn_end (&end, btinfo);
2021 if (btrace_insn_cmp (replay, &end) == 0)
2022 record_btrace_stop_replaying (tp);
2025 /* The to_resume method of target record-btrace. */
/* NOTE(review): mangled listing -- the return-type line, braces, the
   `ops = ops->beneath;`/`return;` of the forwarding branch (~2052-2054),
   the CFLAG assignments (BTHR_RCONT at ~2061, BTHR_CONT at ~2066) and the
   `else` keywords were dropped; restore from upstream.  */
2028 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2029 enum gdb_signal signal)
2031 struct thread_info *tp;
2032 enum btrace_thread_flag flag, cflag;
2034 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2035 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2036 step ? "step" : "cont");
2038 /* Store the execution direction of the last resume.
2040 If there is more than one to_resume call, we have to rely on infrun
2041 to not change the execution direction in-between. */
2042 record_btrace_resume_exec_dir = execution_direction;
2044 /* As long as we're not replaying, just forward the request.
2046 For non-stop targets this means that no thread is replaying. In order to
2047 make progress, we may need to explicitly move replaying threads to the end
2048 of their execution history. */
2049 if ((execution_direction != EXEC_REVERSE)
2050 && !record_btrace_is_replaying (ops, minus_one_ptid))
2053 ops->to_resume (ops, ptid, step, signal);
2057 /* Compute the btrace thread flag for the requested move. */
2058 if (execution_direction == EXEC_REVERSE)
2060 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2065 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2069 /* We just indicate the resume intent here. The actual stepping happens in
2070 record_btrace_wait below.
2072 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2073 if (!target_is_non_stop_p ())
2075 gdb_assert (ptid_match (inferior_ptid, ptid));
2077 ALL_NON_EXITED_THREADS (tp)
2078 if (ptid_match (tp->ptid, ptid))
/* Only the current thread gets the (possibly stepping) FLAG; all other
   matching threads get the continue-variant CFLAG.  */
2080 if (ptid_match (tp->ptid, inferior_ptid))
2081 record_btrace_resume_thread (tp, flag);
2083 record_btrace_resume_thread (tp, cflag);
/* Non-stop: resume exactly the requested threads with FLAG.  */
2088 ALL_NON_EXITED_THREADS (tp)
2089 if (ptid_match (tp->ptid, ptid))
2090 record_btrace_resume_thread (tp, flag);
2093 /* Async support. */
2094 if (target_can_async_p ())
2097 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2101 /* The to_commit_resume method of target record-btrace. */
2104 record_btrace_commit_resume (struct target_ops *ops)
2106 if ((execution_direction != EXEC_REVERSE)
2107 && !record_btrace_is_replaying (ops, minus_one_ptid))
2108 ops->beneath->to_commit_resume (ops->beneath);
2111 /* Cancel resuming TP. */
2114 record_btrace_cancel_resume (struct thread_info *tp)
2116 enum btrace_thread_flag flags;
2118 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2122 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2123 print_thread_id (tp),
2124 target_pid_to_str (tp->ptid), flags,
2125 btrace_thread_flag_to_str (flags));
2127 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2128 record_btrace_stop_replaying_at_end (tp);
2131 /* Return a target_waitstatus indicating that we ran out of history. */
2133 static struct target_waitstatus
2134 btrace_step_no_history (void)
2136 struct target_waitstatus status;
2138 status.kind = TARGET_WAITKIND_NO_HISTORY;
2143 /* Return a target_waitstatus indicating that a step finished. */
2145 static struct target_waitstatus
2146 btrace_step_stopped (void)
2148 struct target_waitstatus status;
2150 status.kind = TARGET_WAITKIND_STOPPED;
2151 status.value.sig = GDB_SIGNAL_TRAP;
2156 /* Return a target_waitstatus indicating that a thread was stopped as
2159 static struct target_waitstatus
2160 btrace_step_stopped_on_request (void)
2162 struct target_waitstatus status;
2164 status.kind = TARGET_WAITKIND_STOPPED;
2165 status.value.sig = GDB_SIGNAL_0;
2170 /* Return a target_waitstatus indicating a spurious stop. */
2172 static struct target_waitstatus
2173 btrace_step_spurious (void)
2175 struct target_waitstatus status;
2177 status.kind = TARGET_WAITKIND_SPURIOUS;
2182 /* Return a target_waitstatus indicating that the thread was not resumed. */
2184 static struct target_waitstatus
2185 btrace_step_no_resumed (void)
2187 struct target_waitstatus status;
2189 status.kind = TARGET_WAITKIND_NO_RESUMED;
2194 /* Return a target_waitstatus indicating that we should wait again. */
2196 static struct target_waitstatus
2197 btrace_step_again (void)
2199 struct target_waitstatus status;
2201 status.kind = TARGET_WAITKIND_IGNORE;
2206 /* Clear the record histories. */
2209 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2211 xfree (btinfo->insn_history);
2212 xfree (btinfo->call_history);
2214 btinfo->insn_history = NULL;
2215 btinfo->call_history = NULL;
2218 /* Check whether TP's current replay position is at a breakpoint. */
2221 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2223 struct btrace_insn_iterator *replay;
2224 struct btrace_thread_info *btinfo;
2225 const struct btrace_insn *insn;
2226 struct inferior *inf;
2228 btinfo = &tp->btrace;
2229 replay = btinfo->replay;
2234 insn = btrace_insn_get (replay);
2238 inf = find_inferior_ptid (tp->ptid);
2242 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2243 &btinfo->stop_reason);
2246 /* Step one instruction in forward direction. */
2248 static struct target_waitstatus
2249 record_btrace_single_step_forward (struct thread_info *tp)
2251 struct btrace_insn_iterator *replay, end, start;
2252 struct btrace_thread_info *btinfo;
2254 btinfo = &tp->btrace;
2255 replay = btinfo->replay;
2257 /* We're done if we're not replaying. */
2259 return btrace_step_no_history ();
2261 /* Check if we're stepping a breakpoint. */
2262 if (record_btrace_replay_at_breakpoint (tp))
2263 return btrace_step_stopped ();
2265 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2266 jump back to the instruction at which we started. */
2272 /* We will bail out here if we continue stepping after reaching the end
2273 of the execution history. */
2274 steps = btrace_insn_next (replay, 1);
2278 return btrace_step_no_history ();
2281 while (btrace_insn_get (replay) == NULL);
2283 /* Determine the end of the instruction trace. */
2284 btrace_insn_end (&end, btinfo);
2286 /* The execution trace contains (and ends with) the current instruction.
2287 This instruction has not been executed, yet, so the trace really ends
2288 one instruction earlier. */
2289 if (btrace_insn_cmp (replay, &end) == 0)
2290 return btrace_step_no_history ();
2292 return btrace_step_spurious ();
2295 /* Step one instruction in backward direction. */
2297 static struct target_waitstatus
2298 record_btrace_single_step_backward (struct thread_info *tp)
2300 struct btrace_insn_iterator *replay, start;
2301 struct btrace_thread_info *btinfo;
2303 btinfo = &tp->btrace;
2304 replay = btinfo->replay;
2306 /* Start replaying if we're not already doing so. */
2308 replay = record_btrace_start_replaying (tp);
2310 /* If we can't step any further, we reached the end of the history.
2311 Skip gaps during replay. If we end up at a gap (at the beginning of
2312 the trace), jump back to the instruction at which we started. */
2318 steps = btrace_insn_prev (replay, 1);
2322 return btrace_step_no_history ();
2325 while (btrace_insn_get (replay) == NULL);
2327 /* Check if we're stepping a breakpoint.
2329 For reverse-stepping, this check is after the step. There is logic in
2330 infrun.c that handles reverse-stepping separately. See, for example,
2331 proceed and adjust_pc_after_break.
2333 This code assumes that for reverse-stepping, PC points to the last
2334 de-executed instruction, whereas for forward-stepping PC points to the
2335 next to-be-executed instruction. */
2336 if (record_btrace_replay_at_breakpoint (tp))
2337 return btrace_step_stopped ();
2339 return btrace_step_spurious ();
2342 /* Step a single thread. */
2344 static struct target_waitstatus
2345 record_btrace_step_thread (struct thread_info *tp)
2347 struct btrace_thread_info *btinfo;
2348 struct target_waitstatus status;
2349 enum btrace_thread_flag flags;
2351 btinfo = &tp->btrace;
2353 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2354 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2356 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2357 target_pid_to_str (tp->ptid), flags,
2358 btrace_thread_flag_to_str (flags));
2360 /* We can't step without an execution history. */
2361 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2362 return btrace_step_no_history ();
2367 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2370 return btrace_step_stopped_on_request ();
2373 status = record_btrace_single_step_forward (tp);
2374 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2377 return btrace_step_stopped ();
2380 status = record_btrace_single_step_backward (tp);
2381 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2384 return btrace_step_stopped ();
2387 status = record_btrace_single_step_forward (tp);
2388 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2391 btinfo->flags |= flags;
2392 return btrace_step_again ();
2395 status = record_btrace_single_step_backward (tp);
2396 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2399 btinfo->flags |= flags;
2400 return btrace_step_again ();
2403 /* We keep threads moving at the end of their execution history. The to_wait
2404 method will stop the thread for whom the event is reported. */
2405 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2406 btinfo->flags |= flags;
2411 /* A vector of threads. */
/* NOTE(review): presumably a leftover from the old VEC API -- the functions
   below use std::vector<thread_info *> directly.  Confirm the typedef is
   still referenced before removing.  */
2413 typedef struct thread_info * tp_t;
2416 /* Announce further events if necessary. */
2419 record_btrace_maybe_mark_async_event
2420 (const std::vector<thread_info *> &moving,
2421 const std::vector<thread_info *> &no_history)
2423 bool more_moving = !moving.empty ();
2424 bool more_no_history = !no_history.empty ();;
2426 if (!more_moving && !more_no_history)
2430 DEBUG ("movers pending");
2432 if (more_no_history)
2433 DEBUG ("no-history pending");
2435 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2438 /* The to_wait method of target record-btrace. */
/* NOTE(review): mangled listing -- the return-type line (ptid_t), braces,
   the `struct thread_info *tp;` declaration for the work-list loop
   (~2459), and several block delimiters were dropped; restore from
   upstream.  */
2441 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2442 struct target_waitstatus *status, int options)
2444 std::vector<thread_info *> moving;
2445 std::vector<thread_info *> no_history;
2447 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2449 /* As long as we're not replaying, just forward the request. */
2450 if ((execution_direction != EXEC_REVERSE)
2451 && !record_btrace_is_replaying (ops, minus_one_ptid))
2454 return ops->to_wait (ops, ptid, status, options);
2457 /* Keep a work list of moving threads. */
2461 ALL_NON_EXITED_THREADS (tp)
2463 if (ptid_match (tp->ptid, ptid)
2464 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2465 moving.push_back (tp);
/* Nothing matched: report "no resumed" back to infrun.  */
2469 if (moving.empty ())
2471 *status = btrace_step_no_resumed ();
2473 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2474 target_waitstatus_to_string (status).c_str ());
2479 /* Step moving threads one by one, one step each, until either one thread
2480 reports an event or we run out of threads to step.
2482 When stepping more than one thread, chances are that some threads reach
2483 the end of their execution history earlier than others. If we reported
2484 this immediately, all-stop on top of non-stop would stop all threads and
2485 resume the same threads next time. And we would report the same thread
2486 having reached the end of its execution history again.
2488 In the worst case, this would starve the other threads. But even if other
2489 threads would be allowed to make progress, this would result in far too
2490 many intermediate stops.
2492 We therefore delay the reporting of "no execution history" until we have
2493 nothing else to report. By this time, all threads should have moved to
2494 either the beginning or the end of their execution history. There will
2495 be a single user-visible stop. */
2496 struct thread_info *eventing = NULL;
2497 while ((eventing == NULL) && !moving.empty ())
2499 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2501 thread_info *tp = moving[ix];
2503 *status = record_btrace_step_thread (tp);
2505 switch (status->kind)
/* IGNORE keeps the thread in the work list (ix advances in the elided
   case body); NO_HISTORY parks it; anything else is the event.  */
2507 case TARGET_WAITKIND_IGNORE:
2511 case TARGET_WAITKIND_NO_HISTORY:
2512 no_history.push_back (ordered_remove (moving, ix));
2516 eventing = unordered_remove (moving, ix);
2522 if (eventing == NULL)
2524 /* We started with at least one moving thread. This thread must have
2525 either stopped or reached the end of its execution history.
2527 In the former case, EVENTING must not be NULL.
2528 In the latter case, NO_HISTORY must not be empty. */
2529 gdb_assert (!no_history.empty ());
2531 /* We kept threads moving at the end of their execution history. Stop
2532 EVENTING now that we are going to report its stop. */
2533 eventing = unordered_remove (no_history, 0);
2534 eventing->btrace.flags &= ~BTHR_MOVE;
2536 *status = btrace_step_no_history ();
2539 gdb_assert (eventing != NULL);
2541 /* We kept threads replaying at the end of their execution history. Stop
2542 replaying EVENTING now that we are going to report its stop. */
2543 record_btrace_stop_replaying_at_end (eventing);
2545 /* Stop all other threads. */
2546 if (!target_is_non_stop_p ())
2550 ALL_NON_EXITED_THREADS (tp)
2551 record_btrace_cancel_resume (tp);
2554 /* In async mode, we need to announce further events. */
2555 if (target_is_async_p ())
2556 record_btrace_maybe_mark_async_event (moving, no_history)
2558 /* Start record histories anew from the current position. */
2559 record_btrace_clear_histories (&eventing->btrace);
2561 /* We moved the replay position but did not update registers. */
2562 registers_changed_ptid (eventing->ptid);
2564 DEBUG ("wait ended by thread %s (%s): %s",
2565 print_thread_id (eventing),
2566 target_pid_to_str (eventing->ptid),
2567 target_waitstatus_to_string (status).c_str ());
2569 return eventing->ptid;
2572 /* The to_stop method of target record-btrace. */
2575 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2577 DEBUG ("stop %s", target_pid_to_str (ptid));
2579 /* As long as we're not replaying, just forward the request. */
2580 if ((execution_direction != EXEC_REVERSE)
2581 && !record_btrace_is_replaying (ops, minus_one_ptid))
2584 ops->to_stop (ops, ptid);
2588 struct thread_info *tp;
2590 ALL_NON_EXITED_THREADS (tp)
2591 if (ptid_match (tp->ptid, ptid))
2593 tp->btrace.flags &= ~BTHR_MOVE;
2594 tp->btrace.flags |= BTHR_STOP;
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2607 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2610 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2612 if (record_btrace_is_replaying (ops, minus_one_ptid))
2614 struct thread_info *tp = inferior_thread ();
2616 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2619 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2622 /* The to_supports_stopped_by_sw_breakpoint method of target
2626 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2628 if (record_btrace_is_replaying (ops, minus_one_ptid))
2631 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2634 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2637 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2639 if (record_btrace_is_replaying (ops, minus_one_ptid))
2641 struct thread_info *tp = inferior_thread ();
2643 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2646 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2649 /* The to_supports_stopped_by_hw_breakpoint method of target
2653 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2655 if (record_btrace_is_replaying (ops, minus_one_ptid))
2658 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2661 /* The to_update_thread_list method of target record-btrace. */
2664 record_btrace_update_thread_list (struct target_ops *ops)
2666 /* We don't add or remove threads during replay. */
2667 if (record_btrace_is_replaying (ops, minus_one_ptid))
2670 /* Forward the request. */
2672 ops->to_update_thread_list (ops);
2675 /* The to_thread_alive method of target record-btrace. */
2678 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2680 /* We don't add or remove threads during replay. */
2681 if (record_btrace_is_replaying (ops, minus_one_ptid))
2682 return find_thread_ptid (ptid) != NULL;
2684 /* Forward the request. */
2686 return ops->to_thread_alive (ops, ptid);
2689 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2693 record_btrace_set_replay (struct thread_info *tp,
2694 const struct btrace_insn_iterator *it)
2696 struct btrace_thread_info *btinfo;
2698 btinfo = &tp->btrace;
2701 record_btrace_stop_replaying (tp);
2704 if (btinfo->replay == NULL)
2705 record_btrace_start_replaying (tp);
2706 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2709 *btinfo->replay = *it;
2710 registers_changed_ptid (tp->ptid);
2713 /* Start anew from the new replay position. */
2714 record_btrace_clear_histories (btinfo);
2716 stop_pc = regcache_read_pc (get_current_regcache ());
2717 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2720 /* The to_goto_record_begin method of target record-btrace. */
2723 record_btrace_goto_begin (struct target_ops *self)
2725 struct thread_info *tp;
2726 struct btrace_insn_iterator begin;
2728 tp = require_btrace_thread ();
2730 btrace_insn_begin (&begin, &tp->btrace);
2732 /* Skip gaps at the beginning of the trace. */
2733 while (btrace_insn_get (&begin) == NULL)
2737 steps = btrace_insn_next (&begin, 1);
2739 error (_("No trace."));
2742 record_btrace_set_replay (tp, &begin);
2745 /* The to_goto_record_end method of target record-btrace. */
2748 record_btrace_goto_end (struct target_ops *ops)
2750 struct thread_info *tp;
2752 tp = require_btrace_thread ();
2754 record_btrace_set_replay (tp, NULL);
2757 /* The to_goto_record method of target record-btrace. */
2760 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2762 struct thread_info *tp;
2763 struct btrace_insn_iterator it;
2764 unsigned int number;
2769 /* Check for wrap-arounds. */
2771 error (_("Instruction number out of range."));
2773 tp = require_btrace_thread ();
2775 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2777 /* Check if the instruction could not be found or is a gap. */
2778 if (found == 0 || btrace_insn_get (&it) == NULL)
2779 error (_("No such instruction."));
2781 record_btrace_set_replay (tp, &it);
2784 /* The to_record_stop_replaying method of target record-btrace. */
2787 record_btrace_stop_replaying_all (struct target_ops *self)
2789 struct thread_info *tp;
2791 ALL_NON_EXITED_THREADS (tp)
2792 record_btrace_stop_replaying (tp);
2795 /* The to_execution_direction target method. */
2797 static enum exec_direction_kind
2798 record_btrace_execution_direction (struct target_ops *self)
2800 return record_btrace_resume_exec_dir;
2803 /* The to_prepare_to_generate_core target method. */
2806 record_btrace_prepare_to_generate_core (struct target_ops *self)
2808 record_btrace_generating_corefile = 1;
2811 /* The to_done_generating_core target method. */
2814 record_btrace_done_generating_core (struct target_ops *self)
2816 record_btrace_generating_corefile = 0;
2819 /* Initialize the record-btrace target ops. */
2822 init_record_btrace_ops (void)
2824 struct target_ops *ops;
2826 ops = &record_btrace_ops;
2827 ops->to_shortname = "record-btrace";
2828 ops->to_longname = "Branch tracing target";
2829 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2830 ops->to_open = record_btrace_open;
2831 ops->to_close = record_btrace_close;
2832 ops->to_async = record_btrace_async;
2833 ops->to_detach = record_detach;
2834 ops->to_disconnect = record_btrace_disconnect;
2835 ops->to_mourn_inferior = record_mourn_inferior;
2836 ops->to_kill = record_kill;
2837 ops->to_stop_recording = record_btrace_stop_recording;
2838 ops->to_info_record = record_btrace_info;
2839 ops->to_insn_history = record_btrace_insn_history;
2840 ops->to_insn_history_from = record_btrace_insn_history_from;
2841 ops->to_insn_history_range = record_btrace_insn_history_range;
2842 ops->to_call_history = record_btrace_call_history;
2843 ops->to_call_history_from = record_btrace_call_history_from;
2844 ops->to_call_history_range = record_btrace_call_history_range;
2845 ops->to_record_method = record_btrace_record_method;
2846 ops->to_record_is_replaying = record_btrace_is_replaying;
2847 ops->to_record_will_replay = record_btrace_will_replay;
2848 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2849 ops->to_xfer_partial = record_btrace_xfer_partial;
2850 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2851 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2852 ops->to_fetch_registers = record_btrace_fetch_registers;
2853 ops->to_store_registers = record_btrace_store_registers;
2854 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2855 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2856 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2857 ops->to_resume = record_btrace_resume;
2858 ops->to_commit_resume = record_btrace_commit_resume;
2859 ops->to_wait = record_btrace_wait;
2860 ops->to_stop = record_btrace_stop;
2861 ops->to_update_thread_list = record_btrace_update_thread_list;
2862 ops->to_thread_alive = record_btrace_thread_alive;
2863 ops->to_goto_record_begin = record_btrace_goto_begin;
2864 ops->to_goto_record_end = record_btrace_goto_end;
2865 ops->to_goto_record = record_btrace_goto;
2866 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2867 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2868 ops->to_supports_stopped_by_sw_breakpoint
2869 = record_btrace_supports_stopped_by_sw_breakpoint;
2870 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2871 ops->to_supports_stopped_by_hw_breakpoint
2872 = record_btrace_supports_stopped_by_hw_breakpoint;
2873 ops->to_execution_direction = record_btrace_execution_direction;
2874 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2875 ops->to_done_generating_core = record_btrace_done_generating_core;
2876 ops->to_stratum = record_stratum;
2877 ops->to_magic = OPS_MAGIC;
2880 /* Start recording in BTS format. */
2883 cmd_record_btrace_bts_start (const char *args, int from_tty)
2885 if (args != NULL && *args != 0)
2886 error (_("Invalid argument."));
2888 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2892 execute_command ("target record-btrace", from_tty);
2894 CATCH (exception, RETURN_MASK_ALL)
2896 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2897 throw_exception (exception);
2902 /* Start recording in Intel Processor Trace format. */
2905 cmd_record_btrace_pt_start (const char *args, int from_tty)
2907 if (args != NULL && *args != 0)
2908 error (_("Invalid argument."));
2910 record_btrace_conf.format = BTRACE_FORMAT_PT;
2914 execute_command ("target record-btrace", from_tty);
2916 CATCH (exception, RETURN_MASK_ALL)
2918 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2919 throw_exception (exception);
2924 /* Alias for "target record". */
2927 cmd_record_btrace_start (const char *args, int from_tty)
2929 if (args != NULL && *args != 0)
2930 error (_("Invalid argument."));
2932 record_btrace_conf.format = BTRACE_FORMAT_PT;
2936 execute_command ("target record-btrace", from_tty);
2938 CATCH (exception, RETURN_MASK_ALL)
2940 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2944 execute_command ("target record-btrace", from_tty);
2946 CATCH (exception, RETURN_MASK_ALL)
2948 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2949 throw_exception (exception);
2956 /* The "set record btrace" command. */
2959 cmd_set_record_btrace (const char *args, int from_tty)
2961 printf_unfiltered (_("\"set record btrace\" must be followed "
2962 "by an appropriate subcommand.\n"));
2963 help_list (set_record_btrace_cmdlist, "set record btrace ",
2964 all_commands, gdb_stdout);
2967 /* The "show record btrace" command. */
2970 cmd_show_record_btrace (const char *args, int from_tty)
2972 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
/* The "show record btrace replay-memory-access" command.

   Print to FILE and use VALUE like the other show-value callbacks in
   this file (e.g. show_record_bts_buffer_size_value) instead of writing
   the global directly to gdb_stdout; VALUE is supplied by the setshow
   machinery from the same replay_memory_access variable.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Replay memory access is %s.\n"), value);
}
2985 /* The "set record btrace bts" command. */
2988 cmd_set_record_btrace_bts (const char *args, int from_tty)
2990 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2991 "by an appropriate subcommand.\n"));
2992 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2993 all_commands, gdb_stdout);
2996 /* The "show record btrace bts" command. */
2999 cmd_show_record_btrace_bts (const char *args, int from_tty)
3001 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3004 /* The "set record btrace pt" command. */
3007 cmd_set_record_btrace_pt (const char *args, int from_tty)
3009 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3010 "by an appropriate subcommand.\n"));
3011 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3012 all_commands, gdb_stdout);
3015 /* The "show record btrace pt" command. */
3018 cmd_show_record_btrace_pt (const char *args, int from_tty)
3020 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
/* The "record bts buffer-size" show value function.

   VALUE is the string rendering of record_btrace_conf.bts.size.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.

   VALUE is the string rendering of record_btrace_conf.pt.size.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3045 /* Initialize btrace commands. */
3048 _initialize_record_btrace (void)
3050 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3051 _("Start branch trace recording."), &record_btrace_cmdlist,
3052 "record btrace ", 0, &record_cmdlist);
3053 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3055 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3057 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3058 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3059 This format may not be available on all processors."),
3060 &record_btrace_cmdlist);
3061 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3063 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3065 Start branch trace recording in Intel Processor Trace format.\n\n\
3066 This format may not be available on all processors."),
3067 &record_btrace_cmdlist);
3068 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3070 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3071 _("Set record options"), &set_record_btrace_cmdlist,
3072 "set record btrace ", 0, &set_record_cmdlist);
3074 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3075 _("Show record options"), &show_record_btrace_cmdlist,
3076 "show record btrace ", 0, &show_record_cmdlist);
3078 add_setshow_enum_cmd ("replay-memory-access", no_class,
3079 replay_memory_access_types, &replay_memory_access, _("\
3080 Set what memory accesses are allowed during replay."), _("\
3081 Show what memory accesses are allowed during replay."),
3082 _("Default is READ-ONLY.\n\n\
3083 The btrace record target does not trace data.\n\
3084 The memory therefore corresponds to the live target and not \
3085 to the current replay position.\n\n\
3086 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3087 When READ-WRITE, allow accesses to read-only and read-write memory during \
3089 NULL, cmd_show_replay_memory_access,
3090 &set_record_btrace_cmdlist,
3091 &show_record_btrace_cmdlist);
3093 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3094 _("Set record btrace bts options"),
3095 &set_record_btrace_bts_cmdlist,
3096 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3098 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3099 _("Show record btrace bts options"),
3100 &show_record_btrace_bts_cmdlist,
3101 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3103 add_setshow_uinteger_cmd ("buffer-size", no_class,
3104 &record_btrace_conf.bts.size,
3105 _("Set the record/replay bts buffer size."),
3106 _("Show the record/replay bts buffer size."), _("\
3107 When starting recording request a trace buffer of this size. \
3108 The actual buffer size may differ from the requested size. \
3109 Use \"info record\" to see the actual buffer size.\n\n\
3110 Bigger buffers allow longer recording but also take more time to process \
3111 the recorded execution trace.\n\n\
3112 The trace buffer size may not be changed while recording."), NULL,
3113 show_record_bts_buffer_size_value,
3114 &set_record_btrace_bts_cmdlist,
3115 &show_record_btrace_bts_cmdlist);
3117 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3118 _("Set record btrace pt options"),
3119 &set_record_btrace_pt_cmdlist,
3120 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3122 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3123 _("Show record btrace pt options"),
3124 &show_record_btrace_pt_cmdlist,
3125 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3127 add_setshow_uinteger_cmd ("buffer-size", no_class,
3128 &record_btrace_conf.pt.size,
3129 _("Set the record/replay pt buffer size."),
3130 _("Show the record/replay pt buffer size."), _("\
3131 Bigger buffers allow longer recording but also take more time to process \
3132 the recorded execution.\n\
3133 The actual buffer size may differ from the requested size. Use \"info record\" \
3134 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3135 &set_record_btrace_pt_cmdlist,
3136 &show_record_btrace_pt_cmdlist);
3138 init_record_btrace_ops ();
3139 add_target (&record_btrace_ops);
3141 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3144 record_btrace_conf.bts.size = 64 * 1024;
3145 record_btrace_conf.pt.size = 16 * 1024;