/* Output Dwarf2 format symbol table information from GCC.
- Copyright (C) 1992-2015 Free Software Foundation, Inc.
+ Copyright (C) 1992-2016 Free Software Foundation, Inc.
Contributed by Gary Funck (gary@intrepid.com).
Derived from DWARF 1 implementation of Ron Guilmette (rfg@monkeys.com).
Extensively modified by Jason Merrill (jason@cygnus.com).
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "target.h"
+#include "function.h"
#include "rtl.h"
-#include "alias.h"
#include "tree.h"
-#include "fold-const.h"
+#include "tm_p.h"
#include "stringpool.h"
+#include "insn-config.h"
+#include "ira.h"
+#include "cgraph.h"
+#include "diagnostic.h"
+#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
-#include "function.h"
-#include "emit-rtl.h"
#include "version.h"
#include "flags.h"
-#include "regs.h"
#include "rtlhash.h"
-#include "insn-config.h"
#include "reload.h"
#include "output.h"
-#include "expmed.h"
-#include "dojump.h"
-#include "explow.h"
-#include "calls.h"
-#include "stmt.h"
#include "expr.h"
-#include "except.h"
-#include "dwarf2.h"
#include "dwarf2out.h"
#include "dwarf2asm.h"
#include "toplev.h"
#include "md5.h"
-#include "tm_p.h"
-#include "diagnostic.h"
#include "tree-pretty-print.h"
#include "debug.h"
-#include "target.h"
#include "common/common-target.h"
#include "langhooks.h"
-#include "cgraph.h"
-#include "ira.h"
#include "lra.h"
#include "dumpfile.h"
#include "opts.h"
static rtx_insn *cached_next_real_insn;
static void dwarf2out_decl (tree);
+#ifndef XCOFF_DEBUGGING_INFO
+#define XCOFF_DEBUGGING_INFO 0
+#endif
+
+#ifndef HAVE_XCOFF_DWARF_EXTRAS
+#define HAVE_XCOFF_DWARF_EXTRAS 0
+#endif
+
#ifdef VMS_DEBUGGING_INFO
int vms_file_stats_name (const char *, long long *, long *, char *, int *);
#endif
/* Get the number of HOST_WIDE_INTs needed to represent the precision
- of the number. */
+ of the number. Some constants have a large uniform precision, so
+ we get the precision needed for the actual value of the number. */
static unsigned int
get_full_len (const wide_int &op)
{
- return ((op.get_precision () + HOST_BITS_PER_WIDE_INT - 1)
+ int prec = wi::min_precision (op, UNSIGNED);
+ return ((prec + HOST_BITS_PER_WIDE_INT - 1)
/ HOST_BITS_PER_WIDE_INT);
}
for collect2 the first time around. */
static void
-switch_to_eh_frame_section (bool back)
+switch_to_eh_frame_section (bool back ATTRIBUTE_UNUSED)
{
- tree label;
-
-#ifdef EH_FRAME_SECTION_NAME
if (eh_frame_section == 0)
{
int flags;
}
else
flags = SECTION_WRITE;
+
+#ifdef EH_FRAME_SECTION_NAME
eh_frame_section = get_section (EH_FRAME_SECTION_NAME, flags, NULL);
- }
+#else
+ eh_frame_section = ((flags == SECTION_WRITE)
+ ? data_section : readonly_data_section);
#endif /* EH_FRAME_SECTION_NAME */
+ }
- if (eh_frame_section)
- switch_to_section (eh_frame_section);
- else
- {
- /* We have no special eh_frame section. Put the information in
- the data section and emit special labels to guide collect2. */
- switch_to_section (data_section);
+ switch_to_section (eh_frame_section);
- if (!back)
- {
- label = get_file_function_name ("F");
- ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
- targetm.asm_out.globalize_label (asm_out_file,
- IDENTIFIER_POINTER (label));
- ASM_OUTPUT_LABEL (asm_out_file, IDENTIFIER_POINTER (label));
- }
+#ifdef EH_FRAME_THROUGH_COLLECT2
+ /* We have no special eh_frame section. Emit special labels to guide
+ collect2. */
+ if (!back)
+ {
+ tree label = get_file_function_name ("F");
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE));
+ targetm.asm_out.globalize_label (asm_out_file,
+ IDENTIFIER_POINTER (label));
+ ASM_OUTPUT_LABEL (asm_out_file, IDENTIFIER_POINTER (label));
}
+#endif
}
/* Switch [BACK] to the eh or debug frame table section, depending on
for_eh + j);
ASM_GENERATE_INTERNAL_LABEL (l1, FDE_AFTER_SIZE_LABEL, for_eh + j);
ASM_GENERATE_INTERNAL_LABEL (l2, FDE_END_LABEL, for_eh + j);
- if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4 && !for_eh)
- dw2_asm_output_data (4, 0xffffffff, "Initial length escape value"
- " indicating 64-bit DWARF extension");
- dw2_asm_output_delta (for_eh ? 4 : DWARF_OFFSET_SIZE, l2, l1,
- "FDE Length");
+ if (!XCOFF_DEBUGGING_INFO || for_eh)
+ {
+ if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4 && !for_eh)
+ dw2_asm_output_data (4, 0xffffffff, "Initial length escape value"
+ " indicating 64-bit DWARF extension");
+ dw2_asm_output_delta (for_eh ? 4 : DWARF_OFFSET_SIZE, l2, l1,
+ "FDE Length");
+ }
ASM_OUTPUT_LABEL (asm_out_file, l1);
if (for_eh)
/* Output the CIE. */
ASM_GENERATE_INTERNAL_LABEL (l1, CIE_AFTER_SIZE_LABEL, for_eh);
ASM_GENERATE_INTERNAL_LABEL (l2, CIE_END_LABEL, for_eh);
- if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4 && !for_eh)
- dw2_asm_output_data (4, 0xffffffff,
- "Initial length escape value indicating 64-bit DWARF extension");
- dw2_asm_output_delta (for_eh ? 4 : DWARF_OFFSET_SIZE, l2, l1,
- "Length of Common Information Entry");
+ if (!XCOFF_DEBUGGING_INFO || for_eh)
+ {
+ if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4 && !for_eh)
+ dw2_asm_output_data (4, 0xffffffff,
+ "Initial length escape value indicating 64-bit DWARF extension");
+ dw2_asm_output_delta (for_eh ? 4 : DWARF_OFFSET_SIZE, l2, l1,
+ "Length of Common Information Entry");
+ }
ASM_OUTPUT_LABEL (asm_out_file, l1);
/* Now that the CIE pointer is PC-relative for EH,
} dw_loc_list_node;
static dw_loc_descr_ref int_loc_descriptor (HOST_WIDE_INT);
+static dw_loc_descr_ref uint_loc_descriptor (unsigned HOST_WIDE_INT);
/* Convert a DWARF stack opcode into its string name. */
dw_loc_descr_ref descr = ggc_cleared_alloc<dw_loc_descr_node> ();
descr->dw_loc_opc = op;
+#if CHECKING_P
+ descr->dw_loc_frame_offset = -1;
+#endif
descr->dw_loc_oprnd1.val_class = dw_val_class_unsigned_const;
descr->dw_loc_oprnd1.val_entry = NULL;
descr->dw_loc_oprnd1.v.val_unsigned = oprnd1;
case dw_val_class_vms_delta:
return (!strcmp (a->v.val_vms_delta.lbl1, b->v.val_vms_delta.lbl1)
&& !strcmp (a->v.val_vms_delta.lbl1, b->v.val_vms_delta.lbl1));
+
+ case dw_val_class_discr_value:
+ return (a->v.val_discr_value.pos == b->v.val_discr_value.pos
+ && a->v.val_discr_value.v.uval == b->v.val_discr_value.v.uval);
+ case dw_val_class_discr_list:
+ /* It makes no sense comparing two discriminant value lists. */
+ return false;
}
gcc_unreachable ();
}
return size;
}
+/* Return the size of the value in a DW_AT_discr_value attribute. */
+
+static int
+size_of_discr_value (dw_discr_value *discr_value)
+{
+ if (discr_value->pos)
+ return size_of_uleb128 (discr_value->v.uval);
+ else
+ return size_of_sleb128 (discr_value->v.sval);
+}
+
+/* Return the size of the value in a DW_discr_list attribute. */
+
+static int
+size_of_discr_list (dw_discr_list_ref discr_list)
+{
+ int size = 0;
+
+ for (dw_discr_list_ref list = discr_list;
+ list != NULL;
+ list = list->dw_discr_next)
+ {
+ /* One byte for the discriminant value descriptor, and then one or two
+ LEB128 numbers, depending on whether it's a single case label or a
+ range label. */
+ size += 1;
+ size += size_of_discr_value (&list->dw_discr_lower_bound);
+ if (list->dw_discr_range != 0)
+ size += size_of_discr_value (&list->dw_discr_upper_bound);
+ }
+ return size;
+}
+
static HOST_WIDE_INT extract_int (const unsigned char *, unsigned);
static void get_ref_die_offset_label (char *, dw_die_ref);
static unsigned long int get_ref_die_offset (dw_die_ref);
"(index into .debug_addr)");
break;
+      case DW_OP_call2:
+      case DW_OP_call4:
+      {
+        unsigned long die_offset
+          = get_ref_die_offset (val1->v.val_die_ref.die);
+        /* Make sure the offset has been computed and that we can encode it
+           as an operand.  Note the parentheses around the conditional: ?:
+           binds more loosely than &&, so without them the whole assertion
+           would parse as (cond ? 0xffff : 0xffffffff) and always hold.  */
+        gcc_assert (die_offset > 0
+                    && die_offset <= ((loc->dw_loc_opc == DW_OP_call2)
+                                      ? 0xffff
+                                      : 0xffffffff));
+        dw2_asm_output_data ((loc->dw_loc_opc == DW_OP_call2) ? 2 : 4,
+                             die_offset, NULL);
+      }
+      break;
+
case DW_OP_GNU_implicit_pointer:
{
char label[MAX_ARTIFICIAL_LABEL_BYTES
dw_die_ref);
static void dwarf2out_abstract_function (tree);
static void dwarf2out_var_location (rtx_insn *);
+static void dwarf2out_size_function (tree);
static void dwarf2out_begin_function (tree);
static void dwarf2out_end_function (unsigned int);
static void dwarf2out_register_main_translation_unit (tree unit);
debug_nothing_rtx_code_label, /* label */
debug_nothing_int, /* handle_pch */
dwarf2out_var_location,
+ dwarf2out_size_function, /* size_function */
dwarf2out_switch_text_section,
dwarf2out_set_name,
1, /* start_end_main_source_file */
debug_nothing_rtx_code_label, /* label */
debug_nothing_int, /* handle_pch */
debug_nothing_rtx_insn, /* var_location */
+ debug_nothing_tree, /* size_function */
debug_nothing_void, /* switch_text_section */
debug_nothing_tree_tree, /* set_name */
0, /* start_end_main_source_file */
/* Number of elements in abbrev_die_table currently in use. */
static GTY(()) unsigned abbrev_die_table_in_use;
+/* A hash map to remember the stack usage for DWARF procedures. The value
+ stored is the stack size difference between before the DWARF procedure
+ invokation and after it returned. In other words, for a DWARF procedure
+ that consumes N stack slots and that pushes M ones, this stores M - N. */
+static hash_map<dw_die_ref, int> *dwarf_proc_stack_usage_map;
+
/* Size (in elements) of increments by which we may expand the
abbrev_die_table. */
#define ABBREV_DIE_TABLE_INCREMENT 256
/* True if .debug_macinfo or .debug_macros section is going to be
emitted. */
#define have_macinfo \
- (debug_info_level >= DINFO_LEVEL_VERBOSE \
+ ((!XCOFF_DEBUGGING_INFO || HAVE_XCOFF_DWARF_EXTRAS) \
+ && debug_info_level >= DINFO_LEVEL_VERBOSE \
&& !macinfo_table->is_empty ())
/* Array of dies for which we should generate .debug_ranges info. */
static vec<dw_die_ref> base_types;
+/* Pointer to vector of DW_TAG_string_type DIEs that need finalization
+ once all arguments are parsed. */
+static vec<dw_die_ref> *string_types;
+
/* Flags to represent a set of attribute classes for attributes that represent
a scalar value (bounds, pointers, ...). */
enum dw_scalar_form
static void add_pubname_string (const char *, dw_die_ref);
static void add_pubtype (tree, dw_die_ref);
static void output_pubnames (vec<pubname_entry, va_gc> *);
-static void output_aranges (unsigned long);
+static void output_aranges (void);
static unsigned int add_ranges_num (int);
static unsigned int add_ranges (const_tree);
static void add_ranges_by_labels (dw_die_ref, const char *, const char *,
static dw_line_info_table *new_line_info_table (void);
static void output_line_info (bool);
static void output_file_names (void);
-static dw_die_ref base_type_die (tree);
+static dw_die_ref base_type_die (tree, bool);
static int is_base_type (tree);
-static dw_die_ref subrange_type_die (tree, tree, tree, dw_die_ref);
+static dw_die_ref subrange_type_die (tree, tree, tree, tree, dw_die_ref);
static int decl_quals (const_tree);
-static dw_die_ref modified_type_die (tree, int, dw_die_ref);
+static dw_die_ref modified_type_die (tree, int, bool, dw_die_ref);
static dw_die_ref generic_parameter_die (tree, tree, bool, dw_die_ref);
static dw_die_ref template_parameter_pack_die (tree, tree, dw_die_ref);
static int type_is_enum (const_tree);
static dw_loc_descr_ref loc_descriptor (rtx, machine_mode mode,
enum var_init_status);
struct loc_descr_context;
+static void add_loc_descr_to_each (dw_loc_list_ref list, dw_loc_descr_ref ref);
+static void add_loc_list (dw_loc_list_ref *ret, dw_loc_list_ref list);
static dw_loc_list_ref loc_list_from_tree (tree, int,
const struct loc_descr_context *);
static dw_loc_descr_ref loc_descriptor_from_tree (tree, int,
static unsigned int simple_type_align_in_bits (const_tree);
static unsigned int simple_decl_align_in_bits (const_tree);
static unsigned HOST_WIDE_INT simple_type_size_in_bits (const_tree);
-static HOST_WIDE_INT field_byte_offset (const_tree);
+struct vlr_context;
+static dw_loc_descr_ref field_byte_offset (const_tree, struct vlr_context *,
+ HOST_WIDE_INT *);
static void add_AT_location_description (dw_die_ref, enum dwarf_attribute,
dw_loc_list_ref);
-static void add_data_member_location_attribute (dw_die_ref, tree);
+static void add_data_member_location_attribute (dw_die_ref, tree,
+ struct vlr_context *);
static bool add_const_value_attribute (dw_die_ref, rtx);
static void insert_int (HOST_WIDE_INT, unsigned, unsigned char *);
static void insert_wide_int (const wide_int &, unsigned char *, int);
static void insert_float (const_rtx, unsigned char *);
static rtx rtl_for_decl_location (tree);
-static bool add_location_or_const_value_attribute (dw_die_ref, tree, bool,
- enum dwarf_attribute);
+static bool add_location_or_const_value_attribute (dw_die_ref, tree, bool);
static bool tree_add_const_value_attribute (dw_die_ref, tree);
static bool tree_add_const_value_attribute_for_decl (dw_die_ref, tree);
static void add_name_attribute (dw_die_ref, const char *);
const struct loc_descr_context *);
static void add_subscript_info (dw_die_ref, tree, bool);
static void add_byte_size_attribute (dw_die_ref, tree);
-static void add_bit_offset_attribute (dw_die_ref, tree);
+static inline void add_bit_offset_attribute (dw_die_ref, tree,
+ struct vlr_context *);
static void add_bit_size_attribute (dw_die_ref, tree);
static void add_prototyped_attribute (dw_die_ref, tree);
static dw_die_ref add_abstract_origin_attribute (dw_die_ref, tree);
static void add_pure_or_virtual_attribute (dw_die_ref, tree);
static void add_src_coords_attributes (dw_die_ref, tree);
static void add_name_and_src_coords_attributes (dw_die_ref, tree);
+static void add_discr_value (dw_die_ref, dw_discr_value *);
+static void add_discr_list (dw_die_ref, dw_discr_list_ref);
+static inline dw_discr_list_ref AT_discr_list (dw_attr_node *);
static void push_decl_scope (tree);
static void pop_decl_scope (void);
static dw_die_ref scope_die_for (tree, dw_die_ref);
static inline int local_scope_p (dw_die_ref);
static inline int class_scope_p (dw_die_ref);
static inline int class_or_namespace_scope_p (dw_die_ref);
-static void add_type_attribute (dw_die_ref, tree, int, dw_die_ref);
+static void add_type_attribute (dw_die_ref, tree, int, bool, dw_die_ref);
static void add_calling_convention_attribute (dw_die_ref, tree);
static const char *type_tag (const_tree);
static tree member_declared_type (const_tree);
static void gen_label_die (tree, dw_die_ref);
static void gen_lexical_block_die (tree, dw_die_ref);
static void gen_inlined_subroutine_die (tree, dw_die_ref);
-static void gen_field_die (tree, dw_die_ref);
+static void gen_field_die (tree, struct vlr_context *, dw_die_ref);
static void gen_ptr_to_mbr_type_die (tree, dw_die_ref);
static dw_die_ref gen_compile_unit_die (const char *);
-static void gen_inheritance_die (tree, tree, dw_die_ref);
+static void gen_inheritance_die (tree, tree, tree, dw_die_ref);
static void gen_member_die (tree, dw_die_ref);
static void gen_struct_or_union_type_die (tree, dw_die_ref,
enum debug_info_usage);
static void gen_type_die (tree, dw_die_ref);
static void gen_block_die (tree, dw_die_ref);
static void decls_for_scope (tree, dw_die_ref);
-static inline int is_redundant_typedef (const_tree);
static bool is_naming_typedef_decl (const_tree);
static inline dw_die_ref get_context_die (tree);
static void gen_namespace_die (tree, dw_die_ref);
static dw_die_ref gen_namelist_decl (tree, dw_die_ref, tree);
-static dw_die_ref gen_decl_die (tree, tree, dw_die_ref);
+static dw_die_ref gen_decl_die (tree, tree, struct vlr_context *, dw_die_ref);
static dw_die_ref force_decl_die (tree);
static dw_die_ref force_type_die (tree);
static dw_die_ref setup_namespace_context (tree, dw_die_ref);
add_AT_die_ref (dw_die_ref die, enum dwarf_attribute attr_kind, dw_die_ref targ_die)
{
dw_attr_node attr;
+ gcc_checking_assert (targ_die != NULL);
-#ifdef ENABLE_CHECKING
- gcc_assert (targ_die != NULL);
-#else
/* With LTO we can end up trying to reference something we didn't create
a DIE for. Avoid crashing later on a NULL referenced DIE. */
if (targ_die == NULL)
return;
-#endif
attr.dw_attr = attr_kind;
attr.dw_attr_val.val_class = dw_val_class_die_ref;
{
dw_attr_node attr;
+ if (XCOFF_DEBUGGING_INFO && !HAVE_XCOFF_DWARF_EXTRAS)
+ return;
+
attr.dw_attr = attr_kind;
attr.dw_attr_val.val_class = dw_val_class_loc_list;
attr.dw_attr_val.val_entry = NULL;
die->die_child = child_die;
}
+/* Like add_child_die, but put CHILD_DIE after AFTER_DIE. */
+
+static void
+add_child_die_after (dw_die_ref die, dw_die_ref child_die,
+ dw_die_ref after_die)
+{
+ gcc_assert (die
+ && child_die
+ && after_die
+ && die->die_child
+ && die != child_die);
+
+ child_die->die_parent = die;
+ child_die->die_sib = after_die->die_sib;
+ after_die->die_sib = child_die;
+ if (die->die_child == after_die)
+ die->die_child = child_die;
+}
+
/* Unassociate CHILD from its parent, and make its parent be
NEW_PARENT. */
&& TREE_CODE (TREE_OPERAND (realdecl, 0)) == ADDR_EXPR))
{
HOST_WIDE_INT maxsize;
- tree innerdecl;
- innerdecl
- = get_ref_base_and_extent (realdecl, &bitpos, &bitsize, &maxsize);
+ bool reverse;
+ tree innerdecl
+ = get_ref_base_and_extent (realdecl, &bitpos, &bitsize, &maxsize,
+ &reverse);
if (!DECL_P (innerdecl)
|| DECL_IGNORED_P (innerdecl)
|| TREE_STATIC (innerdecl)
fprintf (outfile, "%02x", sig[i] & 0xff);
}
+/* Print the discriminant value DISCR_VALUE to OUTFILE.  POS selects which
+   union member is valid: nonzero means the unsigned member (uval), zero
+   means the signed one (sval) — the same convention used by
+   size_of_discr_value and output_discr_value.  Pair each member with the
+   matching printf format so value and specifier always agree.  */
+
+static inline void
+print_discr_value (FILE *outfile, dw_discr_value *discr_value)
+{
+  if (discr_value->pos)
+    fprintf (outfile, HOST_WIDE_INT_PRINT_UNSIGNED, discr_value->v.uval);
+  else
+    fprintf (outfile, HOST_WIDE_INT_PRINT_DEC, discr_value->v.sval);
+}
+
static void print_loc_descr (dw_loc_descr_ref, FILE *);
/* Print the value associated to the VAL DWARF value node to OUTFILE. If
fprintf (outfile, "%02x", val->v.val_data8[i]);
break;
}
+ case dw_val_class_discr_value:
+ print_discr_value (outfile, &val->v.val_discr_value);
+ break;
+ case dw_val_class_discr_list:
+ for (dw_discr_list_ref node = val->v.val_discr_list;
+ node != NULL;
+ node = node->dw_discr_next)
+ {
+ if (node->dw_discr_range)
+ {
+ fprintf (outfile, " .. ");
+ print_discr_value (outfile, &node->dw_discr_lower_bound);
+ print_discr_value (outfile, &node->dw_discr_upper_bound);
+ }
+ else
+ print_discr_value (outfile, &node->dw_discr_lower_bound);
+
+ if (node->dw_discr_next != NULL)
+ fprintf (outfile, " | ");
+ }
default:
break;
}
print_die (comp_unit_die (), stderr);
}
-#ifdef ENABLE_CHECKING
/* Sanity checks on DIEs. */
static void
&& a->dw_attr != DW_AT_GNU_all_call_sites);
}
}
-#endif
\f
/* Start a new compilation unit DIE for an include file. OLD_UNIT is the CU
for the enclosing include file, if any. BINCL_DIE is the DW_TAG_GNU_BINCL
return skeleton;
}
+static void
+copy_dwarf_procs_ref_in_attrs (dw_die_ref die,
+ comdat_type_node *type_node,
+ hash_map<dw_die_ref, dw_die_ref> &copied_dwarf_procs);
+
+/* Helper for copy_dwarf_procs_ref_in_dies. Make a copy of the DIE DWARF
+ procedure, put it under TYPE_NODE and return the copy. Continue looking for
+ DWARF procedure references in the DW_AT_location attribute. */
+
+static dw_die_ref
+copy_dwarf_procedure (dw_die_ref die,
+ comdat_type_node *type_node,
+ hash_map<dw_die_ref, dw_die_ref> &copied_dwarf_procs)
+{
+ /* We do this for COMDAT section, which is DWARFv4 specific, so
+ DWARF procedure are always DW_TAG_dwarf_procedure DIEs (unlike
+ DW_TAG_variable in DWARFv3). */
+ gcc_assert (die->die_tag == DW_TAG_dwarf_procedure);
+
+ /* DWARF procedures are not supposed to have children... */
+ gcc_assert (die->die_child == NULL);
+
+ /* ... and they are supposed to have only one attribute: DW_AT_location. */
+ gcc_assert (vec_safe_length (die->die_attr) == 1
+ && ((*die->die_attr)[0].dw_attr == DW_AT_location));
+
+ /* Do not copy more than once DWARF procedures. */
+ bool existed;
+ dw_die_ref &die_copy = copied_dwarf_procs.get_or_insert (die, &existed);
+ if (existed)
+ return die_copy;
+
+ die_copy = clone_die (die);
+ add_child_die (type_node->root_die, die_copy);
+ copy_dwarf_procs_ref_in_attrs (die_copy, type_node, copied_dwarf_procs);
+ return die_copy;
+}
+
+/* Helper for copy_dwarf_procs_ref_in_dies. Look for references to DWARF
+ procedures in DIE's attributes. */
+
+static void
+copy_dwarf_procs_ref_in_attrs (dw_die_ref die,
+			       comdat_type_node *type_node,
+			       hash_map<dw_die_ref, dw_die_ref> &copied_dwarf_procs)
+{
+  dw_attr_node *a;
+  unsigned i;
+
+  /* Scan every location-expression attribute for opcodes that reference
+     another DIE (DWARF procedure calls) and redirect each reference to a
+     copy of the procedure placed under TYPE_NODE.  */
+  FOR_EACH_VEC_SAFE_ELT (die->die_attr, i, a)
+    {
+      dw_loc_descr_ref loc;
+
+      if (a->dw_attr_val.val_class != dw_val_class_loc)
+	continue;
+
+      for (loc = a->dw_attr_val.v.val_loc; loc != NULL; loc = loc->dw_loc_next)
+	{
+	  switch (loc->dw_loc_opc)
+	    {
+	    case DW_OP_call2:
+	    case DW_OP_call4:
+	    case DW_OP_call_ref:
+	      gcc_assert (loc->dw_loc_oprnd1.val_class
+			  == dw_val_class_die_ref);
+	      loc->dw_loc_oprnd1.v.val_die_ref.die
+		= copy_dwarf_procedure (loc->dw_loc_oprnd1.v.val_die_ref.die,
+					type_node,
+					copied_dwarf_procs);
+	      /* Explicit break: the previous fallthrough into 'default'
+		 was harmless but misleading.  */
+	      break;
+
+	    default:
+	      break;
+	    }
+	}
+    }
+}
+
+/* Copy DWARF procedures that are referenced by the DIE tree to TREE_NODE and
+ rewrite references to point to the copies.
+
+ References are looked for in DIE's attributes and recursively in all its
+ children attributes that are location descriptions. COPIED_DWARF_PROCS is a
+ mapping from old DWARF procedures to their copy. It is used not to copy
+ twice the same DWARF procedure under TYPE_NODE. */
+
+static void
+copy_dwarf_procs_ref_in_dies (dw_die_ref die,
+ comdat_type_node *type_node,
+ hash_map<dw_die_ref, dw_die_ref> &copied_dwarf_procs)
+{
+ dw_die_ref c;
+
+ copy_dwarf_procs_ref_in_attrs (die, type_node, copied_dwarf_procs);
+ FOR_EACH_CHILD (die, c, copy_dwarf_procs_ref_in_dies (c,
+ type_node,
+ copied_dwarf_procs));
+}
+
/* Traverse the DIE and set up additional .debug_types sections for each
type worthy of being placed in a COMDAT section. */
/* Add the DIE to the new compunit. */
add_child_die (unit, c);
+ /* Types can reference DWARF procedures for type size or data location
+ expressions. Calls in DWARF expressions cannot target procedures
+ that are not in the same section. So we must copy DWARF procedures
+ along with this type and then rewrite references to them. */
+ hash_map<dw_die_ref, dw_die_ref> copied_dwarf_procs;
+ copy_dwarf_procs_ref_in_dies (c, type_node, copied_dwarf_procs);
+
if (replacement != NULL)
c = replacement;
}
case dw_val_class_high_pc:
size += DWARF2_ADDR_SIZE;
break;
+ case dw_val_class_discr_value:
+ size += size_of_discr_value (&a->dw_attr_val.v.val_discr_value);
+ break;
+ case dw_val_class_discr_list:
+ {
+ unsigned block_size = size_of_discr_list (AT_discr_list (a));
+
+ /* This is a block, so we have the block length and then its
+ data. */
+ size += constant_size (block_size) + block_size;
+ }
+ break;
default:
gcc_unreachable ();
}
gcc_unreachable ();
}
+ case dw_val_class_discr_value:
+ return (a->dw_attr_val.v.val_discr_value.pos
+ ? DW_FORM_udata
+ : DW_FORM_sdata);
+ case dw_val_class_discr_list:
+ switch (constant_size (size_of_discr_list (AT_discr_list (a))))
+ {
+ case 1:
+ return DW_FORM_block1;
+ case 2:
+ return DW_FORM_block2;
+ case 4:
+ return DW_FORM_block4;
+ default:
+ gcc_unreachable ();
+ }
+
default:
gcc_unreachable ();
}
dw2_asm_output_data (1, sig[i], i == 0 ? "%s" : NULL, name);
}
+/* Output a discriminant value. */
+
+static inline void
+output_discr_value (dw_discr_value *discr_value, const char *name)
+{
+ if (discr_value->pos)
+ dw2_asm_output_data_uleb128 (discr_value->v.uval, "%s", name);
+ else
+ dw2_asm_output_data_sleb128 (discr_value->v.sval, "%s", name);
+}
+
/* Output the DIE and its attributes. Called recursively to generate
the definitions of each child DIE. */
{
dw2_asm_output_data (l, a->dw_attr_val.v.val_wide->elt (i),
"%s", name);
- name = NULL;
+ name = "";
}
else
for (i = 0; i < len; ++i)
{
dw2_asm_output_data (l, a->dw_attr_val.v.val_wide->elt (i),
"%s", name);
- name = NULL;
+ name = "";
}
}
break;
get_AT_low_pc (die), "DW_AT_high_pc");
break;
+ case dw_val_class_discr_value:
+ output_discr_value (&a->dw_attr_val.v.val_discr_value, name);
+ break;
+
+ case dw_val_class_discr_list:
+ {
+ dw_discr_list_ref list = AT_discr_list (a);
+ const int size = size_of_discr_list (list);
+
+ /* This is a block, so output its length first. */
+ dw2_asm_output_data (constant_size (size), size,
+ "%s: block size", name);
+
+ for (; list != NULL; list = list->dw_discr_next)
+ {
+ /* One byte for the discriminant value descriptor, and then as
+ many LEB128 numbers as required. */
+ if (list->dw_discr_range)
+ dw2_asm_output_data (1, DW_DSC_range,
+ "%s: DW_DSC_range", name);
+ else
+ dw2_asm_output_data (1, DW_DSC_label,
+ "%s: DW_DSC_label", name);
+
+ output_discr_value (&list->dw_discr_lower_bound, name);
+ if (list->dw_discr_range)
+ output_discr_value (&list->dw_discr_upper_bound, name);
+ }
+ break;
+ }
+
default:
gcc_unreachable ();
}
DWARFv5 draft DIE tags in DWARFv4 format. */
int ver = dwarf_version < 5 ? dwarf_version : 4;
- if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4)
- dw2_asm_output_data (4, 0xffffffff,
- "Initial length escape value indicating 64-bit DWARF extension");
- dw2_asm_output_data (DWARF_OFFSET_SIZE,
- next_die_offset - DWARF_INITIAL_LENGTH_SIZE,
- "Length of Compilation Unit Info");
+ if (!XCOFF_DEBUGGING_INFO)
+ {
+ if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4)
+ dw2_asm_output_data (4, 0xffffffff,
+ "Initial length escape value indicating 64-bit DWARF extension");
+ dw2_asm_output_data (DWARF_OFFSET_SIZE,
+ next_die_offset - DWARF_INITIAL_LENGTH_SIZE,
+ "Length of Compilation Unit Info");
+ }
+
dw2_asm_output_data (2, ver, "DWARF version number");
dw2_asm_output_offset (DWARF_OFFSET_SIZE, abbrev_section_label,
debug_abbrev_section,
unsigned long pubnames_length = size_of_pubnames (names);
pubname_entry *pub;
- if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4)
- dw2_asm_output_data (4, 0xffffffff,
- "Initial length escape value indicating 64-bit DWARF extension");
- dw2_asm_output_data (DWARF_OFFSET_SIZE, pubnames_length, "Pub Info Length");
+ if (!XCOFF_DEBUGGING_INFO)
+ {
+ if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4)
+ dw2_asm_output_data (4, 0xffffffff,
+ "Initial length escape value indicating 64-bit DWARF extension");
+ dw2_asm_output_data (DWARF_OFFSET_SIZE, pubnames_length,
+ "Pub Info Length");
+ }
/* Version number for pubnames/pubtypes is independent of dwarf version. */
dw2_asm_output_data (2, 2, "DWARF Version");
text section generated for this compilation unit. */
static void
-output_aranges (unsigned long aranges_length)
+output_aranges (void)
{
unsigned i;
+ unsigned long aranges_length = size_of_aranges ();
+
+ if (!XCOFF_DEBUGGING_INFO)
+ {
+ if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4)
+ dw2_asm_output_data (4, 0xffffffff,
+ "Initial length escape value indicating 64-bit DWARF extension");
+ dw2_asm_output_data (DWARF_OFFSET_SIZE, aranges_length,
+ "Length of Address Ranges Info");
+ }
- if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4)
- dw2_asm_output_data (4, 0xffffffff,
- "Initial length escape value indicating 64-bit DWARF extension");
- dw2_asm_output_data (DWARF_OFFSET_SIZE, aranges_length,
- "Length of Address Ranges Info");
/* Version number for aranges is still 2, even up to DWARF5. */
dw2_asm_output_data (2, 2, "DWARF Version");
if (dwarf_split_debug_info)
ASM_GENERATE_INTERNAL_LABEL (p1, LN_PROLOG_AS_LABEL, 0);
ASM_GENERATE_INTERNAL_LABEL (p2, LN_PROLOG_END_LABEL, 0);
- if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4)
- dw2_asm_output_data (4, 0xffffffff,
- "Initial length escape value indicating 64-bit DWARF extension");
- dw2_asm_output_delta (DWARF_OFFSET_SIZE, l2, l1,
- "Length of Source Line Info");
+ if (!XCOFF_DEBUGGING_INFO)
+ {
+ if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4)
+ dw2_asm_output_data (4, 0xffffffff,
+ "Initial length escape value indicating 64-bit DWARF extension");
+ dw2_asm_output_delta (DWARF_OFFSET_SIZE, l2, l1,
+ "Length of Source Line Info");
+ }
+
ASM_OUTPUT_LABEL (asm_out_file, l1);
dw2_asm_output_data (2, ver, "DWARF Version");
ASM_OUTPUT_LABEL (asm_out_file, l2);
}
\f
+/* Return true if DW_AT_endianity should be emitted according to REVERSE. */
+
+static inline bool
+need_endianity_attribute_p (bool reverse)
+{
+ return reverse && (dwarf_version >= 3 || !dwarf_strict);
+}
+
/* Given a pointer to a tree node for some base type, return a pointer to
- a DIE that describes the given type.
+ a DIE that describes the given type. REVERSE is true if the type is
+ to be interpreted in the reverse storage order wrt the target order.
This routine must only be called for GCC type nodes that correspond to
Dwarf base (fundamental) types. */
static dw_die_ref
-base_type_die (tree type)
+base_type_die (tree type, bool reverse)
{
dw_die_ref base_type_result;
enum dwarf_type encoding;
+ bool fpt_used = false;
+ struct fixed_point_type_info fpt_info;
+ tree type_bias = NULL_TREE;
if (TREE_CODE (type) == ERROR_MARK || TREE_CODE (type) == VOID_TYPE)
return 0;
break;
}
}
+ if ((dwarf_version >= 3 || !dwarf_strict)
+ && lang_hooks.types.get_fixed_point_type_info)
+ {
+ memset (&fpt_info, 0, sizeof (fpt_info));
+ if (lang_hooks.types.get_fixed_point_type_info (type, &fpt_info))
+ {
+ fpt_used = true;
+ encoding = ((TYPE_UNSIGNED (type))
+ ? DW_ATE_unsigned_fixed
+ : DW_ATE_signed_fixed);
+ break;
+ }
+ }
if (TYPE_STRING_FLAG (type))
{
if (TYPE_UNSIGNED (type))
encoding = DW_ATE_unsigned;
else
encoding = DW_ATE_signed;
+
+ if (!dwarf_strict
+ && lang_hooks.types.get_type_bias)
+ type_bias = lang_hooks.types.get_type_bias (type);
break;
case REAL_TYPE:
add_AT_unsigned (base_type_result, DW_AT_byte_size,
int_size_in_bytes (type));
add_AT_unsigned (base_type_result, DW_AT_encoding, encoding);
+
+ if (need_endianity_attribute_p (reverse))
+ add_AT_unsigned (base_type_result, DW_AT_endianity,
+ BYTES_BIG_ENDIAN ? DW_END_little : DW_END_big);
+
+ if (fpt_used)
+ {
+ switch (fpt_info.scale_factor_kind)
+ {
+ case fixed_point_scale_factor_binary:
+ add_AT_int (base_type_result, DW_AT_binary_scale,
+ fpt_info.scale_factor.binary);
+ break;
+
+ case fixed_point_scale_factor_decimal:
+ add_AT_int (base_type_result, DW_AT_decimal_scale,
+ fpt_info.scale_factor.decimal);
+ break;
+
+ case fixed_point_scale_factor_arbitrary:
+ /* Arbitrary scale factors cannot be described in standard DWARF,
+ yet. */
+ if (!dwarf_strict)
+ {
+ /* Describe the scale factor as a rational constant. */
+ const dw_die_ref scale_factor
+ = new_die (DW_TAG_constant, comp_unit_die (), type);
+
+ add_AT_unsigned (scale_factor, DW_AT_GNU_numerator,
+ fpt_info.scale_factor.arbitrary.numerator);
+ add_AT_int (scale_factor, DW_AT_GNU_denominator,
+ fpt_info.scale_factor.arbitrary.denominator);
+
+ add_AT_die_ref (base_type_result, DW_AT_small, scale_factor);
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (type_bias)
+ add_scalar_info (base_type_result, DW_AT_GNU_bias, type_bias,
+ dw_scalar_form_constant
+ | dw_scalar_form_exprloc
+ | dw_scalar_form_reference,
+ NULL);
+
add_pubtype (type, base_type_result);
return base_type_result;
to a DIE that describes the given type. */
static dw_die_ref
-subrange_type_die (tree type, tree low, tree high, dw_die_ref context_die)
+subrange_type_die (tree type, tree low, tree high, tree bias,
+ dw_die_ref context_die)
{
dw_die_ref subrange_die;
const HOST_WIDE_INT size_in_bytes = int_size_in_bytes (type);
add_bound_info (subrange_die, DW_AT_lower_bound, low, NULL);
if (high)
add_bound_info (subrange_die, DW_AT_upper_bound, high, NULL);
+ if (bias && !dwarf_strict)
+ add_scalar_info (subrange_die, DW_AT_GNU_bias, bias,
+ dw_scalar_form_constant
+ | dw_scalar_form_exprloc
+ | dw_scalar_form_reference,
+ NULL);
return subrange_die;
}
return best_qual;
}
+/* Map each TYPE_QUAL_* flag to the DWARF tag that describes it.  The table
+ order defines the canonical qualifier nesting used when emitting type
+ units (see modified_type_die).  */
+struct dwarf_qual_info_t { int q; enum dwarf_tag t; };
+static const dwarf_qual_info_t dwarf_qual_info[] =
+{
+ { TYPE_QUAL_CONST, DW_TAG_const_type },
+ { TYPE_QUAL_VOLATILE, DW_TAG_volatile_type },
+ { TYPE_QUAL_RESTRICT, DW_TAG_restrict_type },
+ { TYPE_QUAL_ATOMIC, DW_TAG_atomic_type }
+};
+
+/* Number of entries in the dwarf_qual_info table above.  */
+static const unsigned int dwarf_qual_info_size
+ = sizeof (dwarf_qual_info) / sizeof (dwarf_qual_info[0]);
+
+/* If DIE is a qualified DIE of some base DIE with the same parent,
+ return the base DIE, otherwise return NULL. Set MASK to the
+ qualifiers added compared to the returned DIE. */
+
+static dw_die_ref
+qualified_die_p (dw_die_ref die, int *mask, unsigned int depth)
+{
+ unsigned int i;
+ /* DIE itself must be one of the qualifier DIEs from dwarf_qual_info.  */
+ for (i = 0; i < dwarf_qual_info_size; i++)
+ if (die->die_tag == dwarf_qual_info[i].t)
+ break;
+ if (i == dwarf_qual_info_size)
+ return NULL;
+ /* A plain qualifier DIE carries exactly one attribute: DW_AT_type.  */
+ if (vec_safe_length (die->die_attr) != 1)
+ return NULL;
+ dw_die_ref type = get_AT_ref (die, DW_AT_type);
+ if (type == NULL || type->die_parent != die->die_parent)
+ return NULL;
+ *mask |= dwarf_qual_info[i].q;
+ /* Recurse to peel further qualifier layers, at most DEPTH more.  */
+ if (depth)
+ {
+ dw_die_ref ret = qualified_die_p (type, mask, depth - 1);
+ if (ret)
+ return ret;
+ }
+ return type;
+}
+
/* Given a pointer to an arbitrary ..._TYPE tree node, return a debugging
- entry that chains various modifiers in front of the given type. */
+ entry that chains the modifiers specified by CV_QUALS in front of the
+ given type. REVERSE is true if the type is to be interpreted in the
+ reverse storage order wrt the target order. */
static dw_die_ref
-modified_type_die (tree type, int cv_quals, dw_die_ref context_die)
+modified_type_die (tree type, int cv_quals, bool reverse,
+ dw_die_ref context_die)
{
enum tree_code code = TREE_CODE (type);
dw_die_ref mod_type_die;
if (code == ERROR_MARK)
return NULL;
+ if (lang_hooks.types.get_debug_type)
+ {
+ tree debug_type = lang_hooks.types.get_debug_type (type);
+
+ if (debug_type != NULL_TREE && debug_type != type)
+ return modified_type_die (debug_type, cv_quals, reverse, context_die);
+ }
+
cv_quals &= cv_qual_mask;
/* Don't emit DW_TAG_restrict_type for DWARFv2, since it is a type
if (qualified_type)
{
mod_type_die = lookup_type_die (qualified_type);
- if (mod_type_die)
+
+ /* DW_AT_endianity doesn't come from a qualifier on the type. */
+ if (mod_type_die
+ && (!need_endianity_attribute_p (reverse)
+ || !is_base_type (type)
+ || get_AT_unsigned (mod_type_die, DW_AT_endianity)))
return mod_type_die;
}
|| (cv_quals == dquals && DECL_ORIGINAL_TYPE (name) != type))
/* cv-unqualified version of named type. Just use
the unnamed type to which it refers. */
- return modified_type_die (DECL_ORIGINAL_TYPE (name),
- cv_quals, context_die);
+ return modified_type_die (DECL_ORIGINAL_TYPE (name), cv_quals,
+ reverse, context_die);
/* Else cv-qualified version of named type; fall through. */
}
}
if (cv_quals)
{
- struct qual_info { int q; enum dwarf_tag t; };
- static const struct qual_info qual_info[] =
- {
- { TYPE_QUAL_ATOMIC, DW_TAG_atomic_type },
- { TYPE_QUAL_RESTRICT, DW_TAG_restrict_type },
- { TYPE_QUAL_VOLATILE, DW_TAG_volatile_type },
- { TYPE_QUAL_CONST, DW_TAG_const_type },
- };
- int sub_quals;
+ int sub_quals = 0, first_quals = 0;
unsigned i;
+ dw_die_ref first = NULL, last = NULL;
/* Determine a lesser qualified type that most closely matches
this one. Then generate DW_TAG_* entries for the remaining
qualifiers. */
sub_quals = get_nearest_type_subqualifiers (type, cv_quals,
cv_qual_mask);
- mod_type_die = modified_type_die (type, sub_quals, context_die);
+ if (sub_quals && use_debug_types)
+ {
+ bool needed = false;
+ /* If emitting type units, make sure the order of qualifiers
+ is canonical. Thus, start from unqualified type if
+ an earlier qualifier is missing in sub_quals, but some later
+ one is present there. */
+ for (i = 0; i < dwarf_qual_info_size; i++)
+ if (dwarf_qual_info[i].q & cv_quals & ~sub_quals)
+ needed = true;
+ else if (needed && (dwarf_qual_info[i].q & cv_quals))
+ {
+ sub_quals = 0;
+ break;
+ }
+ }
+ mod_type_die = modified_type_die (type, sub_quals, reverse, context_die);
+ if (mod_scope && mod_type_die && mod_type_die->die_parent == mod_scope)
+ {
+ /* As not all intermediate qualified DIEs have corresponding
+ tree types, ensure that qualified DIEs in the same scope
+ as their DW_AT_type are emitted after their DW_AT_type,
+ only with other qualified DIEs for the same type possibly
+ in between them. Determine the range of such qualified
+ DIEs now (first being the base type, last being corresponding
+ last qualified DIE for it). */
+ unsigned int count = 0;
+ first = qualified_die_p (mod_type_die, &first_quals,
+ dwarf_qual_info_size);
+ if (first == NULL)
+ first = mod_type_die;
+ gcc_assert ((first_quals & ~sub_quals) == 0);
+ for (count = 0, last = first;
+ count < (1U << dwarf_qual_info_size);
+ count++, last = last->die_sib)
+ {
+ int quals = 0;
+ if (last == mod_scope->die_child)
+ break;
+ if (qualified_die_p (last->die_sib, &quals, dwarf_qual_info_size)
+ != first)
+ break;
+ }
+ }
- for (i = 0; i < sizeof (qual_info) / sizeof (qual_info[0]); i++)
- if (qual_info[i].q & cv_quals & ~sub_quals)
+ for (i = 0; i < dwarf_qual_info_size; i++)
+ if (dwarf_qual_info[i].q & cv_quals & ~sub_quals)
{
- dw_die_ref d = new_die (qual_info[i].t, mod_scope, type);
- if (mod_type_die)
- add_AT_die_ref (d, DW_AT_type, mod_type_die);
+ dw_die_ref d;
+ if (first && first != last)
+ {
+ for (d = first->die_sib; ; d = d->die_sib)
+ {
+ int quals = 0;
+ qualified_die_p (d, &quals, dwarf_qual_info_size);
+ if (quals == (first_quals | dwarf_qual_info[i].q))
+ break;
+ if (d == last)
+ {
+ d = NULL;
+ break;
+ }
+ }
+ if (d)
+ {
+ mod_type_die = d;
+ continue;
+ }
+ }
+ if (first)
+ {
+ d = ggc_cleared_alloc<die_node> ();
+ d->die_tag = dwarf_qual_info[i].t;
+ add_child_die_after (mod_scope, d, last);
+ last = d;
+ }
+ else
+ d = new_die (dwarf_qual_info[i].t, mod_scope, type);
+ if (mod_type_die)
+ add_AT_die_ref (d, DW_AT_type, mod_type_die);
mod_type_die = d;
+ first_quals |= dwarf_qual_info[i].q;
}
}
- else if (code == POINTER_TYPE)
- {
- mod_type_die = new_die (DW_TAG_pointer_type, mod_scope, type);
- add_AT_unsigned (mod_type_die, DW_AT_byte_size,
- simple_type_size_in_bits (type) / BITS_PER_UNIT);
- item_type = TREE_TYPE (type);
- if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (item_type)))
- add_AT_unsigned (mod_type_die, DW_AT_address_class,
- TYPE_ADDR_SPACE (item_type));
- }
- else if (code == REFERENCE_TYPE)
+ else if (code == POINTER_TYPE || code == REFERENCE_TYPE)
{
- if (TYPE_REF_IS_RVALUE (type) && dwarf_version >= 4)
- mod_type_die = new_die (DW_TAG_rvalue_reference_type, mod_scope,
- type);
- else
- mod_type_die = new_die (DW_TAG_reference_type, mod_scope, type);
+ dwarf_tag tag = DW_TAG_pointer_type;
+ if (code == REFERENCE_TYPE)
+ {
+ if (TYPE_REF_IS_RVALUE (type) && dwarf_version >= 4)
+ tag = DW_TAG_rvalue_reference_type;
+ else
+ tag = DW_TAG_reference_type;
+ }
+ mod_type_die = new_die (tag, mod_scope, type);
+
add_AT_unsigned (mod_type_die, DW_AT_byte_size,
simple_type_size_in_bits (type) / BITS_PER_UNIT);
item_type = TREE_TYPE (type);
- if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (item_type)))
- add_AT_unsigned (mod_type_die, DW_AT_address_class,
- TYPE_ADDR_SPACE (item_type));
+
+ addr_space_t as = TYPE_ADDR_SPACE (item_type);
+ if (!ADDR_SPACE_GENERIC_P (as))
+ {
+ int action = targetm.addr_space.debug (as);
+ if (action >= 0)
+ {
+ /* Positive values indicate an address_class. */
+ add_AT_unsigned (mod_type_die, DW_AT_address_class, action);
+ }
+ else
+ {
+ /* Negative values indicate an (inverted) segment base reg. */
+ dw_loc_descr_ref d
+ = one_reg_loc_descriptor (~action, VAR_INIT_STATUS_INITIALIZED);
+ add_AT_loc (mod_type_die, DW_AT_segment, d);
+ }
+ }
}
else if (code == INTEGER_TYPE
&& TREE_TYPE (type) != NULL_TREE
&& subrange_type_for_debug_p (type, &low, &high))
{
- mod_type_die = subrange_type_die (type, low, high, context_die);
+ tree bias = NULL_TREE;
+ if (lang_hooks.types.get_type_bias)
+ bias = lang_hooks.types.get_type_bias (type);
+ mod_type_die = subrange_type_die (type, low, high, bias, context_die);
item_type = TREE_TYPE (type);
}
else if (is_base_type (type))
- mod_type_die = base_type_die (type);
+ mod_type_die = base_type_die (type, reverse);
else
{
gen_type_die (type, context_die);
copy was created to help us keep track of typedef names) and
that copy might have a different TYPE_UID from the original
..._TYPE node. */
- if (TREE_CODE (type) != VECTOR_TYPE)
+ if (TREE_CODE (type) != VECTOR_TYPE
+ && TREE_CODE (type) != ARRAY_TYPE)
return lookup_type_die (type_main_variant (type));
else
/* Vectors have the debugging information in the type,
types are possible in Ada. */
sub_die = modified_type_die (item_type,
TYPE_QUALS_NO_ADDR_SPACE (item_type),
+ reverse,
context_die);
if (sub_die != NULL)
add_type_attribute (tmpl_die, tmpl_type,
(TREE_THIS_VOLATILE (tmpl_type)
? TYPE_QUAL_VOLATILE : TYPE_UNQUALIFIED),
- parent_die);
+ false, parent_die);
}
else
{
return new_loc_descr (op, i, 0);
}
+/* Likewise, for unsigned constants. */
+
+static dw_loc_descr_ref
+uint_loc_descriptor (unsigned HOST_WIDE_INT i)
+{
+ const unsigned HOST_WIDE_INT max_int = INTTYPE_MAXIMUM (HOST_WIDE_INT);
+ const unsigned HOST_WIDE_INT max_uint
+ = INTTYPE_MAXIMUM (unsigned HOST_WIDE_INT);
+
+ /* If possible, use the clever signed constants handling. */
+ if (i <= max_int)
+ return int_loc_descriptor ((HOST_WIDE_INT) i);
+
+ /* Here, we are left with positive numbers that cannot be represented as
+ HOST_WIDE_INT, i.e.:
+ max (HOST_WIDE_INT) < i <= max (unsigned HOST_WIDE_INT)
+
+ Using a DW_OP_const4u/DW_OP_const8u operation to encode them consumes a
+ lot of bytes, whereas it may be better to output a negative integer:
+ thanks to integer wrapping, we know that:
+ x = x - 2 ** DWARF2_ADDR_SIZE
+ = x - 2 * (max (HOST_WIDE_INT) + 1)
+ So numbers close to max (unsigned HOST_WIDE_INT) could be represented as
+ small negative integers. Let's try that in cases it will clearly improve
+ the encoding: there is no gain turning DW_OP_const4u into
+ DW_OP_const4s. */
+ if (DWARF2_ADDR_SIZE * 8 == HOST_BITS_PER_WIDE_INT
+ && ((DWARF2_ADDR_SIZE == 4 && i > max_uint - 0x8000)
+ || (DWARF2_ADDR_SIZE == 8 && i > max_uint - 0x80000000)))
+ {
+ const unsigned HOST_WIDE_INT first_shift = i - max_int - 1;
+
+ /* Now, -1 < first_shift <= max (HOST_WIDE_INT)
+ i.e. 0 <= first_shift <= max (HOST_WIDE_INT). */
+ const HOST_WIDE_INT second_shift
+ = (HOST_WIDE_INT) first_shift - (HOST_WIDE_INT) max_int - 1;
+
+ /* So we finally have:
+ -max (HOST_WIDE_INT) - 1 <= second_shift <= -1.
+ i.e. min (HOST_WIDE_INT) <= second_shift < 0. */
+ return int_loc_descriptor (second_shift);
+ }
+
+ /* Last chance: fallback to a simple constant operation. */
+ return new_loc_descr
+ ((HOST_BITS_PER_WIDE_INT == 32 || i <= 0xffffffff)
+ ? DW_OP_const4u
+ : DW_OP_const8u,
+ i, 0);
+}
+
+/* Generate and return a location description that computes the unsigned
+ comparison of the two stack top entries (a OP b where b is the top-most
+ entry and a is the second one). The KIND of comparison can be LT_EXPR,
+ LE_EXPR, GT_EXPR or GE_EXPR. */
+
+static dw_loc_descr_ref
+uint_comparison_loc_list (enum tree_code kind)
+{
+ enum dwarf_location_atom op, flip_op;
+ dw_loc_descr_ref ret, bra_node, jmp_node, tmp;
+
+ switch (kind)
+ {
+ case LT_EXPR:
+ op = DW_OP_lt;
+ break;
+ case LE_EXPR:
+ op = DW_OP_le;
+ break;
+ case GT_EXPR:
+ op = DW_OP_gt;
+ break;
+ case GE_EXPR:
+ op = DW_OP_ge;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Branch targets are patched below once the target operations exist.  */
+ bra_node = new_loc_descr (DW_OP_bra, 0, 0);
+ jmp_node = new_loc_descr (DW_OP_skip, 0, 0);
+
+ /* Until DWARFv4, operations all work on signed integers. It is nevertheless
+ possible to perform unsigned comparisons: we just have to distinguish
+ three cases:
+
+ 1. when a and b have the same sign (as signed integers); then we should
+ return: a OP(signed) b;
+
+ 2. when a is a negative signed integer while b is a positive one, then a
+ is a greater unsigned integer than b; likewise when a and b's roles
+ are flipped.
+
+ So first, compare the sign of the two operands. */
+ ret = new_loc_descr (DW_OP_over, 0, 0);
+ add_loc_descr (&ret, new_loc_descr (DW_OP_over, 0, 0));
+ add_loc_descr (&ret, new_loc_descr (DW_OP_xor, 0, 0));
+ /* If they have different signs (i.e. they have different sign bits), then
+ the stack top value has now the sign bit set and thus it's smaller than
+ zero. */
+ add_loc_descr (&ret, new_loc_descr (DW_OP_lit0, 0, 0));
+ add_loc_descr (&ret, new_loc_descr (DW_OP_lt, 0, 0));
+ add_loc_descr (&ret, bra_node);
+
+ /* We are in case 1. At this point, we know both operands have the same
+ sign, so it's safe to use the built-in signed comparison. */
+ add_loc_descr (&ret, new_loc_descr (op, 0, 0));
+ add_loc_descr (&ret, jmp_node);
+
+ /* We are in case 2. Here, we know both operands do not have the same sign,
+ so we have to flip the signed comparison. */
+ flip_op = (kind == LT_EXPR || kind == LE_EXPR) ? DW_OP_gt : DW_OP_lt;
+ tmp = new_loc_descr (flip_op, 0, 0);
+ bra_node->dw_loc_oprnd1.val_class = dw_val_class_loc;
+ bra_node->dw_loc_oprnd1.v.val_loc = tmp;
+ add_loc_descr (&ret, tmp);
+
+ /* This dummy operation is necessary to make the two branches join. */
+ tmp = new_loc_descr (DW_OP_nop, 0, 0);
+ jmp_node->dw_loc_oprnd1.val_class = dw_val_class_loc;
+ jmp_node->dw_loc_oprnd1.v.val_loc = tmp;
+ add_loc_descr (&ret, tmp);
+
+ return ret;
+}
+
+/* Likewise, but takes the location description lists (might be destructive on
+ them). Return NULL if either is NULL or if concatenation fails. */
+
+static dw_loc_list_ref
+loc_list_from_uint_comparison (dw_loc_list_ref left, dw_loc_list_ref right,
+ enum tree_code kind)
+{
+ if (left == NULL || right == NULL)
+ return NULL;
+
+ /* Concatenation may fail, in which case LEFT is cleared.  */
+ add_loc_list (&left, right);
+ if (left == NULL)
+ return NULL;
+
+ /* Append the unsigned-comparison epilogue to every list element.  */
+ add_loc_descr_to_each (left, uint_comparison_loc_list (kind));
+ return left;
+}
+
/* Return size_of_locs (int_shift_loc_descriptor (i, shift))
without actually allocating it. */
{
/* If delegitimize_address couldn't do anything with the UNSPEC, assume
we can't express it in the debug info. */
-#ifdef ENABLE_CHECKING
/* Don't complain about TLS UNSPECs, those are just too hard to
delegitimize. Note this could be a non-decl SYMBOL_REF such as
one in a constant pool entry, so testing SYMBOL_REF_TLS_MODEL
rather than DECL_THREAD_LOCAL_P is not just an optimization. */
- if (XVECLEN (rtl, 0) == 0
- || GET_CODE (XVECEXP (rtl, 0, 0)) != SYMBOL_REF
- || SYMBOL_REF_TLS_MODEL (XVECEXP (rtl, 0, 0)) == TLS_MODEL_NONE)
+ if (flag_checking
+ && (XVECLEN (rtl, 0) == 0
+ || GET_CODE (XVECEXP (rtl, 0, 0)) != SYMBOL_REF
+ || SYMBOL_REF_TLS_MODEL (XVECEXP (rtl, 0, 0)) == TLS_MODEL_NONE))
inform (current_function_decl
? DECL_SOURCE_LOCATION (current_function_decl)
: UNKNOWN_LOCATION,
"non-delegitimized UNSPEC %d found in variable location",
XINT (rtl, 1));
#endif
-#endif
expansion_failed (NULL_TREE, rtl,
"UNSPEC hasn't been delegitimized.\n");
return false;
}
type_die = lookup_type_die (type);
if (!type_die)
- type_die = modified_type_die (type, TYPE_UNQUALIFIED, comp_unit_die ());
+ type_die = modified_type_die (type, TYPE_UNQUALIFIED, false,
+ comp_unit_die ());
if (type_die == NULL || type_die->die_tag != DW_TAG_base_type)
return NULL;
return type_die;
cvt->dw_loc_oprnd1.v.val_die_ref.die = type_die;
cvt->dw_loc_oprnd1.v.val_die_ref.external = 0;
add_loc_descr (&mem_loc_result, cvt);
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) <= DWARF2_ADDR_SIZE)
+ {
+ /* Convert it to untyped afterwards. */
+ cvt = new_loc_descr (DW_OP_GNU_convert, 0, 0);
+ add_loc_descr (&mem_loc_result, cvt);
+ }
}
break;
goto symref;
default:
-#ifdef ENABLE_CHECKING
- print_rtl (stderr, rtl);
- gcc_unreachable ();
-#else
+ if (flag_checking)
+ {
+ print_rtl (stderr, rtl);
+ gcc_unreachable ();
+ }
break;
-#endif
}
if (mem_loc_result && initialized == VAR_INIT_STATUS_UNINITIALIZED)
tree obj, offset;
HOST_WIDE_INT bitsize, bitpos, bytepos;
machine_mode mode;
- int unsignedp, volatilep = 0;
+ int unsignedp, reversep, volatilep = 0;
dw_loc_list_ref list_ret = NULL, list_ret1 = NULL;
obj = get_inner_reference (TREE_OPERAND (loc, 0),
&bitsize, &bitpos, &offset, &mode,
- &unsignedp, &volatilep, false);
+ &unsignedp, &reversep, &volatilep, false);
STRIP_NOPS (obj);
if (bitpos % BITS_PER_UNIT)
{
return list_ret;
}
+/* Set LOC to the next operation that is not a DW_OP_nop operation. In the case
+ all operations from LOC are nops, move to the last one. Insert in NOPS all
+ operations that are skipped. */
+
+static void
+loc_descr_to_next_no_nop (dw_loc_descr_ref &loc,
+ hash_set<dw_loc_descr_ref> &nops)
+{
+ /* Stop on the first non-nop, or on a trailing nop (dw_loc_next == NULL),
+ which is kept so that LOC never becomes a dangling reference.  */
+ while (loc->dw_loc_next != NULL && loc->dw_loc_opc == DW_OP_nop)
+ {
+ nops.add (loc);
+ loc = loc->dw_loc_next;
+ }
+}
+
+/* Helper for loc_descr_without_nops: free the location description operation
+ LOC. Traversal callback for hash_set::traverse; returning true keeps the
+ traversal going. DATA is unused. */
+
+bool
+free_loc_descr (const dw_loc_descr_ref &loc, void *data ATTRIBUTE_UNUSED)
+{
+ ggc_free (loc);
+ return true;
+}
+
+/* Remove all DW_OP_nop operations from LOC except, if it exists, the one that
+ finishes LOC. */
+
+static void
+loc_descr_without_nops (dw_loc_descr_ref &loc)
+{
+ /* A single trailing nop is deliberately kept (see
+ loc_descr_to_next_no_nop).  */
+ if (loc->dw_loc_opc == DW_OP_nop && loc->dw_loc_next == NULL)
+ return;
+
+ /* Set of all DW_OP_nop operations we remove. */
+ hash_set<dw_loc_descr_ref> nops;
+
+ /* First, strip all prefix NOP operations in order to keep the head of the
+ operations list. */
+ loc_descr_to_next_no_nop (loc, nops);
+
+ for (dw_loc_descr_ref cur = loc; cur != NULL;)
+ {
+ /* For control flow operations: strip "prefix" nops in destination
+ labels. */
+ if (cur->dw_loc_oprnd1.val_class == dw_val_class_loc)
+ loc_descr_to_next_no_nop (cur->dw_loc_oprnd1.v.val_loc, nops);
+ if (cur->dw_loc_oprnd2.val_class == dw_val_class_loc)
+ loc_descr_to_next_no_nop (cur->dw_loc_oprnd2.v.val_loc, nops);
+
+ /* Do the same for the operations that follow, then move to the next
+ iteration. */
+ if (cur->dw_loc_next != NULL)
+ loc_descr_to_next_no_nop (cur->dw_loc_next, nops);
+ cur = cur->dw_loc_next;
+ }
+
+ /* All skipped nops are now unreachable: release their storage.  */
+ nops.traverse<void *, free_loc_descr> (NULL);
+}
+
+
+struct dwarf_procedure_info;
/* Helper structure for location descriptions generation. */
struct loc_descr_context
/* The ..._DECL node that should be translated as a
DW_OP_push_object_address operation. */
tree base_decl;
+ /* Information about the DWARF procedure we are currently generating. NULL if
+ we are not generating a DWARF procedure. */
+ struct dwarf_procedure_info *dpi;
};
+/* DWARF procedures generation
+
+ DWARF expressions (aka. location descriptions) are used to encode variable
+ things such as sizes or offsets. Such computations can have redundant parts
+ that can be factorized in order to reduce the size of the output debug
+ information. This is the whole point of DWARF procedures.
+
+ Thanks to stor-layout.c, size and offset expressions in GENERIC trees are
+ already factorized into functions ("size functions") in order to handle very
+ big and complex types. Such functions are quite simple: they have integral
+ arguments, they return an integral result and their body contains only a
+ return statement with arithmetic expressions. This is the only kind of
+ function we are interested in translating into DWARF procedures, here.
+
+ DWARF expressions and DWARF procedure are executed using a stack, so we have
+ to define some calling convention for them to interact. Let's say that:
+
+ - Before calling a DWARF procedure, DWARF expressions must push on the stack
+ all arguments in reverse order (right-to-left) so that when the DWARF
+ procedure execution starts, the first argument is the top of the stack.
+
+ - Then, when returning, the DWARF procedure must have consumed all arguments
+ on the stack, must have pushed the result and touched nothing else.
+
+ - Each integral argument and the result can be held in a single stack
+ slot.
+
+ - We call "frame offset" the number of stack slots that are "under DWARF
+ procedure control": it includes the arguments slots, the temporaries and
+ the result slot. Thus, it is equal to the number of arguments when the
+ procedure execution starts and must be equal to one (the result) when it
+ returns. */
+
+/* Helper structure used when generating operations for a DWARF procedure.
+ Filled in by function_to_dwarf_procedure and reached from expression
+ translation through loc_descr_context::dpi. */
+struct dwarf_procedure_info
+{
+ /* The FUNCTION_DECL node corresponding to the DWARF procedure that is
+ currently translated. */
+ tree fndecl;
+ /* The number of arguments FNDECL takes. */
+ unsigned args_count;
+};
+
+/* Return a pointer to a newly created DIE node for a DWARF procedure. Add
+ LOCATION as its DW_AT_location attribute. If FNDECL is not NULL_TREE,
+ equate it to this DIE. */
+
+static dw_die_ref
+new_dwarf_proc_die (dw_loc_descr_ref location, tree fndecl,
+ dw_die_ref parent_die)
+{
+ /* DW_TAG_dwarf_procedure only exists starting with DWARFv4.  */
+ const bool dwarf_proc_supported = dwarf_version >= 4;
+ dw_die_ref dwarf_proc_die;
+
+ /* DW_OP_call* operations require at least DWARFv3, so give up in strict
+ pre-DWARFv3 mode, or when there is no expression to attach.  */
+ if ((dwarf_version < 3 && dwarf_strict)
+ || location == NULL)
+ return NULL;
+
+ /* Before DWARFv4, fall back to an artificial DW_TAG_variable whose
+ location holds the procedure's expression.  */
+ dwarf_proc_die = new_die (dwarf_proc_supported
+ ? DW_TAG_dwarf_procedure
+ : DW_TAG_variable,
+ parent_die,
+ fndecl);
+ if (fndecl)
+ equate_decl_number_to_die (fndecl, dwarf_proc_die);
+ if (!dwarf_proc_supported)
+ add_AT_flag (dwarf_proc_die, DW_AT_artificial, 1);
+ add_AT_loc (dwarf_proc_die, DW_AT_location, location);
+ return dwarf_proc_die;
+}
+
+/* Return whether TYPE is a supported type as a DWARF procedure argument
+ type or return type (we handle only scalar types and pointer types that
+ aren't wider than the DWARF expression evaluation stack). */
+
+static bool
+is_handled_procedure_type (tree type)
+{
+ return ((INTEGRAL_TYPE_P (type)
+ || TREE_CODE (type) == OFFSET_TYPE
+ || TREE_CODE (type) == POINTER_TYPE)
+ && int_size_in_bytes (type) <= DWARF2_ADDR_SIZE);
+}
+
+/* Helper for resolve_args_picking. Stop when coming across VISITED nodes. */
+
+static bool
+resolve_args_picking_1 (dw_loc_descr_ref loc, unsigned initial_frame_offset,
+ struct dwarf_procedure_info *dpi,
+ hash_set<dw_loc_descr_ref> &visited)
+{
+ /* The "frame_offset" identifier is already used to name a macro... */
+ unsigned frame_offset_ = initial_frame_offset;
+ dw_loc_descr_ref l;
+
+ for (l = loc; l != NULL;)
+ {
+ /* If we already met this node, there is nothing to compute anymore. */
+ if (visited.add (l))
+ {
+#if CHECKING_P
+ /* Make sure that the stack size is consistent wherever the execution
+ flow comes from. */
+ gcc_assert ((unsigned) l->dw_loc_frame_offset == frame_offset_);
+#endif
+ break;
+ }
+#if CHECKING_P
+ l->dw_loc_frame_offset = frame_offset_;
+#endif
+
+ /* If needed, relocate the picking offset with respect to the frame
+ offset. */
+ if (l->dw_loc_opc == DW_OP_pick && l->frame_offset_rel)
+ {
+ /* frame_offset_ is the size of the current stack frame, including
+ incoming arguments. Besides, the arguments are pushed
+ right-to-left. Thus, in order to access the Nth argument from
+ this operation node, the picking has to skip temporaries *plus*
+ one stack slot per argument (0 for the first one, 1 for the second
+ one, etc.).
+
+ The targeted argument number (N) is already set as the operand,
+ and the number of temporaries can be computed with:
+ frame_offset_ - dpi->args_count */
+ l->dw_loc_oprnd1.v.val_unsigned += frame_offset_ - dpi->args_count;
+
+ /* DW_OP_pick handles only offsets from 0 to 255 (inclusive)... */
+ if (l->dw_loc_oprnd1.v.val_unsigned > 255)
+ return false;
+ }
+
+ /* Update frame_offset according to the effect the current operation has
+ on the stack. */
+ switch (l->dw_loc_opc)
+ {
+ /* Operations with no net effect on the stack depth.  */
+ case DW_OP_deref:
+ case DW_OP_swap:
+ case DW_OP_rot:
+ case DW_OP_abs:
+ case DW_OP_not:
+ case DW_OP_plus_uconst:
+ case DW_OP_skip:
+ case DW_OP_reg0:
+ case DW_OP_reg1:
+ case DW_OP_reg2:
+ case DW_OP_reg3:
+ case DW_OP_reg4:
+ case DW_OP_reg5:
+ case DW_OP_reg6:
+ case DW_OP_reg7:
+ case DW_OP_reg8:
+ case DW_OP_reg9:
+ case DW_OP_reg10:
+ case DW_OP_reg11:
+ case DW_OP_reg12:
+ case DW_OP_reg13:
+ case DW_OP_reg14:
+ case DW_OP_reg15:
+ case DW_OP_reg16:
+ case DW_OP_reg17:
+ case DW_OP_reg18:
+ case DW_OP_reg19:
+ case DW_OP_reg20:
+ case DW_OP_reg21:
+ case DW_OP_reg22:
+ case DW_OP_reg23:
+ case DW_OP_reg24:
+ case DW_OP_reg25:
+ case DW_OP_reg26:
+ case DW_OP_reg27:
+ case DW_OP_reg28:
+ case DW_OP_reg29:
+ case DW_OP_reg30:
+ case DW_OP_reg31:
+ case DW_OP_bregx:
+ case DW_OP_piece:
+ case DW_OP_deref_size:
+ case DW_OP_nop:
+ case DW_OP_form_tls_address:
+ case DW_OP_bit_piece:
+ case DW_OP_implicit_value:
+ case DW_OP_stack_value:
+ break;
+
+ /* Operations that push one value on the stack.  */
+ case DW_OP_addr:
+ case DW_OP_const1u:
+ case DW_OP_const1s:
+ case DW_OP_const2u:
+ case DW_OP_const2s:
+ case DW_OP_const4u:
+ case DW_OP_const4s:
+ case DW_OP_const8u:
+ case DW_OP_const8s:
+ case DW_OP_constu:
+ case DW_OP_consts:
+ case DW_OP_dup:
+ case DW_OP_over:
+ case DW_OP_pick:
+ case DW_OP_lit0:
+ case DW_OP_lit1:
+ case DW_OP_lit2:
+ case DW_OP_lit3:
+ case DW_OP_lit4:
+ case DW_OP_lit5:
+ case DW_OP_lit6:
+ case DW_OP_lit7:
+ case DW_OP_lit8:
+ case DW_OP_lit9:
+ case DW_OP_lit10:
+ case DW_OP_lit11:
+ case DW_OP_lit12:
+ case DW_OP_lit13:
+ case DW_OP_lit14:
+ case DW_OP_lit15:
+ case DW_OP_lit16:
+ case DW_OP_lit17:
+ case DW_OP_lit18:
+ case DW_OP_lit19:
+ case DW_OP_lit20:
+ case DW_OP_lit21:
+ case DW_OP_lit22:
+ case DW_OP_lit23:
+ case DW_OP_lit24:
+ case DW_OP_lit25:
+ case DW_OP_lit26:
+ case DW_OP_lit27:
+ case DW_OP_lit28:
+ case DW_OP_lit29:
+ case DW_OP_lit30:
+ case DW_OP_lit31:
+ case DW_OP_breg0:
+ case DW_OP_breg1:
+ case DW_OP_breg2:
+ case DW_OP_breg3:
+ case DW_OP_breg4:
+ case DW_OP_breg5:
+ case DW_OP_breg6:
+ case DW_OP_breg7:
+ case DW_OP_breg8:
+ case DW_OP_breg9:
+ case DW_OP_breg10:
+ case DW_OP_breg11:
+ case DW_OP_breg12:
+ case DW_OP_breg13:
+ case DW_OP_breg14:
+ case DW_OP_breg15:
+ case DW_OP_breg16:
+ case DW_OP_breg17:
+ case DW_OP_breg18:
+ case DW_OP_breg19:
+ case DW_OP_breg20:
+ case DW_OP_breg21:
+ case DW_OP_breg22:
+ case DW_OP_breg23:
+ case DW_OP_breg24:
+ case DW_OP_breg25:
+ case DW_OP_breg26:
+ case DW_OP_breg27:
+ case DW_OP_breg28:
+ case DW_OP_breg29:
+ case DW_OP_breg30:
+ case DW_OP_breg31:
+ case DW_OP_fbreg:
+ case DW_OP_push_object_address:
+ case DW_OP_call_frame_cfa:
+ ++frame_offset_;
+ break;
+
+ /* Operations that pop one value from the stack.  */
+ case DW_OP_drop:
+ case DW_OP_xderef:
+ case DW_OP_and:
+ case DW_OP_div:
+ case DW_OP_minus:
+ case DW_OP_mod:
+ case DW_OP_mul:
+ case DW_OP_neg:
+ case DW_OP_or:
+ case DW_OP_plus:
+ case DW_OP_shl:
+ case DW_OP_shr:
+ case DW_OP_shra:
+ case DW_OP_xor:
+ case DW_OP_bra:
+ case DW_OP_eq:
+ case DW_OP_ge:
+ case DW_OP_gt:
+ case DW_OP_le:
+ case DW_OP_lt:
+ case DW_OP_ne:
+ case DW_OP_regx:
+ case DW_OP_xderef_size:
+ --frame_offset_;
+ break;
+
+ case DW_OP_call2:
+ case DW_OP_call4:
+ case DW_OP_call_ref:
+ {
+ /* The stack effect of a call is the callee's recorded net usage;
+ fail when the callee is unknown to us.  */
+ dw_die_ref dwarf_proc = l->dw_loc_oprnd1.v.val_die_ref.die;
+ int *stack_usage = dwarf_proc_stack_usage_map->get (dwarf_proc);
+
+ if (stack_usage == NULL)
+ return false;
+ frame_offset_ += *stack_usage;
+ break;
+ }
+
+ case DW_OP_GNU_push_tls_address:
+ case DW_OP_GNU_uninit:
+ case DW_OP_GNU_encoded_addr:
+ case DW_OP_GNU_implicit_pointer:
+ case DW_OP_GNU_entry_value:
+ case DW_OP_GNU_const_type:
+ case DW_OP_GNU_regval_type:
+ case DW_OP_GNU_deref_type:
+ case DW_OP_GNU_convert:
+ case DW_OP_GNU_reinterpret:
+ case DW_OP_GNU_parameter_ref:
+ /* loc_list_from_tree will probably not output these operations for
+ size functions, so assume they will not appear here. */
+ /* Fall through... */
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Now, follow the control flow (except subroutine calls). */
+ switch (l->dw_loc_opc)
+ {
+ case DW_OP_bra:
+ /* Process the fall-through successor first, then follow the branch
+ target below (DW_OP_skip handling).  */
+ if (!resolve_args_picking_1 (l->dw_loc_next, frame_offset_, dpi,
+ visited))
+ return false;
+ /* Fall through... */
+
+ case DW_OP_skip:
+ l = l->dw_loc_oprnd1.v.val_loc;
+ break;
+
+ case DW_OP_stack_value:
+ return true;
+
+ default:
+ l = l->dw_loc_next;
+ break;
+ }
+ }
+
+ return true;
+}
+
+/* Make a DFS over operations reachable through LOC (i.e. follow branch
+ operations) in order to resolve the operand of DW_OP_pick operations that
+ target DWARF procedure arguments (DPI). Stop at already visited nodes.
+ INITIAL_FRAME_OFFSET is the frame offset *before* LOC is executed. Return
+ true if all relocations were successful. */
+
+static bool
+resolve_args_picking (dw_loc_descr_ref loc, unsigned initial_frame_offset,
+ struct dwarf_procedure_info *dpi)
+{
+ hash_set<dw_loc_descr_ref> visited;
+
+ return resolve_args_picking_1 (loc, initial_frame_offset, dpi, visited);
+}
+
+/* Try to generate a DWARF procedure that computes the same result as FNDECL.
+ Return NULL if it is not possible. */
+
+static dw_die_ref
+function_to_dwarf_procedure (tree fndecl)
+{
+ struct loc_descr_context ctx;
+ struct dwarf_procedure_info dpi;
+ dw_die_ref dwarf_proc_die;
+ tree tree_body = DECL_SAVED_TREE (fndecl);
+ dw_loc_descr_ref loc_body, epilogue;
+
+ tree cursor;
+ unsigned i;
+
+ /* Do not generate multiple DWARF procedures for the same function
+ declaration. */
+ dwarf_proc_die = lookup_decl_die (fndecl);
+ if (dwarf_proc_die != NULL)
+ return dwarf_proc_die;
+
+ /* DWARF procedures are available starting with the DWARFv3 standard, but
+ it's the DWARFv4 standard that introduces the DW_TAG_dwarf_procedure
+ DIE. */
+ if (dwarf_version < 3 && dwarf_strict)
+ return NULL;
+
+ /* We handle only functions for which we still have a body, that return a
+ supported type and that take arguments with supported types. Note that
+ there is no point translating functions that return nothing. */
+ if (tree_body == NULL_TREE
+ || DECL_RESULT (fndecl) == NULL_TREE
+ || !is_handled_procedure_type (TREE_TYPE (DECL_RESULT (fndecl))))
+ return NULL;
+
+ for (cursor = DECL_ARGUMENTS (fndecl);
+ cursor != NULL_TREE;
+ cursor = TREE_CHAIN (cursor))
+ if (!is_handled_procedure_type (TREE_TYPE (cursor)))
+ return NULL;
+
+ /* Match only "expr" in: RETURN_EXPR (MODIFY_EXPR (RESULT_DECL, expr)). */
+ if (TREE_CODE (tree_body) != RETURN_EXPR)
+ return NULL;
+ tree_body = TREE_OPERAND (tree_body, 0);
+ if (TREE_CODE (tree_body) != MODIFY_EXPR
+ || TREE_OPERAND (tree_body, 0) != DECL_RESULT (fndecl))
+ return NULL;
+ tree_body = TREE_OPERAND (tree_body, 1);
+
+ /* Try to translate the body expression itself. Note that this will probably
+ cause an infinite recursion if its call graph has a cycle. This is very
+ unlikely for size functions, however, so don't bother with such things at
+ the moment. */
+ ctx.context_type = NULL_TREE;
+ ctx.base_decl = NULL_TREE;
+ ctx.dpi = &dpi;
+ dpi.fndecl = fndecl;
+ dpi.args_count = list_length (DECL_ARGUMENTS (fndecl));
+ loc_body = loc_descriptor_from_tree (tree_body, 0, &ctx);
+ if (!loc_body)
+ return NULL;
+
+ /* After evaluating all operands in "loc_body", we should still have on the
+ stack all arguments plus the desired function result (top of the stack).
+ Generate code in order to keep only the result in our stack frame. */
+ epilogue = NULL;
+ for (i = 0; i < dpi.args_count; ++i)
+ {
+ /* Each DW_OP_swap/DW_OP_drop pair discards one leftover argument
+ while keeping the result on top.  */
+ dw_loc_descr_ref op_couple = new_loc_descr (DW_OP_swap, 0, 0);
+ op_couple->dw_loc_next = new_loc_descr (DW_OP_drop, 0, 0);
+ op_couple->dw_loc_next->dw_loc_next = epilogue;
+ epilogue = op_couple;
+ }
+ add_loc_descr (&loc_body, epilogue);
+ if (!resolve_args_picking (loc_body, dpi.args_count, &dpi))
+ return NULL;
+
+ /* Trailing nops from loc_descriptor_from_tree (if any) cannot be removed
+ because they are considered useful. Now there is an epilogue, they are
+ not anymore, so give it another try. */
+ loc_descr_without_nops (loc_body);
+
+ /* fndecl may be used both as a regular DW_TAG_subprogram DIE and as
+ a DW_TAG_dwarf_procedure, so we may have a conflict, here. It's unlikely,
+ though, given that size functions do not come from source, so they should
+ not have a dedicated DW_TAG_subprogram DIE. */
+ dwarf_proc_die
+ = new_dwarf_proc_die (loc_body, fndecl,
+ get_context_die (DECL_CONTEXT (fndecl)));
+
+ /* The called DWARF procedure consumes one stack slot per argument and
+ returns one stack slot. */
+ dwarf_proc_stack_usage_map->put (dwarf_proc_die, 1 - dpi.args_count);
+
+ return dwarf_proc_die;
+}
+
+
/* Generate Dwarf location list representing LOC.
If WANT_ADDRESS is false, expression computing LOC will be computed
If WANT_ADDRESS is 1, expression computing address of LOC will be returned
referenced by DW_OP_push_object_address. If it is NULL_TREE, this operation
will not be generated.
- If CONTEXT is NULL, the behavior is the same as if both context_type and
- base_decl fields were NULL_TREE. */
+ Its DPI field determines whether we are generating a DWARF expression for a
+ DWARF procedure, so PARM_DECL references are processed specifically.
+
+ If CONTEXT is NULL, the behavior is the same as if context_type, base_decl
+ and dpi fields were null. */
static dw_loc_list_ref
-loc_list_from_tree (tree loc, int want_address,
- const struct loc_descr_context *context)
+loc_list_from_tree_1 (tree loc, int want_address,
+ const struct loc_descr_context *context)
{
dw_loc_descr_ref ret = NULL, ret1 = NULL;
dw_loc_list_ref list_ret = NULL, list_ret1 = NULL;
break;
case CALL_EXPR:
- expansion_failed (loc, NULL_RTX, "CALL_EXPR");
- /* There are no opcodes for these operations. */
- return 0;
+ {
+ const int nargs = call_expr_nargs (loc);
+ tree callee = get_callee_fndecl (loc);
+ int i;
+ dw_die_ref dwarf_proc;
+
+ if (callee == NULL_TREE)
+ goto call_expansion_failed;
+
+ /* We handle only functions that return an integer. */
+ if (!is_handled_procedure_type (TREE_TYPE (TREE_TYPE (callee))))
+ goto call_expansion_failed;
+
+ dwarf_proc = function_to_dwarf_procedure (callee);
+ if (dwarf_proc == NULL)
+ goto call_expansion_failed;
+
+ /* Evaluate arguments right-to-left so that the first argument will
+ be the top-most one on the stack. */
+ for (i = nargs - 1; i >= 0; --i)
+ {
+ dw_loc_descr_ref loc_descr
+ = loc_descriptor_from_tree (CALL_EXPR_ARG (loc, i), 0,
+ context);
+
+ if (loc_descr == NULL)
+ goto call_expansion_failed;
+
+ add_loc_descr (&ret, loc_descr);
+ }
+
+ ret1 = new_loc_descr (DW_OP_call4, 0, 0);
+ ret1->dw_loc_oprnd1.val_class = dw_val_class_die_ref;
+ ret1->dw_loc_oprnd1.v.val_die_ref.die = dwarf_proc;
+ ret1->dw_loc_oprnd1.v.val_die_ref.external = 0;
+ add_loc_descr (&ret, ret1);
+ break;
+
+ call_expansion_failed:
+ expansion_failed (loc, NULL_RTX, "CALL_EXPR");
+ /* There are no opcodes for these operations. */
+ return 0;
+ }
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
}
/* Otherwise, process the argument and look for the address. */
if (!list_ret && !ret)
- list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 1, context);
+ list_ret = loc_list_from_tree_1 (TREE_OPERAND (loc, 0), 1, context);
else
{
if (want_address)
/* FALLTHRU */
case PARM_DECL:
+ if (context != NULL && context->dpi != NULL
+ && DECL_CONTEXT (loc) == context->dpi->fndecl)
+ {
+ /* We are generating code for a DWARF procedure and we want to access
+ one of its arguments: find the appropriate argument offset and let
+ the resolve_args_picking pass compute the offset that complies
+ with the stack frame size. */
+ unsigned i = 0;
+ tree cursor;
+
+ for (cursor = DECL_ARGUMENTS (context->dpi->fndecl);
+ cursor != NULL_TREE && cursor != loc;
+ cursor = TREE_CHAIN (cursor), ++i)
+ ;
+ /* If we are translating a DWARF procedure, all referenced parameters
+ must belong to the current function. */
+ gcc_assert (cursor != NULL_TREE);
+
+ ret = new_loc_descr (DW_OP_pick, i, 0);
+ ret->frame_offset_rel = 1;
+ break;
+ }
+ /* FALLTHRU */
+
case RESULT_DECL:
if (DECL_HAS_VALUE_EXPR_P (loc))
- return loc_list_from_tree (DECL_VALUE_EXPR (loc),
- want_address, context);
+ return loc_list_from_tree_1 (DECL_VALUE_EXPR (loc),
+ want_address, context);
/* FALLTHRU */
case FUNCTION_DECL:
}
/* Fallthru. */
case INDIRECT_REF:
- list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 0, context);
+ list_ret = loc_list_from_tree_1 (TREE_OPERAND (loc, 0), 0, context);
have_address = 1;
break;
return NULL;
case COMPOUND_EXPR:
- return loc_list_from_tree (TREE_OPERAND (loc, 1), want_address, context);
+ return loc_list_from_tree_1 (TREE_OPERAND (loc, 1), want_address,
+ context);
CASE_CONVERT:
case VIEW_CONVERT_EXPR:
case SAVE_EXPR:
case MODIFY_EXPR:
- return loc_list_from_tree (TREE_OPERAND (loc, 0), want_address, context);
+ case NON_LVALUE_EXPR:
+ return loc_list_from_tree_1 (TREE_OPERAND (loc, 0), want_address,
+ context);
case COMPONENT_REF:
case BIT_FIELD_REF:
tree obj, offset;
HOST_WIDE_INT bitsize, bitpos, bytepos;
machine_mode mode;
- int unsignedp, volatilep = 0;
+ int unsignedp, reversep, volatilep = 0;
obj = get_inner_reference (loc, &bitsize, &bitpos, &offset, &mode,
- &unsignedp, &volatilep, false);
+ &unsignedp, &reversep, &volatilep, false);
gcc_assert (obj != loc);
- list_ret = loc_list_from_tree (obj,
- want_address == 2
- && !bitpos && !offset ? 2 : 1,
- context);
+ list_ret = loc_list_from_tree_1 (obj,
+ want_address == 2
+ && !bitpos && !offset ? 2 : 1,
+ context);
/* TODO: We can extract value of the small expression via shifting even
for nonzero bitpos. */
if (list_ret == 0)
if (offset != NULL_TREE)
{
/* Variable offset. */
- list_ret1 = loc_list_from_tree (offset, 0, context);
+ list_ret1 = loc_list_from_tree_1 (offset, 0, context);
if (list_ret1 == 0)
return 0;
add_loc_list (&list_ret, list_ret1);
have_address = 1;
else if (tree_fits_shwi_p (loc))
ret = int_loc_descriptor (tree_to_shwi (loc));
+ else if (tree_fits_uhwi_p (loc))
+ ret = uint_loc_descriptor (tree_to_uhwi (loc));
else
{
expansion_failed (loc, NULL_RTX,
case CEIL_DIV_EXPR:
case ROUND_DIV_EXPR:
case TRUNC_DIV_EXPR:
+ case EXACT_DIV_EXPR:
if (TYPE_UNSIGNED (TREE_TYPE (loc)))
return 0;
op = DW_OP_div;
op = DW_OP_mod;
goto do_binop;
}
- list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 0, context);
- list_ret1 = loc_list_from_tree (TREE_OPERAND (loc, 1), 0, context);
+ list_ret = loc_list_from_tree_1 (TREE_OPERAND (loc, 0), 0, context);
+ list_ret1 = loc_list_from_tree_1 (TREE_OPERAND (loc, 1), 0, context);
if (list_ret == 0 || list_ret1 == 0)
return 0;
do_plus:
if (tree_fits_shwi_p (TREE_OPERAND (loc, 1)))
{
- list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 0, context);
+ /* Big unsigned numbers can fit in HOST_WIDE_INT but it may be
+ smarter to encode their opposite. The DW_OP_plus_uconst operation
+ takes 1 + X bytes, X being the size of the ULEB128 addend. On the
+ other hand, a "<push literal>; DW_OP_minus" pattern takes 1 + Y
+ bytes, Y being the size of the operation that pushes the opposite
+ of the addend. So let's choose the smallest representation. */
+ const tree tree_addend = TREE_OPERAND (loc, 1);
+ offset_int wi_addend;
+ HOST_WIDE_INT shwi_addend;
+ dw_loc_descr_ref loc_naddend;
+
+ list_ret = loc_list_from_tree_1 (TREE_OPERAND (loc, 0), 0, context);
if (list_ret == 0)
return 0;
- loc_list_plus_const (list_ret, tree_to_shwi (TREE_OPERAND (loc, 1)));
+ /* Try to get the literal to push. It is the opposite of the addend,
+ so as we rely on wrapping during DWARF evaluation, first decode
+ the literal as a "DWARF-sized" signed number. */
+ wi_addend = wi::to_offset (tree_addend);
+ wi_addend = wi::sext (wi_addend, DWARF2_ADDR_SIZE * 8);
+ shwi_addend = wi_addend.to_shwi ();
+ loc_naddend = (shwi_addend != INTTYPE_MINIMUM (HOST_WIDE_INT))
+ ? int_loc_descriptor (-shwi_addend)
+ : NULL;
+
+ if (loc_naddend != NULL
+ && ((unsigned) size_of_uleb128 (shwi_addend)
+ > size_of_loc_descr (loc_naddend)))
+ {
+ add_loc_descr_to_each (list_ret, loc_naddend);
+ add_loc_descr_to_each (list_ret,
+ new_loc_descr (DW_OP_minus, 0, 0));
+ }
+ else
+ {
+ for (dw_loc_descr_ref loc_cur = loc_naddend; loc_cur != NULL; )
+ {
+ loc_naddend = loc_cur;
+ loc_cur = loc_cur->dw_loc_next;
+ ggc_free (loc_naddend);
+ }
+ loc_list_plus_const (list_ret, wi_addend.to_shwi ());
+ }
break;
}
goto do_binop;
case LE_EXPR:
- if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (loc, 0))))
- return 0;
-
op = DW_OP_le;
- goto do_binop;
+ goto do_comp_binop;
case GE_EXPR:
- if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (loc, 0))))
- return 0;
-
op = DW_OP_ge;
- goto do_binop;
+ goto do_comp_binop;
case LT_EXPR:
- if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (loc, 0))))
- return 0;
-
op = DW_OP_lt;
- goto do_binop;
+ goto do_comp_binop;
case GT_EXPR:
- if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (loc, 0))))
- return 0;
-
op = DW_OP_gt;
- goto do_binop;
+ goto do_comp_binop;
+
+ do_comp_binop:
+ if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (loc, 0))))
+ {
+ list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 0, context);
+ list_ret1 = loc_list_from_tree (TREE_OPERAND (loc, 1), 0, context);
+ list_ret = loc_list_from_uint_comparison (list_ret, list_ret1,
+ TREE_CODE (loc));
+ break;
+ }
+ else
+ goto do_binop;
case EQ_EXPR:
op = DW_OP_eq;
goto do_binop;
do_binop:
- list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 0, context);
- list_ret1 = loc_list_from_tree (TREE_OPERAND (loc, 1), 0, context);
+ list_ret = loc_list_from_tree_1 (TREE_OPERAND (loc, 0), 0, context);
+ list_ret1 = loc_list_from_tree_1 (TREE_OPERAND (loc, 1), 0, context);
if (list_ret == 0 || list_ret1 == 0)
return 0;
goto do_unop;
do_unop:
- list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 0, context);
+ list_ret = loc_list_from_tree_1 (TREE_OPERAND (loc, 0), 0, context);
if (list_ret == 0)
return 0;
dw_loc_descr_ref lhs
= loc_descriptor_from_tree (TREE_OPERAND (loc, 1), 0, context);
dw_loc_list_ref rhs
- = loc_list_from_tree (TREE_OPERAND (loc, 2), 0, context);
+ = loc_list_from_tree_1 (TREE_OPERAND (loc, 2), 0, context);
dw_loc_descr_ref bra_node, jump_node, tmp;
- list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 0, context);
+ list_ret = loc_list_from_tree_1 (TREE_OPERAND (loc, 0), 0, context);
if (list_ret == 0 || lhs == 0 || rhs == 0)
return 0;
return 0;
}
-#ifdef ENABLE_CHECKING
/* Otherwise this is generic code; we should just list all of
 these explicitly. We forgot one. */
- gcc_unreachable ();
-#else
+ if (flag_checking)
+ gcc_unreachable ();
+
/* In a release build, we want to degrade gracefully: better to
generate incomplete debugging information than to crash. */
return NULL;
-#endif
}
if (!ret && !list_ret)
return list_ret;
}
+/* Likewise, but strip useless DW_OP_nop operations in the resulting
+ expressions. */
+
+static dw_loc_list_ref
+loc_list_from_tree (tree loc, int want_address,
+ const struct loc_descr_context *context)
+{
+ dw_loc_list_ref result = loc_list_from_tree_1 (loc, want_address, context);
+
+ /* Strip the DW_OP_nop operations from every expression in the returned
+ location list: they carry no semantics and only waste space. */
+ for (dw_loc_list_ref loc_cur = result;
+ loc_cur != NULL; loc_cur = loc_cur->dw_loc_next)
+ loc_descr_without_nops (loc_cur->expr);
+ return result;
+}
+
/* Same as above but return only single location expression. */
static dw_loc_descr_ref
loc_descriptor_from_tree (tree loc, int want_address,
return (TREE_CODE (decl) != ERROR_MARK) ? DECL_ALIGN (decl) : BITS_PER_WORD;
}
-/* Return the result of rounding T up to ALIGN. */
+/* Return the result of rounding T up to ALIGN, ALIGN being a power-of-two or
+ not. */
+
+static inline offset_int
+round_up_to_align (const offset_int &t, unsigned int align)
+{
+ /* Classic round-up: add ALIGN - 1 and truncate with an unsigned
+ division. */
+ return wi::udiv_trunc (t + align - 1, align) * align;
+}
+
+/* Compute the size of TYPE in bytes. If possible, return NULL and store the
+ size as an integer constant in CST_SIZE. Otherwise, if possible, return a
+ DWARF expression that computes the size. Return NULL and set CST_SIZE to -1
+ if we fail to return the size in one of these two forms. */
+
+static dw_loc_descr_ref
+type_byte_size (const_tree type, HOST_WIDE_INT *cst_size)
+{
+ tree tree_size;
+ struct loc_descr_context ctx;
+
+ /* Return a constant integer in priority, if possible. */
+ *cst_size = int_size_in_bytes (type);
+ if (*cst_size != -1)
+ return NULL;
+
+ /* The size is dynamic: build a context so that PLACEHOLDER_EXPR nodes in
+ the size expression can be resolved against TYPE itself. */
+ ctx.context_type = const_cast<tree> (type);
+ ctx.base_decl = NULL_TREE;
+ ctx.dpi = NULL;
+
+ /* NOTE(review): presumably the size expression lives on the main variant
+ and is shared by all variants — confirm. */
+ type = TYPE_MAIN_VARIANT (type);
+ tree_size = TYPE_SIZE_UNIT (type);
+ return ((tree_size != NULL_TREE)
+ ? loc_descriptor_from_tree (tree_size, 0, &ctx)
+ : NULL);
+}
+
+/* Helper structure for RECORD_TYPE processing. */
+struct vlr_context
+{
+ /* Root RECORD_TYPE. It is needed to generate data member location
+ descriptions in variable-length records (VLR), but also to cope with
+ variants, which are composed of nested structures multiplexed with
+ QUAL_UNION_TYPE nodes. Each time such a structure is passed to a
+ function processing a FIELD_DECL, it is required to be non-null. */
+ tree struct_type;
+ /* When generating a variant part in a RECORD_TYPE (i.e. a nested
+ QUAL_UNION_TYPE), this holds an expression that computes the offset for
+ this variant part as part of the root record (in storage units). For
+ regular records, it must be NULL_TREE. */
+ tree variant_part_offset;
+};
+
+/* Given a pointer to a FIELD_DECL, compute the byte offset of the lowest
+ addressed byte of the "containing object" for the given FIELD_DECL. If
+ possible, return a native constant through CST_OFFSET (in which case NULL is
+ returned); otherwise return a DWARF expression that computes the offset.
-static inline offset_int
-round_up_to_align (const offset_int &t, unsigned int align)
-{
- return wi::udiv_trunc (t + align - 1, align) * align;
-}
+ Set *CST_OFFSET to 0 and return NULL if we are unable to determine what
+ that offset is, either because the argument turns out to be a pointer to an
+ ERROR_MARK node, or because the offset expression is too complex for us.
-/* Given a pointer to a FIELD_DECL, compute and return the byte offset of the
- lowest addressed byte of the "containing object" for the given FIELD_DECL,
- or return 0 if we are unable to determine what that offset is, either
- because the argument turns out to be a pointer to an ERROR_MARK node, or
- because the offset is actually variable. (We can't handle the latter case
- just yet). */
+ CTX is required: see the comment for VLR_CONTEXT. */
-static HOST_WIDE_INT
-field_byte_offset (const_tree decl)
+static dw_loc_descr_ref
+field_byte_offset (const_tree decl, struct vlr_context *ctx,
+ HOST_WIDE_INT *cst_offset)
{
- offset_int object_offset_in_bits;
- offset_int object_offset_in_bytes;
- offset_int bitpos_int;
-
- if (TREE_CODE (decl) == ERROR_MARK)
- return 0;
+ tree tree_result;
+ dw_loc_list_ref loc_result;
- gcc_assert (TREE_CODE (decl) == FIELD_DECL);
+ *cst_offset = 0;
- /* We cannot yet cope with fields whose positions are variable, so
- for now, when we see such things, we simply return 0. Someday, we may
- be able to handle such cases, but it will be damn difficult. */
- if (TREE_CODE (bit_position (decl)) != INTEGER_CST)
- return 0;
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return NULL;
+ else
+ gcc_assert (TREE_CODE (decl) == FIELD_DECL);
- bitpos_int = wi::to_offset (bit_position (decl));
+ /* We cannot handle variable bit offsets at the moment, so abort if it's the
+ case. */
+ if (TREE_CODE (DECL_FIELD_BIT_OFFSET (decl)) != INTEGER_CST)
+ return NULL;
- if (PCC_BITFIELD_TYPE_MATTERS)
+#ifdef PCC_BITFIELD_TYPE_MATTERS
+ /* We used to handle only constant offsets in all cases. Now, we properly
+ handle dynamic byte offsets only when PCC bitfield type doesn't
+ matter. */
+ if (PCC_BITFIELD_TYPE_MATTERS
+ && TREE_CODE (DECL_FIELD_OFFSET (decl)) == INTEGER_CST)
{
+ offset_int object_offset_in_bits;
+ offset_int object_offset_in_bytes;
+ offset_int bitpos_int;
tree type;
tree field_size_tree;
offset_int deepest_bitpos;
unsigned int decl_align_in_bits;
offset_int type_size_in_bits;
+ bitpos_int = wi::to_offset (bit_position (decl));
type = field_type (decl);
type_size_in_bits = offset_int_type_size_in_bits (type);
type_align_in_bits = simple_type_align_in_bits (type);
object_offset_in_bits
= round_up_to_align (object_offset_in_bits, decl_align_in_bits);
}
+
+ object_offset_in_bytes
+ = wi::lrshift (object_offset_in_bits, LOG2_BITS_PER_UNIT);
+ if (ctx->variant_part_offset == NULL_TREE)
+ {
+ *cst_offset = object_offset_in_bytes.to_shwi ();
+ return NULL;
+ }
+ tree_result = wide_int_to_tree (sizetype, object_offset_in_bytes);
}
else
- object_offset_in_bits = bitpos_int;
+#endif /* PCC_BITFIELD_TYPE_MATTERS */
+ tree_result = byte_position (decl);
+
+ if (ctx->variant_part_offset != NULL_TREE)
+ tree_result = fold_build2 (PLUS_EXPR, TREE_TYPE (tree_result),
+ ctx->variant_part_offset, tree_result);
+
+ /* If the byte offset is a constant, it's simpler to handle a native
+ constant than a DWARF expression. */
+ if (TREE_CODE (tree_result) == INTEGER_CST)
+ {
+ *cst_offset = wi::to_offset (tree_result).to_shwi ();
+ return NULL;
+ }
+ struct loc_descr_context loc_ctx = {
+ ctx->struct_type, /* context_type */
+ NULL_TREE, /* base_decl */
+ NULL /* dpi */
+ };
+ loc_result = loc_list_from_tree (tree_result, 0, &loc_ctx);
- object_offset_in_bytes
- = wi::lrshift (object_offset_in_bits, LOG2_BITS_PER_UNIT);
- return object_offset_in_bytes.to_shwi ();
+ /* We want a DWARF expression: abort if we only have a location list with
+ multiple elements. */
+ if (!loc_result || !single_element_loc_list_p (loc_result))
+ return NULL;
+ else
+ return loc_result->expr;
}
\f
/* The following routines define various Dwarf attributes and any data
DW_AT_byte_size attribute for this bit-field. (See the
`byte_size_attribute' function below.) It is also used when calculating the
value of the DW_AT_bit_offset attribute. (See the `bit_offset_attribute'
- function below.) */
+ function below.)
+
+ CTX is required: see the comment for VLR_CONTEXT. */
static void
-add_data_member_location_attribute (dw_die_ref die, tree decl)
+add_data_member_location_attribute (dw_die_ref die,
+ tree decl,
+ struct vlr_context *ctx)
{
HOST_WIDE_INT offset;
dw_loc_descr_ref loc_descr = 0;
offset = tree_to_shwi (BINFO_OFFSET (decl));
}
else
- offset = field_byte_offset (decl);
+ {
+ loc_descr = field_byte_offset (decl, ctx, &offset);
+
+ /* If loc_descr is available then we know the field offset is dynamic.
+ However, GDB does not handle dynamic field offsets very well at the
+ moment. */
+ if (loc_descr != NULL && gnat_encodings != DWARF_GNAT_ENCODINGS_MINIMAL)
+ {
+ loc_descr = NULL;
+ offset = 0;
+ }
+
+ /* Data member location evaluation starts with the base address on the
+ stack. Compute the field offset and add it to this base address. */
+ else if (loc_descr != NULL)
+ add_loc_descr (&loc_descr, new_loc_descr (DW_OP_plus, 0, 0));
+ }
if (! loc_descr)
{
static void
insert_float (const_rtx rtl, unsigned char *array)
{
- REAL_VALUE_TYPE rv;
long val[4];
int i;
- REAL_VALUE_FROM_CONST_DOUBLE (rv, rtl);
- real_to_target (val, &rv, GET_MODE (rtl));
+ real_to_target (val, CONST_DOUBLE_REAL_VALUE (rtl), GET_MODE (rtl));
/* real_to_target puts 32-bit pieces in each long. Pack them. */
for (i = 0; i < GET_MODE_SIZE (GET_MODE (rtl)) / 4; i++)
return true;
case CONST_WIDE_INT:
- add_AT_wide (die, DW_AT_const_value,
- std::make_pair (rtl, GET_MODE (rtl)));
+ {
+ wide_int w1 = std::make_pair (rtl, MAX_MODE_INT);
+ unsigned int prec = MIN (wi::min_precision (w1, UNSIGNED),
+ (unsigned int)CONST_WIDE_INT_NUNITS (rtl) * HOST_BITS_PER_WIDE_INT);
+ wide_int w = wi::zext (w1, prec);
+ add_AT_wide (die, DW_AT_const_value, w);
+ }
return true;
case CONST_DOUBLE:
machine_mode mode;
HOST_WIDE_INT bitsize, bitpos;
tree offset;
- int unsignedp, volatilep = 0;
+ int unsignedp, reversep, volatilep = 0;
/* If the decl isn't a VAR_DECL, or if it isn't static, or if
it does not have a value (the offset into the common area), or if it
if (TREE_CODE (val_expr) != COMPONENT_REF)
return NULL_TREE;
- cvar = get_inner_reference (val_expr, &bitsize, &bitpos, &offset,
- &mode, &unsignedp, &volatilep, true);
+ cvar = get_inner_reference (val_expr, &bitsize, &bitpos, &offset, &mode,
+ &unsignedp, &reversep, &volatilep, true);
if (cvar == NULL_TREE
|| TREE_CODE (cvar) != VAR_DECL
since we will need to refer to them each time the function is inlined. */
static bool
-add_location_or_const_value_attribute (dw_die_ref die, tree decl, bool cache_p,
- enum dwarf_attribute attr)
+add_location_or_const_value_attribute (dw_die_ref die, tree decl, bool cache_p)
{
rtx rtl;
dw_loc_list_ref list;
if (TREE_CODE (decl) == ERROR_MARK)
return false;
- if (get_AT (die, attr))
+ if (get_AT (die, DW_AT_location)
+ || get_AT (die, DW_AT_const_value))
return true;
gcc_assert (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL
}
if (list)
{
- add_AT_location_description (die, attr, list);
+ add_AT_location_description (die, DW_AT_location, list);
return true;
}
/* None of that worked, so it must not really have a location;
fieldsize = tree_to_shwi (DECL_SIZE_UNIT (field));
pos = int_byte_position (field);
gcc_assert (pos + fieldsize <= size);
- if (val
+ if (val && fieldsize != 0
&& !native_encode_initializer (val, array + pos, fieldsize))
return false;
}
add_scalar_info (dw_die_ref die, enum dwarf_attribute attr, tree value,
int forms, const struct loc_descr_context *context)
{
- dw_die_ref ctx, decl_die;
+ dw_die_ref context_die, decl_die;
dw_loc_list_ref list;
bool strip_conversions = true;
return;
if (current_function_decl == 0)
- ctx = comp_unit_die ();
+ context_die = comp_unit_die ();
else
- ctx = lookup_decl_die (current_function_decl);
+ context_die = lookup_decl_die (current_function_decl);
- decl_die = new_die (DW_TAG_variable, ctx, value);
+ decl_die = new_die (DW_TAG_variable, context_die, value);
add_AT_flag (decl_die, DW_AT_artificial, 1);
- add_type_attribute (decl_die, TREE_TYPE (value), TYPE_QUAL_CONST, ctx);
+ add_type_attribute (decl_die, TREE_TYPE (value), TYPE_QUAL_CONST, false,
+ context_die);
add_AT_location_description (decl_die, DW_AT_location, list);
add_AT_die_ref (die, attr, decl_die);
}
/* FALLTHRU */
default:
+ /* Because of the complex interaction there can be with other GNAT
+ encodings, GDB isn't ready yet to handle proper DWARF description
+ for self-referential subrange bounds: let GNAT encodings do the
+ magic in such a case. */
+ if (gnat_encodings != DWARF_GNAT_ENCODINGS_MINIMAL
+ && contains_placeholder_p (bound))
+ return;
+
add_scalar_info (subrange_die, bound_attr, bound,
dw_scalar_form_constant
| dw_scalar_form_exprloc
;
else
add_type_attribute (subrange_die, TREE_TYPE (domain),
- TYPE_UNQUALIFIED, type_die);
+ TYPE_UNQUALIFIED, false, type_die);
}
/* ??? If upper is NULL, the array has unspecified length,
{
dw_die_ref decl_die;
HOST_WIDE_INT size;
+ dw_loc_descr_ref size_expr = NULL;
switch (TREE_CODE (tree_node))
{
add_AT_die_ref (die, DW_AT_byte_size, decl_die);
return;
}
- size = int_size_in_bytes (tree_node);
+ size_expr = type_byte_size (tree_node, &size);
break;
case FIELD_DECL:
/* For a data member of a struct or union, the DW_AT_byte_size is
gcc_unreachable ();
}
+ /* Support for dynamically-sized objects was introduced by DWARFv3.
+ At the moment, GDB does not handle variable byte sizes very well,
+ though. */
+ if ((dwarf_version >= 3 || !dwarf_strict)
+ && gnat_encodings == DWARF_GNAT_ENCODINGS_MINIMAL
+ && size_expr != NULL)
+ add_AT_loc (die, DW_AT_byte_size, size_expr);
+
/* Note that `size' might be -1 when we get to this point. If it is, that
- indicates that the byte size of the entity in question is variable. We
- have no good way of expressing this fact in Dwarf at the present time,
- when location description was not used by the caller code instead. */
+ indicates that the byte size of the entity in question is variable and
+ that we could not generate a DWARF expression that computes it. */
if (size >= 0)
add_AT_unsigned (die, DW_AT_byte_size, size);
}
exact location of the "containing object" for a bit-field is rather
complicated. It's handled by the `field_byte_offset' function (above).
+ CTX is required: see the comment for VLR_CONTEXT.
+
Note that it is the size (in bytes) of the hypothetical "containing object"
which will be given in the DW_AT_byte_size attribute for this bit-field.
(See `byte_size_attribute' above). */
static inline void
-add_bit_offset_attribute (dw_die_ref die, tree decl)
+add_bit_offset_attribute (dw_die_ref die, tree decl, struct vlr_context *ctx)
{
- HOST_WIDE_INT object_offset_in_bytes = field_byte_offset (decl);
- tree type = DECL_BIT_FIELD_TYPE (decl);
+ HOST_WIDE_INT object_offset_in_bytes;
+ tree original_type = DECL_BIT_FIELD_TYPE (decl);
HOST_WIDE_INT bitpos_int;
HOST_WIDE_INT highest_order_object_bit_offset;
HOST_WIDE_INT highest_order_field_bit_offset;
HOST_WIDE_INT bit_offset;
+ field_byte_offset (decl, ctx, &object_offset_in_bytes);
+
/* Must be a field and a bit field. */
- gcc_assert (type && TREE_CODE (decl) == FIELD_DECL);
+ gcc_assert (original_type && TREE_CODE (decl) == FIELD_DECL);
/* We can't yet handle bit-fields whose offsets are variable, so if we
encounter such things, just return without generating any attribute
if (! BYTES_BIG_ENDIAN)
{
highest_order_field_bit_offset += tree_to_shwi (DECL_SIZE (decl));
- highest_order_object_bit_offset += simple_type_size_in_bits (type);
+ highest_order_object_bit_offset +=
+ simple_type_size_in_bits (original_type);
}
bit_offset
/* Add DW_AT_{,MIPS_}linkage_name attribute for the given decl,
 unconditionally. */
static void
+add_linkage_name_raw (dw_die_ref die, tree decl)
+{
+ /* Defer until we have an assembler name set. */
+ if (!DECL_ASSEMBLER_NAME_SET_P (decl))
+ {
+ limbo_die_node *asm_name;
+
+ /* Record the DIE on the deferred_asm_name list so the attribute can
+ be added once the assembler name becomes available. */
+ asm_name = ggc_cleared_alloc<limbo_die_node> ();
+ asm_name->die = die;
+ asm_name->created_for = decl;
+ asm_name->next = deferred_asm_name;
+ deferred_asm_name = asm_name;
+ }
+ /* No point in emitting a linkage name identical to the source name. */
+ else if (DECL_ASSEMBLER_NAME (decl) != DECL_NAME (decl))
+ add_linkage_attr (die, decl);
+}
+
+/* Add DW_AT_{,MIPS_}linkage_name attribute for the given decl if desired. */
+
+static void
add_linkage_name (dw_die_ref die, tree decl)
{
 /* Emit the linkage name only for public decls that are not
 register variables and not members; the actual work is done by
 add_linkage_name_raw. */
 if (debug_info_level > DINFO_LEVEL_NONE
 && TREE_PUBLIC (decl)
 && !(TREE_CODE (decl) == VAR_DECL && DECL_REGISTER (decl))
 && die->die_tag != DW_TAG_member)
- {
- /* Defer until we have an assembler name set. */
- if (!DECL_ASSEMBLER_NAME_SET_P (decl))
- {
- limbo_die_node *asm_name;
-
- asm_name = ggc_cleared_alloc<limbo_die_node> ();
- asm_name->die = die;
- asm_name->created_for = decl;
- asm_name->next = deferred_asm_name;
- deferred_asm_name = asm_name;
- }
- else if (DECL_ASSEMBLER_NAME (decl) != DECL_NAME (decl))
- add_linkage_attr (die, decl);
- }
+ add_linkage_name_raw (die, decl);
}
/* Add a DW_AT_name attribute and source coordinate attribute for the
#endif /* VMS_DEBUGGING_INFO */
}
+/* Add VALUE as a DW_AT_discr_value attribute to DIE. */
+
+static void
+add_discr_value (dw_die_ref die, dw_discr_value *value)
+{
+ dw_attr_node attr;
+
+ attr.dw_attr = DW_AT_discr_value;
+ attr.dw_attr_val.val_class = dw_val_class_discr_value;
+ attr.dw_attr_val.val_entry = NULL;
+ attr.dw_attr_val.v.val_discr_value.pos = value->pos;
+ /* VALUE->pos selects the active union member: v.uval when non-zero,
+ v.sval otherwise (presumably unsigned vs. signed — the field names
+ suggest so). */
+ if (value->pos)
+ attr.dw_attr_val.v.val_discr_value.v.uval = value->v.uval;
+ else
+ attr.dw_attr_val.v.val_discr_value.v.sval = value->v.sval;
+ add_dwarf_attr (die, &attr);
+}
+
+/* Add DISCR_LIST as a DW_AT_discr_list attribute to DIE. */
+
+static void
+add_discr_list (dw_die_ref die, dw_discr_list_ref discr_list)
+{
+ dw_attr_node attr;
+
+ /* Build the attribute node by hand and attach it to DIE. */
+ attr.dw_attr = DW_AT_discr_list;
+ attr.dw_attr_val.val_class = dw_val_class_discr_list;
+ attr.dw_attr_val.val_entry = NULL;
+ attr.dw_attr_val.v.val_discr_list = discr_list;
+ add_dwarf_attr (die, &attr);
+}
+
+/* Return the discriminant list (DW_AT_discr_list) carried by ATTR. */
+
+static inline dw_discr_list_ref
+AT_discr_list (dw_attr_node *attr)
+{
+ return attr->dw_attr_val.v.val_discr_list;
+}
+
#ifdef VMS_DEBUGGING_INFO
/* Output the debug main pointer die for VMS */
static void
add_type_attribute (dw_die_ref object_die, tree type, int cv_quals,
- dw_die_ref context_die)
+ bool reverse, dw_die_ref context_die)
{
enum tree_code code = TREE_CODE (type);
dw_die_ref type_die = NULL;
type_die = modified_type_die (type,
cv_quals | TYPE_QUALS_NO_ADDR_SPACE (type),
+ reverse,
context_die);
if (type_die != NULL)
if (size >= 0)
add_AT_unsigned (array_die, DW_AT_byte_size, size);
else if (TYPE_DOMAIN (type) != NULL_TREE
- && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != NULL_TREE
- && DECL_P (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
+ && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != NULL_TREE)
{
tree szdecl = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
- dw_loc_list_ref loc = loc_list_from_tree (szdecl, 2, NULL);
+ tree rszdecl = szdecl;
+ HOST_WIDE_INT rsize = 0;
size = int_size_in_bytes (TREE_TYPE (szdecl));
- if (loc && size > 0)
+ if (!DECL_P (szdecl))
{
- add_AT_location_description (array_die, DW_AT_string_length, loc);
- if (size != DWARF2_ADDR_SIZE)
- add_AT_unsigned (array_die, DW_AT_byte_size, size);
+ if (TREE_CODE (szdecl) == INDIRECT_REF
+ && DECL_P (TREE_OPERAND (szdecl, 0)))
+ {
+ rszdecl = TREE_OPERAND (szdecl, 0);
+ rsize = int_size_in_bytes (TREE_TYPE (rszdecl));
+ if (rsize <= 0)
+ size = 0;
+ }
+ else
+ size = 0;
+ }
+ if (size > 0)
+ {
+ dw_loc_list_ref loc = loc_list_from_tree (szdecl, 2, NULL);
+ if (loc == NULL
+ && early_dwarf
+ && current_function_decl
+ && DECL_CONTEXT (rszdecl) == current_function_decl)
+ {
+ dw_die_ref ref = lookup_decl_die (rszdecl);
+ dw_loc_descr_ref l = NULL;
+ if (ref)
+ {
+ l = new_loc_descr (DW_OP_call4, 0, 0);
+ l->dw_loc_oprnd1.val_class = dw_val_class_die_ref;
+ l->dw_loc_oprnd1.v.val_die_ref.die = ref;
+ l->dw_loc_oprnd1.v.val_die_ref.external = 0;
+ }
+ else if (TREE_CODE (rszdecl) == PARM_DECL
+ && string_types)
+ {
+ l = new_loc_descr (DW_OP_call4, 0, 0);
+ l->dw_loc_oprnd1.val_class = dw_val_class_decl_ref;
+ l->dw_loc_oprnd1.v.val_decl_ref = rszdecl;
+ string_types->safe_push (array_die);
+ }
+ if (l && rszdecl != szdecl)
+ {
+ if (rsize == DWARF2_ADDR_SIZE)
+ add_loc_descr (&l, new_loc_descr (DW_OP_deref,
+ 0, 0));
+ else
+ add_loc_descr (&l, new_loc_descr (DW_OP_deref_size,
+ rsize, 0));
+ }
+ if (l)
+ loc = new_loc_list (l, NULL, NULL, NULL);
+ }
+ if (loc)
+ {
+ add_AT_location_description (array_die, DW_AT_string_length,
+ loc);
+ if (size != DWARF2_ADDR_SIZE)
+ add_AT_unsigned (array_die, DW_AT_byte_size, size);
+ }
}
}
return;
element_type = TREE_TYPE (element_type);
}
- add_type_attribute (array_die, element_type, TYPE_UNQUALIFIED, context_die);
+ add_type_attribute (array_die, element_type, TYPE_UNQUALIFIED,
+ TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_REVERSE_STORAGE_ORDER (type),
+ context_die);
add_gnat_descriptive_type_attribute (array_die, type, context_die);
if (TYPE_ARTIFICIAL (type))
add_pubtype (type, array_die);
}
+/* After all arguments are created, adjust any DW_TAG_string_type
+ DIEs DW_AT_string_length attributes. */
+
+static void
+adjust_string_types (void)
+{
+ dw_die_ref array_die;
+ unsigned int i;
+ FOR_EACH_VEC_ELT (*string_types, i, array_die)
+ {
+ dw_attr_node *a = get_AT (array_die, DW_AT_string_length);
+ if (a == NULL)
+ continue;
+ /* Entries on string_types were queued as DW_OP_call4 operations whose
+ operand is still a decl reference; they must now be resolved. */
+ dw_loc_descr_ref loc = AT_loc (a);
+ gcc_assert (loc->dw_loc_opc == DW_OP_call4
+ && loc->dw_loc_oprnd1.val_class == dw_val_class_decl_ref);
+ dw_die_ref ref = lookup_decl_die (loc->dw_loc_oprnd1.v.val_decl_ref);
+ if (ref)
+ {
+ /* The decl now has a DIE: turn the operand into a DIE
+ reference. */
+ loc->dw_loc_oprnd1.val_class = dw_val_class_die_ref;
+ loc->dw_loc_oprnd1.v.val_die_ref.die = ref;
+ loc->dw_loc_oprnd1.v.val_die_ref.external = 0;
+ }
+ else
+ {
+ /* No DIE for the decl: the expression cannot be completed, so
+ drop the related attributes entirely. */
+ remove_AT (array_die, DW_AT_string_length);
+ remove_AT (array_die, DW_AT_byte_size);
+ }
+ }
+}
+
/* This routine generates DIE for array with hidden descriptor, details
are filled into *info by a langhook. */
{
const dw_die_ref scope_die = scope_die_for (type, context_die);
const dw_die_ref array_die = new_die (DW_TAG_array_type, scope_die, type);
- const struct loc_descr_context context = { type, info->base_decl };
+ const struct loc_descr_context context = { type, info->base_decl, NULL };
int dim;
add_name_attribute (array_die, type_tag (type));
dw_scalar_form_constant
| dw_scalar_form_exprloc
| dw_scalar_form_reference, &context);
+ if (info->stride)
+ {
+ const enum dwarf_attribute attr
+ = (info->stride_in_bits) ? DW_AT_bit_stride : DW_AT_byte_stride;
+ const int forms
+ = (info->stride_in_bits)
+ ? dw_scalar_form_constant
+ : (dw_scalar_form_constant
+ | dw_scalar_form_exprloc
+ | dw_scalar_form_reference);
+
+ add_scalar_info (array_die, attr, info->stride, forms, &context);
+ }
}
add_gnat_descriptive_type_attribute (array_die, type, context_die);
if (info->dimen[dim].bounds_type)
add_type_attribute (subrange_die,
- info->dimen[dim].bounds_type, 0,
- context_die);
+ info->dimen[dim].bounds_type, TYPE_UNQUALIFIED,
+ false, context_die);
if (info->dimen[dim].lower_bound)
add_bound_info (subrange_die, DW_AT_lower_bound,
info->dimen[dim].lower_bound, &context);
gen_type_die (info->element_type, context_die);
add_type_attribute (array_die, info->element_type, TYPE_UNQUALIFIED,
+ TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_REVERSE_STORAGE_ORDER (type),
context_die);
if (get_AT (array_die, DW_AT_name))
{
add_name_and_src_coords_attributes (decl_die, decl);
add_type_attribute (decl_die, TREE_TYPE (TREE_TYPE (decl)),
- TYPE_UNQUALIFIED, context_die);
+ TYPE_UNQUALIFIED, false, context_die);
}
if (DECL_ABSTRACT_P (decl))
static void
retry_incomplete_types (void)
{
+ set_early_dwarf s;
int i;
for (i = vec_safe_length (incomplete_types) - 1; i >= 0; i--)
if (should_emit_struct_debug ((*incomplete_types)[i], DINFO_USAGE_DIR_USE))
gen_type_die ((*incomplete_types)[i], comp_unit_die ());
+ vec_safe_truncate (incomplete_types, 0);
}
/* Determine what tag to use for a record type. */
if (dwarf_version >= 3 || !dwarf_strict)
{
tree underlying = lang_hooks.types.enum_underlying_base_type (type);
- add_type_attribute (type_die, underlying, TYPE_UNQUALIFIED,
+ add_type_attribute (type_die, underlying, TYPE_UNQUALIFIED, false,
context_die);
}
if (TYPE_STUB_DECL (type) != NULL_TREE)
tree type = TREE_TYPE (node_or_origin);
if (decl_by_reference_p (node_or_origin))
add_type_attribute (parm_die, TREE_TYPE (type),
- TYPE_UNQUALIFIED, context_die);
+ TYPE_UNQUALIFIED,
+ false, context_die);
else
add_type_attribute (parm_die, type,
decl_quals (node_or_origin),
- context_die);
+ false, context_die);
}
if (origin == NULL && DECL_ARTIFICIAL (node))
add_AT_flag (parm_die, DW_AT_artificial, 1);
equate_decl_number_to_die (node, parm_die);
if (! DECL_ABSTRACT_P (node_or_origin))
add_location_or_const_value_attribute (parm_die, node_or_origin,
- node == NULL, DW_AT_location);
+ node == NULL);
break;
case tcc_type:
/* We were called with some kind of a ..._TYPE node. */
- add_type_attribute (parm_die, node_or_origin, TYPE_UNQUALIFIED,
+ add_type_attribute (parm_die, node_or_origin, TYPE_UNQUALIFIED, false,
context_die);
break;
|| TREE_CODE (TREE_TYPE (member)) == UNION_TYPE
|| TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE)
{
+ struct vlr_context vlr_ctx = {
+ DECL_CONTEXT (member), /* struct_type */
+ NULL_TREE /* variant_part_offset */
+ };
gen_type_die (member_declared_type (member), type_die);
- gen_field_die (member, type_die);
+ gen_field_die (member, &vlr_ctx, type_die);
}
}
else
dw_die_ref die = get_AT_ref (old_die, DW_AT_type);
if (die == auto_die || die == decltype_auto_die)
add_type_attribute (subr_die, TREE_TYPE (TREE_TYPE (decl)),
- TYPE_UNQUALIFIED, context_die);
+ TYPE_UNQUALIFIED, false, context_die);
}
}
}
{
add_prototyped_attribute (subr_die, TREE_TYPE (decl));
add_type_attribute (subr_die, TREE_TYPE (TREE_TYPE (decl)),
- TYPE_UNQUALIFIED, context_die);
+ TYPE_UNQUALIFIED, false, context_die);
}
add_pure_or_virtual_attribute (subr_die, decl);
compute_frame_pointer_to_fb_displacement (cfa_fb_offset);
if (fun->static_chain_decl)
- add_AT_location_description
- (subr_die, DW_AT_static_link,
- loc_list_from_tree (fun->static_chain_decl, 2, NULL));
+ {
+ /* DWARF requires here a location expression that computes the
+ address of the enclosing subprogram's frame base. The machinery
+ in tree-nested.c is supposed to store this specific address in the
+ last field of the FRAME record. */
+ const tree frame_type
+ = TREE_TYPE (TREE_TYPE (fun->static_chain_decl));
+ const tree fb_decl = tree_last (TYPE_FIELDS (frame_type));
+
+ tree fb_expr
+ = build1 (INDIRECT_REF, frame_type, fun->static_chain_decl);
+ fb_expr = build3 (COMPONENT_REF, TREE_TYPE (fb_decl),
+ fb_expr, fb_decl, NULL_TREE);
+
+ add_AT_location_description (subr_die, DW_AT_static_link,
+ loc_list_from_tree (fb_expr, 0, NULL));
+ }
}
/* Generate child dies for template paramaters. */
tree generic_decl_parm = generic_decl
? DECL_ARGUMENTS (generic_decl)
: NULL;
+ auto_vec<dw_die_ref> string_types_vec;
+ if (string_types == NULL)
+ string_types = &string_types_vec;
/* Now we want to walk the list of parameters of the function and
emit their relevant DIEs.
&parm);
else if (parm && !POINTER_BOUNDS_P (parm))
{
- dw_die_ref parm_die = gen_decl_die (parm, NULL, subr_die);
+ dw_die_ref parm_die = gen_decl_die (parm, NULL, NULL, subr_die);
if (parm == DECL_ARGUMENTS (decl)
&& TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE
void_type_node 2) an unprototyped function declaration (not a
definition). This just means that we have no info about the
parameters at all. */
- if (prototype_p (TREE_TYPE (decl)))
+ if (early_dwarf)
{
- /* This is the prototyped case, check for.... */
- if (stdarg_p (TREE_TYPE (decl)))
+ if (prototype_p (TREE_TYPE (decl)))
+ {
+ /* This is the prototyped case, check for.... */
+ if (stdarg_p (TREE_TYPE (decl)))
+ gen_unspecified_parameters_die (decl, subr_die);
+ }
+ else if (DECL_INITIAL (decl) == NULL_TREE)
gen_unspecified_parameters_die (decl, subr_die);
}
- else if (DECL_INITIAL (decl) == NULL_TREE)
- gen_unspecified_parameters_die (decl, subr_die);
+
+ /* Adjust DW_TAG_string_type DIEs if needed, now that all arguments
+ have DIEs. */
+ if (string_types == &string_types_vec)
+ {
+ adjust_string_types ();
+ string_types = NULL;
+ }
}
if (subr_die != old_die)
/* Emit a DW_TAG_variable DIE for a named return value. */
if (DECL_NAME (DECL_RESULT (decl)))
- gen_decl_die (DECL_RESULT (decl), NULL, subr_die);
+ gen_decl_die (DECL_RESULT (decl), NULL, NULL, subr_die);
/* The first time through decls_for_scope we will generate the
DIEs for the locals. The second time, we fill in the
rtx tloc = NULL_RTX, tlocc = NULL_RTX;
rtx arg, next_arg;
- for (arg = NOTE_VAR_LOCATION (ca_loc->call_arg_loc_note);
+ for (arg = (ca_loc->call_arg_loc_note != NULL_RTX
+ ? NOTE_VAR_LOCATION (ca_loc->call_arg_loc_note)
+ : NULL_RTX);
arg; arg = next_arg)
{
dw_loc_descr_ref reg, val;
}
if (mode == VOIDmode || mode == BLKmode)
continue;
- if (XEXP (XEXP (arg, 0), 0) == pc_rtx)
- {
- gcc_assert (ca_loc->symbol_ref == NULL_RTX);
- tloc = XEXP (XEXP (arg, 0), 1);
- continue;
- }
- else if (GET_CODE (XEXP (XEXP (arg, 0), 0)) == CLOBBER
- && XEXP (XEXP (XEXP (arg, 0), 0), 0) == pc_rtx)
+ /* Get dynamic information about call target only if we
+ have no static information: we cannot generate both
+ DW_AT_abstract_origin and DW_AT_GNU_call_site_target
+ attributes. */
+ if (ca_loc->symbol_ref == NULL_RTX)
{
- gcc_assert (ca_loc->symbol_ref == NULL_RTX);
- tlocc = XEXP (XEXP (arg, 0), 1);
- continue;
+ if (XEXP (XEXP (arg, 0), 0) == pc_rtx)
+ {
+ tloc = XEXP (XEXP (arg, 0), 1);
+ continue;
+ }
+ else if (GET_CODE (XEXP (XEXP (arg, 0), 0)) == CLOBBER
+ && XEXP (XEXP (XEXP (arg, 0), 0), 0) == pc_rtx)
+ {
+ tlocc = XEXP (XEXP (arg, 0), 1);
+ continue;
+ }
}
reg = NULL;
if (REG_P (XEXP (XEXP (arg, 0), 0)))
DW_TAG_common_block and DW_TAG_variable. */
loc = loc_list_from_tree (com_decl, 2, NULL);
}
- else if (DECL_EXTERNAL (decl))
+ else if (DECL_EXTERNAL (decl_or_origin))
add_AT_flag (com_die, DW_AT_declaration, 1);
if (want_pubnames ())
add_pubname_string (cnam, com_die); /* ??? needed? */
remove_AT (com_die, DW_AT_declaration);
}
var_die = new_die (DW_TAG_variable, com_die, decl);
- add_name_and_src_coords_attributes (var_die, decl);
- add_type_attribute (var_die, TREE_TYPE (decl), decl_quals (decl),
+ add_name_and_src_coords_attributes (var_die, decl_or_origin);
+ add_type_attribute (var_die, TREE_TYPE (decl_or_origin),
+ decl_quals (decl_or_origin), false,
context_die);
add_AT_flag (var_die, DW_AT_external, 1);
if (loc)
}
add_AT_location_description (var_die, DW_AT_location, loc);
}
- else if (DECL_EXTERNAL (decl))
+ else if (DECL_EXTERNAL (decl_or_origin))
add_AT_flag (var_die, DW_AT_declaration, 1);
- equate_decl_number_to_die (decl, var_die);
+ if (decl)
+ equate_decl_number_to_die (decl, var_die);
return;
}
tree type = TREE_TYPE (decl_or_origin);
if (decl_by_reference_p (decl_or_origin))
- add_type_attribute (var_die, TREE_TYPE (type), TYPE_UNQUALIFIED,
+ add_type_attribute (var_die, TREE_TYPE (type), TYPE_UNQUALIFIED, false,
context_die);
else
- add_type_attribute (var_die, type, decl_quals (decl_or_origin),
+ add_type_attribute (var_die, type, decl_quals (decl_or_origin), false,
context_die);
}
add_pubname (decl_or_origin, var_die);
else
add_location_or_const_value_attribute (var_die, decl_or_origin,
- decl == NULL, DW_AT_location);
+ decl == NULL);
}
else
tree_add_const_value_attribute_for_decl (var_die, decl_or_origin);
const_die = new_die (DW_TAG_constant, context_die, decl);
equate_decl_number_to_die (decl, const_die);
add_name_and_src_coords_attributes (const_die, decl);
- add_type_attribute (const_die, type, TYPE_QUAL_CONST, context_die);
+ add_type_attribute (const_die, type, TYPE_QUAL_CONST, false, context_die);
if (TREE_PUBLIC (decl))
add_AT_flag (const_die, DW_AT_external, 1);
if (DECL_ARTIFICIAL (decl))
{
if (old_die)
{
-#ifdef ENABLE_CHECKING
/* This must have been generated early and it won't even
need location information since it's a DW_AT_inline
function. */
- for (dw_die_ref c = context_die; c; c = c->die_parent)
- if (c->die_tag == DW_TAG_inlined_subroutine
- || c->die_tag == DW_TAG_subprogram)
- {
- gcc_assert (get_AT (c, DW_AT_inline));
- break;
- }
-#endif
+ if (flag_checking)
+ for (dw_die_ref c = context_die; c; c = c->die_parent)
+ if (c->die_tag == DW_TAG_inlined_subroutine
+ || c->die_tag == DW_TAG_subprogram)
+ {
+ gcc_assert (get_AT (c, DW_AT_inline));
+ break;
+ }
return;
}
}
}
}
-/* Generate a DIE for a field in a record, or structure. */
+/* Generate a DIE for a field in a record, or structure. CTX is required: see
+ the comment for VLR_CONTEXT. */
static void
-gen_field_die (tree decl, dw_die_ref context_die)
+gen_field_die (tree decl, struct vlr_context *ctx, dw_die_ref context_die)
{
dw_die_ref decl_die;
decl_die = new_die (DW_TAG_member, context_die, decl);
add_name_and_src_coords_attributes (decl_die, decl);
- add_type_attribute (decl_die, member_declared_type (decl),
- decl_quals (decl), context_die);
+ add_type_attribute (decl_die, member_declared_type (decl), decl_quals (decl),
+ TYPE_REVERSE_STORAGE_ORDER (DECL_FIELD_CONTEXT (decl)),
+ context_die);
if (DECL_BIT_FIELD_TYPE (decl))
{
add_byte_size_attribute (decl_die, decl);
add_bit_size_attribute (decl_die, decl);
- add_bit_offset_attribute (decl_die, decl);
+ add_bit_offset_attribute (decl_die, decl, ctx);
}
+ /* If we have a variant part offset, then we are supposed to process a member
+ of a QUAL_UNION_TYPE, which is how we represent variant parts in
+ trees. */
+ gcc_assert (ctx->variant_part_offset == NULL_TREE
+ || TREE_CODE (DECL_FIELD_CONTEXT (decl)) != QUAL_UNION_TYPE);
if (TREE_CODE (DECL_FIELD_CONTEXT (decl)) != UNION_TYPE)
- add_data_member_location_attribute (decl_die, decl);
+ add_data_member_location_attribute (decl_die, decl, ctx);
if (DECL_ARTIFICIAL (decl))
add_AT_flag (decl_die, DW_AT_artificial, 1);
= new_die (DW_TAG_pointer_type, scope_die_for (type, context_die), type);
equate_type_number_to_die (type, ptr_die);
- add_type_attribute (ptr_die, TREE_TYPE (type), TYPE_UNQUALIFIED,
+ add_type_attribute (ptr_die, TREE_TYPE (type), TYPE_UNQUALIFIED, false,
context_die);
add_AT_unsigned (mod_type_die, DW_AT_byte_size, PTR_SIZE);
}
ref_die = new_die (DW_TAG_reference_type, scope_die, type);
equate_type_number_to_die (type, ref_die);
- add_type_attribute (ref_die, TREE_TYPE (type), TYPE_UNQUALIFIED,
+ add_type_attribute (ref_die, TREE_TYPE (type), TYPE_UNQUALIFIED, false,
context_die);
add_AT_unsigned (mod_type_die, DW_AT_byte_size, PTR_SIZE);
}
equate_type_number_to_die (type, ptr_die);
add_AT_die_ref (ptr_die, DW_AT_containing_type,
lookup_type_die (TYPE_OFFSET_BASETYPE (type)));
- add_type_attribute (ptr_die, TREE_TYPE (type), TYPE_UNQUALIFIED,
+ add_type_attribute (ptr_die, TREE_TYPE (type), TYPE_UNQUALIFIED, false,
context_die);
}
case OPT_fpreprocessed:
case OPT_fltrans_output_list_:
case OPT_fresolution_:
+ case OPT_fdebug_prefix_map_:
/* Ignore these. */
continue;
default:
else if (strncmp (language_string, "GNU Fortran", 11) == 0)
language = DW_LANG_Fortran90;
- add_AT_unsigned (die, DW_AT_language, language);
+ add_AT_unsigned (die, DW_AT_language, language);
+
+ switch (language)
+ {
+ case DW_LANG_Fortran77:
+ case DW_LANG_Fortran90:
+ case DW_LANG_Fortran95:
+ case DW_LANG_Fortran03:
+ case DW_LANG_Fortran08:
+ /* Fortran has case insensitive identifiers and the front-end
+ lowercases everything. */
+ add_AT_unsigned (die, DW_AT_identifier_case, DW_ID_down_case);
+ break;
+ default:
+ /* The default DW_ID_case_sensitive doesn't need to be specified. */
+ break;
+ }
+ return die;
+}
+
+/* Generate the DIE for a base class. */
+
+static void
+gen_inheritance_die (tree binfo, tree access, tree type,
+ dw_die_ref context_die)
+{
+ dw_die_ref die = new_die (DW_TAG_inheritance, context_die, binfo);
+ struct vlr_context ctx = { type, NULL };
+
+ add_type_attribute (die, BINFO_TYPE (binfo), TYPE_UNQUALIFIED, false,
+ context_die);
+ add_data_member_location_attribute (die, binfo, &ctx);
+
+ if (BINFO_VIRTUAL_P (binfo))
+ add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual);
+
+ /* In DWARF3+ the default is DW_ACCESS_private only in DW_TAG_class_type
+ children, otherwise the default is DW_ACCESS_public. In DWARF2
+ the default has always been DW_ACCESS_private. */
+ if (access == access_public_node)
+ {
+ if (dwarf_version == 2
+ || context_die->die_tag == DW_TAG_class_type)
+ add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_public);
+ }
+ else if (access == access_protected_node)
+ add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_protected);
+ else if (dwarf_version > 2
+ && context_die->die_tag != DW_TAG_class_type)
+ add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_private);
+}
+
+/* Return whether DECL is a FIELD_DECL that represents the variant part of a
+ structure. */
+static bool
+is_variant_part (tree decl)
+{
+ return (TREE_CODE (decl) == FIELD_DECL
+ && TREE_CODE (TREE_TYPE (decl)) == QUAL_UNION_TYPE);
+}
+
+/* Check that OPERAND is a reference to a field in STRUCT_TYPE. If it is,
+ return the FIELD_DECL. Return NULL_TREE otherwise. */
+
+static tree
+analyze_discr_in_predicate (tree operand, tree struct_type)
+{
+ bool continue_stripping = true;
+ while (continue_stripping)
+ switch (TREE_CODE (operand))
+ {
+ CASE_CONVERT:
+ operand = TREE_OPERAND (operand, 0);
+ break;
+ default:
+ continue_stripping = false;
+ break;
+ }
+
+ /* Match field access to members of struct_type only. */
+ if (TREE_CODE (operand) == COMPONENT_REF
+ && TREE_CODE (TREE_OPERAND (operand, 0)) == PLACEHOLDER_EXPR
+ && TREE_TYPE (TREE_OPERAND (operand, 0)) == struct_type
+ && TREE_CODE (TREE_OPERAND (operand, 1)) == FIELD_DECL)
+ return TREE_OPERAND (operand, 1);
+ else
+ return NULL_TREE;
+}
+
+/* Check that SRC is a constant integer that can be represented as a native
+ integer constant (either signed or unsigned). If so, store it into DEST and
+ return true. Return false otherwise. */
+
+static bool
+get_discr_value (tree src, dw_discr_value *dest)
+{
+ bool is_unsigned = TYPE_UNSIGNED (TREE_TYPE (src));
+
+ if (TREE_CODE (src) != INTEGER_CST
+ || !(is_unsigned ? tree_fits_uhwi_p (src) : tree_fits_shwi_p (src)))
+ return false;
+
+ dest->pos = is_unsigned;
+ if (is_unsigned)
+ dest->v.uval = tree_to_uhwi (src);
+ else
+ dest->v.sval = tree_to_shwi (src);
+
+ return true;
+}
+
+/* Try to extract synthetic properties out of VARIANT_PART_DECL, which is a
+ FIELD_DECL in STRUCT_TYPE that represents a variant part. If unsuccessful,
+ store NULL_TREE in DISCR_DECL. Otherwise:
+
+ - store the discriminant field in STRUCT_TYPE that controls the variant
+ part to *DISCR_DECL
+
+ - put in *DISCR_LISTS_P an array where for each variant, the item
+ represents the corresponding matching list of discriminant values.
+
+ - put in *DISCR_LISTS_LENGTH the number of variants, which is the size of
+ the above array.
+
+ Note that when the array is allocated (i.e. when the analysis is
+ successful), it is up to the caller to free the array. */
+
+static void
+analyze_variants_discr (tree variant_part_decl,
+ tree struct_type,
+ tree *discr_decl,
+ dw_discr_list_ref **discr_lists_p,
+ unsigned *discr_lists_length)
+{
+ tree variant_part_type = TREE_TYPE (variant_part_decl);
+ tree variant;
+ dw_discr_list_ref *discr_lists;
+ unsigned i;
+
+ /* Compute how many variants there are in this variant part. */
+ *discr_lists_length = 0;
+ for (variant = TYPE_FIELDS (variant_part_type);
+ variant != NULL_TREE;
+ variant = DECL_CHAIN (variant))
+ ++*discr_lists_length;
+
+ *discr_decl = NULL_TREE;
+ *discr_lists_p
+ = (dw_discr_list_ref *) xcalloc (*discr_lists_length,
+ sizeof (**discr_lists_p));
+ discr_lists = *discr_lists_p;
+
+ /* And then analyze all variants to extract discriminant information for all
+ of them. This analysis is conservative: as soon as we detect something we
+ do not support, abort everything and pretend we found nothing. */
+ for (variant = TYPE_FIELDS (variant_part_type), i = 0;
+ variant != NULL_TREE;
+ variant = DECL_CHAIN (variant), ++i)
+ {
+ tree match_expr = DECL_QUALIFIER (variant);
+
+ /* Now, try to analyze the predicate and deduce a discriminant for
+ it. */
+ if (match_expr == boolean_true_node)
+ /* Typically happens for the default variant: it matches all cases that
+ previous variants rejected. Don't output any matching value for
+ this one. */
+ continue;
+
+ /* The following loop tries to iterate over each discriminant
+ possibility: single values or ranges. */
+ while (match_expr != NULL_TREE)
+ {
+ tree next_round_match_expr;
+ tree candidate_discr = NULL_TREE;
+ dw_discr_list_ref new_node = NULL;
+
+ /* Possibilities are matched one after the other by nested
+ TRUTH_ORIF_EXPR expressions. Process the current possibility and
+ continue with the rest at next iteration. */
+ if (TREE_CODE (match_expr) == TRUTH_ORIF_EXPR)
+ {
+ next_round_match_expr = TREE_OPERAND (match_expr, 0);
+ match_expr = TREE_OPERAND (match_expr, 1);
+ }
+ else
+ next_round_match_expr = NULL_TREE;
+
+ if (match_expr == boolean_false_node)
+ /* This sub-expression matches nothing: just wait for the next
+ one. */
+ ;
+
+ else if (TREE_CODE (match_expr) == EQ_EXPR)
+ {
+ /* We are matching: <discr_field> == <integer_cst>
+ This sub-expression matches a single value. */
+ tree integer_cst = TREE_OPERAND (match_expr, 1);
+
+ candidate_discr
+ = analyze_discr_in_predicate (TREE_OPERAND (match_expr, 0),
+ struct_type);
+
+ new_node = ggc_cleared_alloc<dw_discr_list_node> ();
+ if (!get_discr_value (integer_cst,
+ &new_node->dw_discr_lower_bound))
+ goto abort;
+ new_node->dw_discr_range = false;
+ }
+
+ else if (TREE_CODE (match_expr) == TRUTH_ANDIF_EXPR)
+ {
+ /* We are matching:
+ <discr_field> > <integer_cst>
+ && <discr_field> < <integer_cst>.
+ This sub-expression matches the range of values between the
+ two matched integer constants. Note that comparisons can be
+ inclusive or exclusive. */
+ tree candidate_discr_1, candidate_discr_2;
+ tree lower_cst, upper_cst;
+ bool lower_cst_included, upper_cst_included;
+ tree lower_op = TREE_OPERAND (match_expr, 0);
+ tree upper_op = TREE_OPERAND (match_expr, 1);
+
+ /* When the comparison is exclusive, the integer constant is not
+ the discriminant range bound we are looking for: we will have
+ to increment or decrement it. */
+ if (TREE_CODE (lower_op) == GE_EXPR)
+ lower_cst_included = true;
+ else if (TREE_CODE (lower_op) == GT_EXPR)
+ lower_cst_included = false;
+ else
+ goto abort;
+
+ if (TREE_CODE (upper_op) == LE_EXPR)
+ upper_cst_included = true;
+ else if (TREE_CODE (upper_op) == LT_EXPR)
+ upper_cst_included = false;
+ else
+ goto abort;
+
+ /* Extract the discriminant from the first operand and check it
+ is consistent with the same analysis in the second
+ operand. */
+ candidate_discr_1
+ = analyze_discr_in_predicate (TREE_OPERAND (lower_op, 0),
+ struct_type);
+ candidate_discr_2
+ = analyze_discr_in_predicate (TREE_OPERAND (upper_op, 0),
+ struct_type);
+ if (candidate_discr_1 == candidate_discr_2)
+ candidate_discr = candidate_discr_1;
+ else
+ goto abort;
+
+ /* Extract bounds from both. */
+ new_node = ggc_cleared_alloc<dw_discr_list_node> ();
+ lower_cst = TREE_OPERAND (lower_op, 1);
+ upper_cst = TREE_OPERAND (upper_op, 1);
+
+ if (!lower_cst_included)
+ lower_cst
+ = fold_build2 (PLUS_EXPR, TREE_TYPE (lower_cst), lower_cst,
+ build_int_cst (TREE_TYPE (lower_cst), 1));
+ if (!upper_cst_included)
+ upper_cst
+ = fold_build2 (MINUS_EXPR, TREE_TYPE (upper_cst), upper_cst,
+ build_int_cst (TREE_TYPE (upper_cst), 1));
+
+ if (!get_discr_value (lower_cst,
+ &new_node->dw_discr_lower_bound)
+ || !get_discr_value (upper_cst,
+ &new_node->dw_discr_upper_bound))
+ goto abort;
+
+ new_node->dw_discr_range = true;
+ }
+
+ else
+ /* Unsupported sub-expression: we cannot determine the set of
+ matching discriminant values. Abort everything. */
+ goto abort;
+
+ /* If the discriminant info is not consistent with what we saw so
+ far, consider the analysis failed and abort everything. */
+ if (candidate_discr == NULL_TREE
+ || (*discr_decl != NULL_TREE && candidate_discr != *discr_decl))
+ goto abort;
+ else
+ *discr_decl = candidate_discr;
+
+ if (new_node != NULL)
+ {
+ new_node->dw_discr_next = discr_lists[i];
+ discr_lists[i] = new_node;
+ }
+ match_expr = next_round_match_expr;
+ }
+ }
+
+ /* If we reach this point, we could match everything we were interested
+ in. */
+ return;
- switch (language)
- {
- case DW_LANG_Fortran77:
- case DW_LANG_Fortran90:
- case DW_LANG_Fortran95:
- case DW_LANG_Fortran03:
- case DW_LANG_Fortran08:
- /* Fortran has case insensitive identifiers and the front-end
- lowercases everything. */
- add_AT_unsigned (die, DW_AT_identifier_case, DW_ID_down_case);
- break;
- default:
- /* The default DW_ID_case_sensitive doesn't need to be specified. */
- break;
- }
- return die;
+abort:
+ /* Clean all data structure and return no result. */
+ free (*discr_lists_p);
+ *discr_lists_p = NULL;
+ *discr_decl = NULL_TREE;
}
-/* Generate the DIE for a base class. */
+/* Generate a DIE to represent VARIANT_PART_DECL, a variant part that is part
+ of STRUCT_TYPE, a record type. This new DIE is emitted as the next child
+ under CONTEXT_DIE.
+
+ Variant parts are supposed to be implemented as a FIELD_DECL whose type is a
+ QUAL_UNION_TYPE: this is the VARIANT_PART_DECL parameter. The members for
+ this type, which are record types, represent the available variants and each
+ has a DECL_QUALIFIER attribute. The discriminant and the discriminant
+ values are inferred from these attributes.
+
+ In trees, the offsets for the fields inside these sub-records are relative
+ to the variant part itself, whereas the corresponding DIEs should have
+ offset attributes that are relative to the embedding record base address.
+ This is why the caller must provide a VARIANT_PART_OFFSET expression: it
+ must be an expression that computes the offset of the variant part to
+ describe in DWARF. */
static void
-gen_inheritance_die (tree binfo, tree access, dw_die_ref context_die)
-{
- dw_die_ref die = new_die (DW_TAG_inheritance, context_die, binfo);
+gen_variant_part (tree variant_part_decl, struct vlr_context *vlr_ctx,
+ dw_die_ref context_die)
+{
+ const tree variant_part_type = TREE_TYPE (variant_part_decl);
+ tree variant_part_offset = vlr_ctx->variant_part_offset;
+ struct loc_descr_context ctx = {
+ vlr_ctx->struct_type, /* context_type */
+ NULL_TREE, /* base_decl */
+ NULL /* dpi */
+ };
+
+ /* The FIELD_DECL node in STRUCT_TYPE that acts as the discriminant, or
+ NULL_TREE if there is no such field. */
+ tree discr_decl = NULL_TREE;
+ dw_discr_list_ref *discr_lists;
+ unsigned discr_lists_length = 0;
+ unsigned i;
- add_type_attribute (die, BINFO_TYPE (binfo), TYPE_UNQUALIFIED, context_die);
- add_data_member_location_attribute (die, binfo);
+ dw_die_ref dwarf_proc_die = NULL;
+ dw_die_ref variant_part_die
+ = new_die (DW_TAG_variant_part, context_die, variant_part_type);
- if (BINFO_VIRTUAL_P (binfo))
- add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual);
+ equate_decl_number_to_die (variant_part_decl, variant_part_die);
- /* In DWARF3+ the default is DW_ACCESS_private only in DW_TAG_class_type
- children, otherwise the default is DW_ACCESS_public. In DWARF2
- the default has always been DW_ACCESS_private. */
- if (access == access_public_node)
+ analyze_variants_discr (variant_part_decl, vlr_ctx->struct_type,
+ &discr_decl, &discr_lists, &discr_lists_length);
+
+ if (discr_decl != NULL_TREE)
{
- if (dwarf_version == 2
- || context_die->die_tag == DW_TAG_class_type)
- add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_public);
+ dw_die_ref discr_die = lookup_decl_die (discr_decl);
+
+ if (discr_die)
+ add_AT_die_ref (variant_part_die, DW_AT_discr, discr_die);
+ else
+ /* We have no DIE for the discriminant, so just discard all
+ discriminant information in the output. */
+ discr_decl = NULL_TREE;
}
- else if (access == access_protected_node)
- add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_protected);
- else if (dwarf_version > 2
- && context_die->die_tag != DW_TAG_class_type)
- add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_private);
+
+ /* If the offset for this variant part is more complex than a constant,
+ create a DWARF procedure for it so that we will not have to generate DWARF
+ expressions for it for each member. */
+ if (TREE_CODE (variant_part_offset) != INTEGER_CST
+ && (dwarf_version >= 3 || !dwarf_strict))
+ {
+ const tree dwarf_proc_fndecl
+ = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL, NULL_TREE,
+ build_function_type (TREE_TYPE (variant_part_offset),
+ NULL_TREE));
+ const tree dwarf_proc_call = build_call_expr (dwarf_proc_fndecl, 0);
+ const dw_loc_descr_ref dwarf_proc_body
+ = loc_descriptor_from_tree (variant_part_offset, 0, &ctx);
+
+ dwarf_proc_die = new_dwarf_proc_die (dwarf_proc_body,
+ dwarf_proc_fndecl, context_die);
+ if (dwarf_proc_die != NULL)
+ variant_part_offset = dwarf_proc_call;
+ }
+
+ /* Output DIEs for all variants. */
+ i = 0;
+ for (tree variant = TYPE_FIELDS (variant_part_type);
+ variant != NULL_TREE;
+ variant = DECL_CHAIN (variant), ++i)
+ {
+ tree variant_type = TREE_TYPE (variant);
+ dw_die_ref variant_die;
+
+ /* All variants (i.e. members of a variant part) are supposed to be
+ encoded as structures. Sub-variant parts are QUAL_UNION_TYPE fields
+ under these records. */
+ gcc_assert (TREE_CODE (variant_type) == RECORD_TYPE);
+
+ variant_die = new_die (DW_TAG_variant, variant_part_die, variant_type);
+ equate_decl_number_to_die (variant, variant_die);
+
+ /* Output discriminant values this variant matches, if any. */
+ if (discr_decl == NULL || discr_lists[i] == NULL)
+ /* In the case we have no discriminant information at all, this is
+ probably the default variant: as the standard says, don't
+ output any discriminant value/list attribute. */
+ ;
+ else if (discr_lists[i]->dw_discr_next == NULL
+ && !discr_lists[i]->dw_discr_range)
+ /* If there is only one accepted value, don't bother outputting a
+ list. */
+ add_discr_value (variant_die, &discr_lists[i]->dw_discr_lower_bound);
+ else
+ add_discr_list (variant_die, discr_lists[i]);
+
+ for (tree member = TYPE_FIELDS (variant_type);
+ member != NULL_TREE;
+ member = DECL_CHAIN (member))
+ {
+ struct vlr_context vlr_sub_ctx = {
+ vlr_ctx->struct_type, /* struct_type */
+ NULL /* variant_part_offset */
+ };
+ if (is_variant_part (member))
+ {
+ /* All offsets for fields inside variant parts are relative to
+ the top-level embedding RECORD_TYPE's base address. On the
+ other hand, offsets in GCC's types are relative to the
+ nested-most variant part. So we have to sum offsets each time
+ we recurse. */
+
+ vlr_sub_ctx.variant_part_offset
+ = fold_build2 (PLUS_EXPR, TREE_TYPE (variant_part_offset),
+ variant_part_offset, byte_position (member));
+ gen_variant_part (member, &vlr_sub_ctx, variant_die);
+ }
+ else
+ {
+ vlr_sub_ctx.variant_part_offset = variant_part_offset;
+ gen_decl_die (member, NULL, &vlr_sub_ctx, variant_die);
+ }
+ }
+ }
+
+ free (discr_lists);
}
/* Generate a DIE for a class member. */
for (i = 0; BINFO_BASE_ITERATE (binfo, i, base); i++)
gen_inheritance_die (base,
(accesses ? (*accesses)[i] : access_public_node),
+ type,
context_die);
}
/* Now output info about the data members and type members. */
for (member = TYPE_FIELDS (type); member; member = DECL_CHAIN (member))
{
+ struct vlr_context vlr_ctx = { type, NULL_TREE };
+
/* If we thought we were generating minimal debug info for TYPE
and then changed our minds, some of the member declarations
may have already been defined. Don't define them again, but
child = lookup_decl_die (member);
if (child)
splice_child_die (context_die, child);
+
+ /* Do not generate standard DWARF for variant parts if we are generating
+ the corresponding GNAT encodings: DIEs generated for both would
+ conflict in our mappings. */
+ else if (is_variant_part (member)
+ && gnat_encodings == DWARF_GNAT_ENCODINGS_MINIMAL)
+ {
+ vlr_ctx.variant_part_offset = byte_position (member);
+ gen_variant_part (member, &vlr_ctx, context_die);
+ }
else
- gen_decl_die (member, NULL, context_die);
+ {
+ vlr_ctx.variant_part_offset = NULL_TREE;
+ gen_decl_die (member, NULL, &vlr_ctx, context_die);
+ }
}
/* We do not keep type methods in type variants. */
if (child)
splice_child_die (context_die, child);
else
- gen_decl_die (member, NULL, context_die);
+ gen_decl_die (member, NULL, NULL, context_die);
}
}
equate_type_number_to_die (type, subr_die);
add_prototyped_attribute (subr_die, type);
- add_type_attribute (subr_die, return_type, TYPE_UNQUALIFIED, context_die);
+ add_type_attribute (subr_die, return_type, TYPE_UNQUALIFIED, false,
+ context_die);
gen_formal_types_die (type, subr_die);
if (get_AT (subr_die, DW_AT_name))
is the name of the typedef decl naming the anonymous
struct. This greatly eases the work of consumers of
this debug info. */
- add_linkage_attr (lookup_type_die (type), decl);
+ add_linkage_name_raw (lookup_type_die (type), decl);
}
}
- add_type_attribute (type_die, type, decl_quals (decl), context_die);
+ add_type_attribute (type_die, type, decl_quals (decl), false,
+ context_die);
if (is_naming_typedef_decl (decl))
/* We want that all subsequent calls to lookup_type_die with
if (type == NULL_TREE || type == error_mark_node)
return;
-#ifdef ENABLE_CHECKING
- if (type)
+ if (flag_checking && type)
verify_type (type);
-#endif
if (TYPE_NAME (type) != NULL_TREE
&& TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
TREE_ASM_WRITTEN (type) = 1;
- gen_decl_die (TYPE_NAME (type), NULL, context_die);
+ gen_decl_die (TYPE_NAME (type), NULL, NULL, context_die);
return;
}
if (DECL_CONTEXT (TYPE_NAME (type))
&& TREE_CODE (DECL_CONTEXT (TYPE_NAME (type))) == NAMESPACE_DECL)
context_die = get_context_die (DECL_CONTEXT (TYPE_NAME (type)));
-
- gen_decl_die (TYPE_NAME (type), NULL, context_die);
+
+ gen_decl_die (TYPE_NAME (type), NULL, NULL, context_die);
return;
}
/* We are going to output a DIE to represent the unqualified version
of this type (i.e. without any const or volatile qualifiers) so
get the main variant (i.e. the unqualified version) of this type
- now. (Vectors are special because the debugging info is in the
+ now. (Vectors and arrays are special because the debugging info is in the
cloned type itself). */
- if (TREE_CODE (type) != VECTOR_TYPE)
+ if (TREE_CODE (type) != VECTOR_TYPE
+ && TREE_CODE (type) != ARRAY_TYPE)
type = type_main_variant (type);
/* If this is an array type with hidden descriptor, handle it first. */
memset (&info, 0, sizeof (info));
if (lang_hooks.types.get_array_descr_info (type, &info))
{
+ /* Fortran sometimes emits array types with no dimension. */
+ gcc_assert (info.ndimensions >= 0
+ && (info.ndimensions
+ <= DWARF2OUT_ARRAY_DESCR_INFO_MAX_DIMEN));
gen_descr_array_type_die (type, &info, context_die);
TREE_ASM_WRITTEN (type) = 1;
return;
if (type != error_mark_node)
{
gen_type_die_with_usage (type, context_die, DINFO_USAGE_DIR_USE);
-#ifdef ENABLE_CHECKING
- dw_die_ref die = lookup_type_die (type);
- if (die)
- check_die (die);
-#endif
+ if (flag_checking)
+ {
+ dw_die_ref die = lookup_type_die (type);
+ if (die)
+ check_die (die);
+ }
}
}
stmt, context_die);
}
else
- gen_decl_die (decl, origin, context_die);
+ gen_decl_die (decl, origin, NULL, context_die);
}
/* Generate all of the decls declared within a given scope and (recursively)
/* Is this a typedef we can avoid emitting? */
-static inline int
+bool
is_redundant_typedef (const_tree decl)
{
if (TYPE_DECL_IS_STUB (decl))
- return 1;
+ return true;
if (DECL_ARTIFICIAL (decl)
&& DECL_CONTEXT (decl)
&& TREE_CODE (TYPE_NAME (DECL_CONTEXT (decl))) == TYPE_DECL
&& DECL_NAME (decl) == DECL_NAME (TYPE_NAME (DECL_CONTEXT (decl))))
/* Also ignore the artificial member typedef for the class name. */
- return 1;
+ return true;
- return 0;
+ return false;
}
/* Return TRUE if TYPE is a typedef that names a type for linkage
{
if (decl == NULL_TREE
|| TREE_CODE (decl) != TYPE_DECL
+ || DECL_NAMELESS (decl)
|| !is_tagged_type (TREE_TYPE (decl))
|| DECL_IS_BUILTIN (decl)
|| is_redundant_typedef (decl)
gen_decl_die() call. */
saved_external_flag = DECL_EXTERNAL (decl);
DECL_EXTERNAL (decl) = 1;
- gen_decl_die (decl, NULL, context_die);
+ gen_decl_die (decl, NULL, NULL, context_die);
DECL_EXTERNAL (decl) = saved_external_flag;
break;
dw_die_ref context_die = get_context_die (TYPE_CONTEXT (type));
type_die = modified_type_die (type, TYPE_QUALS_NO_ADDR_SPACE (type),
- context_die);
+ false, context_die);
gcc_assert (type_die);
}
return type_die;
if (is_fortran ())
return ns_context;
if (DECL_P (thing))
- gen_decl_die (thing, NULL, ns_context);
+ gen_decl_die (thing, NULL, NULL, ns_context);
else
gen_type_die (thing, ns_context);
}
/* Generate Dwarf debug information for a decl described by DECL.
The return value is currently only meaningful for PARM_DECLs,
- for all other decls it returns NULL. */
+ for all other decls it returns NULL.
+
+ If DECL is a FIELD_DECL, CTX is required: see the comment for VLR_CONTEXT.
+ It can be NULL otherwise. */
static dw_die_ref
-gen_decl_die (tree decl, tree origin, dw_die_ref context_die)
+gen_decl_die (tree decl, tree origin, struct vlr_context *ctx,
+ dw_die_ref context_die)
{
tree decl_or_origin = decl ? decl : origin;
tree class_origin = NULL, ultimate_origin;
break;
case FIELD_DECL:
+ gcc_assert (ctx != NULL && ctx->struct_type != NULL);
/* Ignore the nameless fields that are used to skip bits but handle C++
anonymous unions and structs. */
if (DECL_NAME (decl) != NULL_TREE
|| TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE)
{
gen_type_die (member_declared_type (decl), context_die);
- gen_field_die (decl, context_die);
+ gen_field_die (decl, ctx, context_die);
}
break;
context_die);
case NAMESPACE_DECL:
- case IMPORTED_DECL:
if (dwarf_version >= 3 || !dwarf_strict)
gen_namespace_die (decl, context_die);
break;
+ case IMPORTED_DECL:
+ dwarf2out_imported_module_or_decl_1 (decl, DECL_NAME (decl),
+ DECL_CONTEXT (decl), context_die);
+ break;
+
case NAMELIST_DECL:
gen_namelist_decl (DECL_NAME (decl), context_die,
NAMELIST_DECL_ASSOCIATED_DECL (decl));
{
dw_die_ref die = lookup_decl_die (decl);
if (die)
- add_location_or_const_value_attribute (die, decl, false,
- DW_AT_location);
+ {
+ /* We get called via the symtab code invoking late_global_decl
+ for symbols that are optimized out. Do not add locations
+ for those. */
+ varpool_node *node = varpool_node::get (decl);
+ if (! node || ! node->definition)
+ tree_add_const_value_attribute_for_decl (die, decl);
+ else
+ add_location_or_const_value_attribute (die, decl, false);
+ }
}
}
return;
}
- gen_decl_die (decl, NULL, context_die);
+ gen_decl_die (decl, NULL, NULL, context_die);
-#ifdef ENABLE_CHECKING
- dw_die_ref die = lookup_decl_die (decl);
- if (die)
- check_die (die);
-#endif
+ if (flag_checking)
+ {
+ dw_die_ref die = lookup_decl_die (decl);
+ if (die)
+ check_die (die);
+ }
}
/* Write the debugging output for DECL. */
just a unique number which is associated with only that one filename. We
need such numbers for the sake of generating labels (in the .debug_sfnames
section) and references to those files numbers (in the .debug_srcinfo
- and.debug_macinfo sections). If the filename given as an argument is not
+ and .debug_macinfo sections). If the filename given as an argument is not
found in our current list, add it to the list and assign it the next
available unique index number. */
char loclabel[MAX_ARTIFICIAL_LABEL_BYTES + 2];
struct var_loc_node *newloc;
rtx_insn *next_real, *next_note;
+ rtx_insn *call_insn = NULL;
static const char *last_label;
static const char *last_postcall_label;
static bool last_in_cold_section_p;
call_site_count++;
if (SIBLING_CALL_P (loc_note))
tail_call_site_count++;
+ if (optimize == 0 && !flag_var_tracking)
+ {
+ /* When the var-tracking pass is not running, there is no note
+ for indirect calls whose target is compile-time known. In this
+ case, process such calls specifically so that we generate call
+ sites for them anyway. */
+ rtx x = PATTERN (loc_note);
+ if (GET_CODE (x) == PARALLEL)
+ x = XVECEXP (x, 0, 0);
+ if (GET_CODE (x) == SET)
+ x = SET_SRC (x);
+ if (GET_CODE (x) == CALL)
+ x = XEXP (x, 0);
+ if (!MEM_P (x)
+ || GET_CODE (XEXP (x, 0)) != SYMBOL_REF
+ || !SYMBOL_REF_DECL (XEXP (x, 0))
+ || (TREE_CODE (SYMBOL_REF_DECL (XEXP (x, 0)))
+ != FUNCTION_DECL))
+ {
+ call_insn = loc_note;
+ loc_note = NULL;
+ var_loc_p = false;
+
+ next_real = next_real_insn (call_insn);
+ next_note = NULL;
+ cached_next_real_insn = NULL;
+ goto create_label;
+ }
+ }
}
return;
}
&& !NOTE_DURING_CALL_P (loc_note))
return;
+create_label:
+
if (next_real == NULL_RTX)
next_real = get_last_insn ();
}
}
+ gcc_assert ((loc_note == NULL_RTX && call_insn != NULL_RTX)
+ || (loc_note != NULL_RTX && call_insn == NULL_RTX));
+
if (!var_loc_p)
{
struct call_arg_loc_node *ca_loc
= ggc_cleared_alloc<call_arg_loc_node> ();
- rtx_insn *prev = prev_real_insn (loc_note);
- rtx x;
+ rtx_insn *prev
+ = loc_note != NULL_RTX ? prev_real_insn (loc_note) : call_insn;
+
ca_loc->call_arg_loc_note = loc_note;
ca_loc->next = NULL;
ca_loc->label = last_label;
if (!CALL_P (prev))
prev = as_a <rtx_sequence *> (PATTERN (prev))->insn (0);
ca_loc->tail_call_p = SIBLING_CALL_P (prev);
- x = get_call_rtx_from (PATTERN (prev));
+
+ /* Look for a SYMBOL_REF in the "prev" instruction. */
+ rtx x = get_call_rtx_from (PATTERN (prev));
if (x)
{
- x = XEXP (XEXP (x, 0), 0);
- if (GET_CODE (x) == SYMBOL_REF
- && SYMBOL_REF_DECL (x)
- && TREE_CODE (SYMBOL_REF_DECL (x)) == FUNCTION_DECL)
- ca_loc->symbol_ref = x;
+ /* Try to get the call symbol, if any. */
+ if (MEM_P (XEXP (x, 0)))
+ x = XEXP (x, 0);
+ /* First, look for a memory access to a symbol_ref. */
+ if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && SYMBOL_REF_DECL (XEXP (x, 0))
+ && TREE_CODE (SYMBOL_REF_DECL (XEXP (x, 0))) == FUNCTION_DECL)
+ ca_loc->symbol_ref = XEXP (x, 0);
+ /* Otherwise, look at a compile-time known user-level function
+ declaration. */
+ else if (MEM_P (x)
+ && MEM_EXPR (x)
+ && TREE_CODE (MEM_EXPR (x)) == FUNCTION_DECL)
+ ca_loc->symbol_ref = XEXP (DECL_RTL (MEM_EXPR (x)), 0);
}
+
ca_loc->block = insn_scope (prev);
if (call_arg_locations)
call_arg_loc_last->next = ca_loc;
call_arg_locations = ca_loc;
call_arg_loc_last = ca_loc;
}
- else if (!NOTE_DURING_CALL_P (loc_note))
+ else if (loc_note != NULL_RTX && !NOTE_DURING_CALL_P (loc_note))
newloc->label = last_label;
else
{
last_in_cold_section_p = in_cold_section_p;
}
+/* Called from finalize_size_functions for size functions so that their body
+ can be encoded in the debug info to describe the layout of variable-length
+ structures. */
+
+static void
+dwarf2out_size_function (tree decl)
+{
+ /* Lower DECL's body into a DWARF procedure DIE (presumably
+ DW_TAG_dwarf_procedure, which this patch also teaches the pruning
+ pass about) so that variable-length type layout can reference it.
+ NOTE(review): exact DIE shape determined by function_to_dwarf_procedure
+ — confirm there. */
+ function_to_dwarf_procedure (decl);
+}
+
/* Note in one location list that text section has changed. */
int
/* Zero-th entry is allocated, but unused. */
abbrev_die_table_in_use = 1;
+ /* Allocate the dwarf_proc_stack_usage_map. */
+ dwarf_proc_stack_usage_map = new hash_map<dw_die_ref, int>;
+
/* Allocate the pubtypes and pubnames vectors. */
vec_alloc (pubname_table, 32);
vec_alloc (pubtype_table, 32);
FOR_EACH_CHILD (die, c, prune_unmark_dies (c));
}
+/* Given LOC that is referenced by a DIE we're marking as used, find all
+ referenced DWARF procedures it references and mark them as used. */
+
+static void
+prune_unused_types_walk_loc_descr (dw_loc_descr_ref loc)
+{
+ for (; loc != NULL; loc = loc->dw_loc_next)
+ switch (loc->dw_loc_opc)
+ {
+ /* These opcodes may carry a DIE reference in operand 1, but are
+ not required to. */
+ case DW_OP_GNU_implicit_pointer:
+ case DW_OP_GNU_convert:
+ case DW_OP_GNU_reinterpret:
+ if (loc->dw_loc_oprnd1.val_class == dw_val_class_die_ref)
+ prune_unused_types_mark (loc->dw_loc_oprnd1.v.val_die_ref.die, 1);
+ break;
+ /* These opcodes always carry a DIE reference in operand 1. */
+ case DW_OP_call2:
+ case DW_OP_call4:
+ case DW_OP_call_ref:
+ case DW_OP_GNU_const_type:
+ case DW_OP_GNU_parameter_ref:
+ gcc_assert (loc->dw_loc_oprnd1.val_class == dw_val_class_die_ref);
+ prune_unused_types_mark (loc->dw_loc_oprnd1.v.val_die_ref.die, 1);
+ break;
+ /* For these, the (type) DIE reference lives in operand 2. */
+ case DW_OP_GNU_regval_type:
+ case DW_OP_GNU_deref_type:
+ gcc_assert (loc->dw_loc_oprnd2.val_class == dw_val_class_die_ref);
+ prune_unused_types_mark (loc->dw_loc_oprnd2.v.val_die_ref.die, 1);
+ break;
+ /* Entry-value ops wrap a nested expression; recurse into it. */
+ case DW_OP_GNU_entry_value:
+ gcc_assert (loc->dw_loc_oprnd1.val_class == dw_val_class_loc);
+ prune_unused_types_walk_loc_descr (loc->dw_loc_oprnd1.v.val_loc);
+ break;
+ default:
+ break;
+ }
+}
+
/* Given DIE that we're marking as used, find any other dies
it references as attributes and mark them as used. */
FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
{
- if (a->dw_attr_val.val_class == dw_val_class_die_ref)
+ switch (AT_class (a))
{
+ /* Make sure DWARF procedures referenced by location descriptions will
+ get emitted. */
+ case dw_val_class_loc:
+ prune_unused_types_walk_loc_descr (AT_loc (a));
+ break;
+ case dw_val_class_loc_list:
+ for (dw_loc_list_ref list = AT_loc_list (a);
+ list != NULL;
+ list = list->dw_loc_next)
+ prune_unused_types_walk_loc_descr (list->expr);
+ break;
+
+ case dw_val_class_die_ref:
/* A reference to another DIE.
Make sure that it will get emitted.
If it was broken out into a comdat group, don't follow it. */
if (! AT_ref (a)->comdat_type_p
|| a->dw_attr == DW_AT_specification)
prune_unused_types_mark (a->dw_attr_val.v.val_die_ref.die, 1);
+ break;
+
+ case dw_val_class_str:
+ /* Set the string's refcount to 0 so that prune_unused_types_mark
+ accounts properly for it. */
+ a->dw_attr_val.v.val_str->refcount = 0;
+ break;
+
+ default:
+ break;
}
- /* Set the string's refcount to 0 so that prune_unused_types_mark
- accounts properly for it. */
- if (AT_class (a) == dw_val_class_str)
- a->dw_attr_val.v.val_str->refcount = 0;
}
}
case DW_TAG_array_type:
case DW_TAG_interface_type:
case DW_TAG_friend:
- case DW_TAG_variant_part:
case DW_TAG_enumeration_type:
case DW_TAG_subroutine_type:
case DW_TAG_string_type:
case DW_TAG_subrange_type:
case DW_TAG_ptr_to_member_type:
case DW_TAG_file_type:
+ /* Type nodes are useful only when other DIEs reference them --- don't
+ mark them. */
+ /* FALLTHROUGH */
+
+ case DW_TAG_dwarf_procedure:
+ /* Likewise for DWARF procedures. */
+
if (die->die_perennial_p)
break;
- /* It's a type node --- don't mark it. */
return;
default:
}
}
+/* Return NULL if L is a valid DWARF expression, or otherwise return the
+ first op that makes it not a valid DWARF expression. */
+
+static dw_loc_descr_ref
+non_dwarf_expression (dw_loc_descr_ref l)
+{
+ while (l)
+ {
+ /* DW_OP_regN names a register location rather than computing a
+ value, so it terminates a plain DWARF expression. */
+ if (l->dw_loc_opc >= DW_OP_reg0 && l->dw_loc_opc <= DW_OP_reg31)
+ return l;
+ switch (l->dw_loc_opc)
+ {
+ /* Likewise: these describe locations, implicit values or pieces
+ of a composite, not steps of a stack computation. */
+ case DW_OP_regx:
+ case DW_OP_implicit_value:
+ case DW_OP_stack_value:
+ case DW_OP_GNU_implicit_pointer:
+ case DW_OP_GNU_parameter_ref:
+ case DW_OP_piece:
+ case DW_OP_bit_piece:
+ return l;
+ default:
+ break;
+ }
+ l = l->dw_loc_next;
+ }
+ /* Every op was a plain DWARF expression op. */
+ return NULL;
+}
+
+/* Return adjusted copy of EXPR:
+ If it is empty DWARF expression, return it.
+ If it is valid non-empty DWARF expression,
+ return copy of EXPR with copy of DEREF appended to it.
+ If it is DWARF expression followed by DW_OP_reg{N,x}, return
+ copy of the DWARF expression with DW_OP_breg{N,x} <0> appended
+ and no DEREF.
+ If it is DWARF expression followed by DW_OP_stack_value, return
+ copy of the DWARF expression without anything appended.
+ Otherwise, return NULL. */
+
+static dw_loc_descr_ref
+copy_deref_exprloc (dw_loc_descr_ref expr, dw_loc_descr_ref deref)
+{
+ if (expr == NULL)
+ return NULL;
+
+ /* L is the first non-DWARF-expression op, if any; give up unless it
+ is the very last op of EXPR. */
+ dw_loc_descr_ref l = non_dwarf_expression (expr);
+ if (l && l->dw_loc_next)
+ return NULL;
+
+ if (l)
+ {
+ /* DW_OP_regN terminator: replace the dereference with
+ DW_OP_bregN <0>, i.e. read the value directly from the
+ register. */
+ if (l->dw_loc_opc >= DW_OP_reg0 && l->dw_loc_opc <= DW_OP_reg31)
+ deref = new_loc_descr ((enum dwarf_location_atom)
+ (DW_OP_breg0 + (l->dw_loc_opc - DW_OP_reg0)),
+ 0, 0);
+ else
+ switch (l->dw_loc_opc)
+ {
+ case DW_OP_regx:
+ deref = new_loc_descr (DW_OP_bregx,
+ l->dw_loc_oprnd1.v.val_unsigned, 0);
+ break;
+ case DW_OP_stack_value:
+ /* Value already on the stack; nothing to append. */
+ deref = NULL;
+ break;
+ default:
+ return NULL;
+ }
+ }
+ else
+ /* Pure DWARF expression: append a fresh copy of DEREF. */
+ deref = new_loc_descr (deref->dw_loc_opc,
+ deref->dw_loc_oprnd1.v.val_int, 0);
+
+ /* Copy EXPR up to (not including) the terminator L, then hang the
+ replacement op (or nothing) off the tail. */
+ dw_loc_descr_ref ret = NULL, *p = &ret;
+ while (expr != l)
+ {
+ *p = new_loc_descr (expr->dw_loc_opc, 0, 0);
+ (*p)->dw_loc_oprnd1 = expr->dw_loc_oprnd1;
+ (*p)->dw_loc_oprnd2 = expr->dw_loc_oprnd2;
+ p = &(*p)->dw_loc_next;
+ expr = expr->dw_loc_next;
+ }
+ *p = deref;
+ return ret;
+}
+
+/* For DW_AT_string_length attribute with DW_OP_call4 reference to a variable
+ or argument, adjust it if needed and return:
+ -1 if the DW_AT_string_length attribute and DW_AT_byte_size attribute
+ if present should be removed
+ 0 keep the attribute as is if the referenced var or argument has
+ only DWARF expression that covers all ranges
+ 1 if the attribute has been successfully adjusted. */
+
+static int
+optimize_string_length (dw_attr_node *a)
+{
+ dw_loc_descr_ref l = AT_loc (a), lv;
+ /* DIE of the variable or argument the DW_OP_call4 refers to (the
+ caller guarantees operand 1 is a DIE reference). */
+ dw_die_ref die = l->dw_loc_oprnd1.v.val_die_ref.die;
+ dw_attr_node *av = get_AT (die, DW_AT_location);
+ dw_loc_list_ref d;
+ bool non_dwarf_expr = false;
+
+ /* No location at all: the attribute cannot be resolved, drop it. */
+ if (av == NULL)
+ return -1;
+ /* Check whether every range of the referenced location is a pure
+ DWARF expression (DW_OP_call4 requires that of its target). */
+ switch (AT_class (av))
+ {
+ case dw_val_class_loc_list:
+ for (d = AT_loc_list (av); d != NULL; d = d->dw_loc_next)
+ if (d->expr && non_dwarf_expression (d->expr))
+ non_dwarf_expr = true;
+ break;
+ case dw_val_class_loc:
+ lv = AT_loc (av);
+ if (lv == NULL)
+ return -1;
+ if (non_dwarf_expression (lv))
+ non_dwarf_expr = true;
+ break;
+ default:
+ return -1;
+ }
+
+ /* If it is safe to keep DW_OP_call4 in, keep it. */
+ if (!non_dwarf_expr
+ && (l->dw_loc_next == NULL || AT_class (av) == dw_val_class_loc))
+ return 0;
+
+ /* If not dereferencing the DW_OP_call4 afterwards, we can just
+ copy over the DW_AT_location attribute from die to a. */
+ if (l->dw_loc_next == NULL)
+ {
+ a->dw_attr_val = av->dw_attr_val;
+ return 1;
+ }
+
+ /* Otherwise rewrite each range: copy the location expression with the
+ trailing dereference folded in via copy_deref_exprloc. */
+ dw_loc_list_ref list, *p;
+ switch (AT_class (av))
+ {
+ case dw_val_class_loc_list:
+ p = &list;
+ list = NULL;
+ for (d = AT_loc_list (av); d != NULL; d = d->dw_loc_next)
+ {
+ lv = copy_deref_exprloc (d->expr, l->dw_loc_next);
+ if (lv)
+ {
+ *p = new_loc_list (lv, d->begin, d->end, d->section);
+ p = &(*p)->dw_loc_next;
+ }
+ }
+ /* Every range failed to convert: remove the attribute. */
+ if (list == NULL)
+ return -1;
+ a->dw_attr_val.val_class = dw_val_class_loc_list;
+ gen_llsym (list);
+ *AT_loc_list_ptr (a) = list;
+ return 1;
+ case dw_val_class_loc:
+ lv = copy_deref_exprloc (AT_loc (av), l->dw_loc_next);
+ if (lv == NULL)
+ return -1;
+ a->dw_attr_val.v.val_loc = lv;
+ return 1;
+ default:
+ /* Earlier switch already rejected other classes. */
+ gcc_unreachable ();
+ }
+}
+
/* Resolve DW_OP_addr and DW_AT_const_value CONST_STRING arguments to
an address in .rodata section if the string literal is emitted there,
or remove the containing location list or replace DW_AT_const_value
dw_attr_node *a;
dw_loc_list_ref *curr, *start, loc;
unsigned ix;
+ bool remove_AT_byte_size = false;
FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
switch (AT_class (a))
case dw_val_class_loc:
{
dw_loc_descr_ref l = AT_loc (a);
+ /* Using DW_OP_call4 or DW_OP_call4 DW_OP_deref in
+ DW_AT_string_length is only a rough approximation; unfortunately
+ DW_AT_string_length can't be a reference to a DIE. DW_OP_call4
+ needs a DWARF expression, while DW_AT_location of the referenced
+ variable or argument might be any location description. */
+ if (a->dw_attr == DW_AT_string_length
+ && l
+ && l->dw_loc_opc == DW_OP_call4
+ && l->dw_loc_oprnd1.val_class == dw_val_class_die_ref
+ && (l->dw_loc_next == NULL
+ || (l->dw_loc_next->dw_loc_next == NULL
+ && (l->dw_loc_next->dw_loc_opc == DW_OP_deref
+ || l->dw_loc_next->dw_loc_opc == DW_OP_deref_size))))
+ {
+ switch (optimize_string_length (a))
+ {
+ case -1:
+ remove_AT (die, a->dw_attr);
+ ix--;
+ /* For DWARF4 and earlier, if we drop DW_AT_string_length,
+ we need to drop also DW_AT_byte_size. */
+ remove_AT_byte_size = true;
+ continue;
+ default:
+ break;
+ case 1:
+ /* Even if we keep the optimized DW_AT_string_length,
+ it might have changed AT_class, so process it again. */
+ ix--;
+ continue;
+ }
+ }
/* For -gdwarf-2 don't attempt to optimize
DW_AT_data_member_location containing
DW_OP_plus_uconst - older consumers might
break;
}
+ if (remove_AT_byte_size)
+ remove_AT (die, DW_AT_byte_size);
+
FOR_EACH_CHILD (die, c, resolve_addr (c));
}
\f
static void
flush_limbo_die_list (void)
{
- limbo_die_node *node, *next_node;
+ limbo_die_node *node;
- for (node = limbo_die_list; node; node = next_node)
+ /* get_context_die calls force_decl_die, which can put new DIEs on the
+ limbo list in LTO mode when nested functions are put in a different
+ partition than that of their parent function. */
+ while ((node = limbo_die_list))
{
dw_die_ref die = node->die;
- next_node = node->next;
+ limbo_die_list = node->next;
if (die->die_parent == NULL)
{
}
}
}
-
- limbo_die_list = NULL;
}
/* Output stuff that dwarf requires at the end of every file,
resolve_addr (comp_unit_die ());
move_marked_base_types ();
- /* Walk through the list of incomplete types again, trying once more to
- emit full debugging info for them. */
- retry_incomplete_types ();
-
if (flag_eliminate_unused_debug_types)
prune_unused_types ();
generate a table that would have contained data. */
if (info_section_emitted)
{
- unsigned long aranges_length = size_of_aranges ();
-
switch_to_section (debug_aranges_section);
- output_aranges (aranges_length);
+ output_aranges ();
}
/* Output ranges section if necessary. */
static void
dwarf2out_early_finish (void)
{
- limbo_die_node *node;
+ /* Walk through the list of incomplete types again, trying once more to
+ emit full debugging info for them. */
+ retry_incomplete_types ();
+
+ /* The point here is to flush out the limbo list so that it is empty
+ and we don't need to stream it for LTO. */
+ flush_limbo_die_list ();
+
+ gen_scheduled_generic_parms_dies ();
+ gen_remaining_tmpl_value_param_die_attribute ();
/* Add DW_AT_linkage_name for all deferred DIEs. */
- for (node = deferred_asm_name; node; node = node->next)
+ for (limbo_die_node *node = deferred_asm_name; node; node = node->next)
{
tree decl = node->created_for;
if (DECL_ASSEMBLER_NAME (decl) != DECL_NAME (decl)
}
}
deferred_asm_name = NULL;
-
- /* The point here is to flush out the limbo list so that it is empty
- and we don't need to stream it for LTO. */
- flush_limbo_die_list ();
-
- gen_scheduled_generic_parms_dies ();
- gen_remaining_tmpl_value_param_die_attribute ();
}
/* Reset all state within dwarf2out.c so that we can rerun the compiler
abbrev_die_table = NULL;
abbrev_die_table_allocated = 0;
abbrev_die_table_in_use = 0;
+ delete dwarf_proc_stack_usage_map;
+ dwarf_proc_stack_usage_map = NULL;
line_info_label_num = 0;
cur_line_info_table = NULL;
text_section_line_info = NULL;