1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2022 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
22 #define INCLUDE_MEMORY
24 #include "coretypes.h"
27 #include "basic-block.h"
29 #include "gimple-iterator.h"
30 #include "diagnostic-core.h"
35 #include "stringpool.h"
38 #include "fold-const.h"
39 #include "tree-pretty-print.h"
40 #include "diagnostic-color.h"
41 #include "diagnostic-metadata.h"
44 #include "analyzer/analyzer.h"
45 #include "analyzer/analyzer-logging.h"
46 #include "ordered-hash-map.h"
49 #include "analyzer/supergraph.h"
51 #include "analyzer/call-string.h"
52 #include "analyzer/program-point.h"
53 #include "analyzer/store.h"
54 #include "analyzer/region-model.h"
55 #include "analyzer/call-summary.h"
56 #include "analyzer/analyzer-selftests.h"
57 #include "stor-layout.h"
63 /* Dump SVALS to PP, sorting them to ensure determinism. */
/* Copies the (unordered) hash_set into a vector, sorts by
   svalue::cmp_ptr_ptr, then prints the elements between '{' and '}'.
   NOTE(review): listing is elided here — the loop body that pushes each
   iterated svalue into V is not visible; presumably v.safe_push (*iter).  */
66 dump_svalue_set (const hash_set <const svalue *> &svals,
67 pretty_printer *pp, bool simple)
69 auto_vec <const svalue *> v;
70 for (hash_set<const svalue *>::iterator iter = svals.begin ();
71 iter != svals.end (); ++iter)
/* Sort to make the dump output independent of hash-table ordering.  */
75 v.qsort (svalue::cmp_ptr_ptr);
77 pp_character (pp, '{');
80 FOR_EACH_VEC_ELT (v, i, sval)
84 sval->dump_to_pp (pp, simple);
86 pp_character (pp, '}');
89 /* class uncertainty_t. */
91 /* Dump this object to PP. */
/* Prints both svalue sets tracked by uncertainty_t: the maybe-bound
   svalues and those mutable at unknown calls.  */
94 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
96 pp_string (pp, "{m_maybe_bound_svals: ");
97 dump_svalue_set (m_maybe_bound_svals, pp, simple);
99 pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
100 dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
104 /* Dump this object to stderr. */
/* Sets up a pretty_printer targeting stderr (colors matched to the
   global diagnostic context) and delegates to dump_to_pp.
   NOTE(review): the pretty_printer local declaration and the final
   pp_flush/newline are elided from this listing.  */
107 uncertainty_t::dump (bool simple) const
110 pp_format_decoder (&pp) = default_tree_printer;
111 pp_show_color (&pp) = pp_show_color (global_dc->printer);
112 pp.buffer->stream = stderr;
113 dump_to_pp (&pp, simple);
118 /* class binding_key. */
/* Factory: build the appropriate binding_key for region R.
   A symbolic offset, or a region whose bit-size is unknown, yields a
   symbolic_binding; otherwise a concrete_binding at the concrete
   bit offset/size.  */
121 binding_key::make (store_manager *mgr, const region *r)
123 region_offset offset = r->get_offset (mgr->get_svalue_manager ());
124 if (offset.symbolic_p ())
125 return mgr->get_symbolic_binding (r);
129 if (r->get_bit_size (&bit_size))
130 return mgr->get_concrete_binding (offset.get_bit_offset (),
/* Fallback: size not known in bits, so key symbolically on R.  */
133 return mgr->get_symbolic_binding (r);
137 /* Dump this binding_key to stderr. */
140 binding_key::dump (bool simple) const
143 pp_format_decoder (&pp) = default_tree_printer;
144 pp_show_color (&pp) = pp_show_color (global_dc->printer);
145 pp.buffer->stream = stderr;
146 dump_to_pp (&pp, simple);
151 /* Get a description of this binding_key. */
/* Renders the key via dump_to_pp into a freshly-allocated label_text
   owned by the caller.  */
154 binding_key::get_desc (bool simple) const
157 pp_format_decoder (&pp) = default_tree_printer;
158 dump_to_pp (&pp, simple);
159 return label_text::take (xstrdup (pp_formatted_text (&pp)));
162 /* qsort callback. */
/* Adapter from qsort's void* signature to binding_key::cmp.  */
165 binding_key::cmp_ptrs (const void *p1, const void *p2)
167 const binding_key * const *pk1 = (const binding_key * const *)p1;
168 const binding_key * const *pk2 = (const binding_key * const *)p2;
169 return cmp (*pk1, *pk2);
172 /* Comparator for binding_keys. */
/* Total order: concrete vs symbolic kind first, then for concrete keys
   by start bit offset then next bit offset (wide-int compares).
   NOTE(review): the symbolic-key tiebreak after s1/s2 is elided from
   this listing.  */
175 binding_key::cmp (const binding_key *k1, const binding_key *k2)
177 int concrete1 = k1->concrete_p ();
178 int concrete2 = k2->concrete_p ();
179 if (int concrete_cmp = concrete1 - concrete2)
183 const concrete_binding *b1 = (const concrete_binding *)k1;
184 const concrete_binding *b2 = (const concrete_binding *)k2;
185 if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
186 b2->get_start_bit_offset (),
189 return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
194 const symbolic_binding *s1 = (const symbolic_binding *)k1;
195 const symbolic_binding *s2 = (const symbolic_binding *)k2;
204 /* struct bit_range. */
/* Print this bit_range; prefer the byte-level form when the range is
   byte-aligned, otherwise print start/size/next in bits.  */
207 bit_range::dump_to_pp (pretty_printer *pp) const
209 byte_range bytes (0, 0);
210 if (as_byte_range (&bytes))
211 bytes.dump_to_pp (pp);
214 pp_string (pp, "start: ");
215 pp_wide_int (pp, m_start_bit_offset, SIGNED);
216 pp_string (pp, ", size: ");
217 pp_wide_int (pp, m_size_in_bits, SIGNED);
218 pp_string (pp, ", next: ");
219 pp_wide_int (pp, get_next_bit_offset (), SIGNED);
223 /* Dump this object to stderr. */
226 bit_range::dump () const
229 pp.buffer->stream = stderr;
235 /* If OTHER is a subset of this, return true and write
236 to *OUT the relative range of OTHER within this.
237 Otherwise return false. */
/* Subset test: OTHER's first and last bit must both lie within this
   range; *OUT is OTHER re-based so offset 0 is this range's start.  */
240 bit_range::contains_p (const bit_range &other, bit_range *out) const
242 if (contains_p (other.get_start_bit_offset ())
243 && contains_p (other.get_last_bit_offset ()))
245 out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
246 out->m_size_in_bits = other.m_size_in_bits;
253 /* If OTHER intersects this, return true and write
254 the relative range of OTHER within THIS to *OUT_THIS,
255 and the relative range of THIS within OTHER to *OUT_OTHER.
256 Otherwise return false. */
259 bit_range::intersects_p (const bit_range &other,
261 bit_range *out_other) const
/* Standard half-open interval overlap test.  */
263 if (get_start_bit_offset () < other.get_next_bit_offset ()
264 && other.get_start_bit_offset () < get_next_bit_offset ())
266 bit_offset_t overlap_start
267 = MAX (get_start_bit_offset (),
268 other.get_start_bit_offset ());
269 bit_offset_t overlap_next
270 = MIN (get_next_bit_offset (),
271 other.get_next_bit_offset ());
272 gcc_assert (overlap_next > overlap_start);
273 bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
/* operator- re-bases the absolute overlap relative to each range.  */
274 *out_this = abs_overlap_bits - get_start_bit_offset ();
275 *out_other = abs_overlap_bits - other.get_start_bit_offset ();
/* Order by signed start offset, then by unsigned size.  */
283 bit_range::cmp (const bit_range &br1, const bit_range &br2)
285 if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
286 br2.m_start_bit_offset))
289 return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits)
292 /* Offset this range by OFFSET. */
295 bit_range::operator- (bit_offset_t offset) const
297 return bit_range (m_start_bit_offset - offset, m_size_in_bits);
300 /* If MASK is a contiguous range of set bits, write them
301 to *OUT and return true.
302 Otherwise return false. */
/* Three linear scans over the HOST_WIDE_INT bit positions:
   (1) find the first set bit, (2) find the end of that run,
   (3) verify no further bits are set.
   NOTE(review): the per-iteration increments of iter_bit_idx /
   iter_bit_mask and the early 'return false' paths are elided from
   this listing.  */
305 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
307 unsigned iter_bit_idx = 0;
308 unsigned HOST_WIDE_INT iter_bit_mask = 1;
310 /* Find the first contiguous run of set bits in MASK. */
312 /* Find first set bit in MASK. */
313 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
315 if (mask & iter_bit_mask)
320 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
324 unsigned first_set_iter_bit_idx = iter_bit_idx;
325 unsigned num_set_bits = 1;
329 /* Find next unset bit in MASK. */
330 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
332 if (!(mask & iter_bit_mask))
/* Run of set bits extends to the top bit: success.  */
338 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
340 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
344 /* We now have the first contiguous run of set bits in MASK.
345 Fail if any other bits are set. */
346 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
348 if (mask & iter_bit_mask)
354 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
358 /* Attempt to convert this bit_range to a byte_range.
359 Return true if it is possible, writing the result to *OUT.
360 Otherwise return false. */
/* Succeeds iff both the start offset and the size are whole bytes.  */
363 bit_range::as_byte_range (byte_range *out) const
365 if (m_start_bit_offset % BITS_PER_UNIT == 0
366 && m_size_in_bits % BITS_PER_UNIT == 0)
368 out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
369 out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
375 /* Dump this object to PP. */
/* Three display forms: "empty", "byte N" for a single byte, or
   "bytes N-M" for a multi-byte range (inclusive last offset).  */
378 byte_range::dump_to_pp (pretty_printer *pp) const
380 if (m_size_in_bytes == 0)
382 pp_string (pp, "empty");
384 else if (m_size_in_bytes == 1)
386 pp_string (pp, "byte ");
387 pp_wide_int (pp, m_start_byte_offset, SIGNED);
391 pp_string (pp, "bytes ");
392 pp_wide_int (pp, m_start_byte_offset, SIGNED);
394 pp_wide_int (pp, get_last_byte_offset (), SIGNED);
398 /* Dump this object to stderr. */
401 byte_range::dump () const
404 pp.buffer->stream = stderr;
410 /* If OTHER is a subset of this, return true and write
411 to *OUT the relative range of OTHER within this.
412 Otherwise return false. */
/* Byte-level analogue of bit_range::contains_p.  */
415 byte_range::contains_p (const byte_range &other, byte_range *out) const
417 if (contains_p (other.get_start_byte_offset ())
418 && contains_p (other.get_last_byte_offset ()))
420 out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
421 out->m_size_in_bytes = other.m_size_in_bytes;
428 /* Return true if THIS and OTHER intersect and write the number
429 of bytes both buffers overlap to *OUT_NUM_OVERLAP_BYTES.
431 Otherwise return false. */
/* Half-open interval overlap; only the overlap *size* is reported,
   unlike bit_range::intersects_p which reports relative ranges.  */
434 byte_range::intersects_p (const byte_range &other,
435 byte_size_t *out_num_overlap_bytes) const
437 if (get_start_byte_offset () < other.get_next_byte_offset ()
438 && other.get_start_byte_offset () < get_next_byte_offset ())
440 byte_offset_t overlap_start = MAX (get_start_byte_offset (),
441 other.get_start_byte_offset ());
442 byte_offset_t overlap_next = MIN (get_next_byte_offset (),
443 other.get_next_byte_offset ());
444 gcc_assert (overlap_next > overlap_start);
445 *out_num_overlap_bytes = overlap_next - overlap_start;
452 /* Return true if THIS exceeds OTHER and write the overhanging
453 byte range to OUT_OVERHANGING_BYTE_RANGE. */
/* "Exceeds": THIS extends past OTHER's end; the out-param receives the
   tail of THIS beyond OTHER.  Requires a non-empty range.  */
456 byte_range::exceeds_p (const byte_range &other,
457 byte_range *out_overhanging_byte_range) const
459 gcc_assert (!empty_p ());
461 if (other.get_next_byte_offset () < get_next_byte_offset ())
463 /* THIS definitely exceeds OTHER. */
464 byte_offset_t start = MAX (get_start_byte_offset (),
465 other.get_next_byte_offset ());
466 byte_offset_t size = get_next_byte_offset () - start;
467 gcc_assert (size > 0);
468 out_overhanging_byte_range->m_start_byte_offset = start;
469 out_overhanging_byte_range->m_size_in_bytes = size;
476 /* Return true if THIS falls short of OFFSET and write the
477 byte range fallen short to OUT_FALL_SHORT_BYTES. */
/* "Falls short": THIS starts before OFFSET; the out-param receives the
   head of THIS preceding OFFSET (clamped to THIS's end).  */
480 byte_range::falls_short_of_p (byte_offset_t offset,
481 byte_range *out_fall_short_bytes) const
483 gcc_assert (!empty_p ());
485 if (get_start_byte_offset () < offset)
487 /* THIS falls short of OFFSET. */
488 byte_offset_t start = get_start_byte_offset ();
489 byte_offset_t size = MIN (offset, get_next_byte_offset ()) - start;
490 gcc_assert (size > 0);
491 out_fall_short_bytes->m_start_byte_offset = start;
492 out_fall_short_bytes->m_size_in_bytes = size;
499 /* qsort comparator for byte ranges. */
502 byte_range::cmp (const byte_range &br1, const byte_range &br2)
504 /* Order first by offset. */
505 if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
506 br2.m_start_byte_offset))
509 /* ...then by size. */
510 return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
513 /* class concrete_binding : public binding_key. */
515 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
/* Delegates entirely to the contained bit_range; the 'simple' flag is
   unused for concrete keys.  */
518 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
520 m_bit_range.dump_to_pp (pp);
523 /* Return true if this binding overlaps with OTHER. */
/* Half-open interval overlap test on bit offsets.  */
526 concrete_binding::overlaps_p (const concrete_binding &other) const
528 if (get_start_bit_offset () < other.get_next_bit_offset ()
529 && get_next_bit_offset () > other.get_start_bit_offset ())
534 /* Comparator for use by vec<const concrete_binding *>::qsort. */
537 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
539 const concrete_binding *b1 = *(const concrete_binding * const *)p1;
540 const concrete_binding *b2 = *(const concrete_binding * const *)p2;
542 return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
545 /* class symbolic_binding : public binding_key. */
/* Symbolic keys print the region they are keyed on.  */
548 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
550 //binding_key::dump_to_pp (pp, simple);
551 pp_string (pp, "region: ");
552 m_region->dump_to_pp (pp, simple);
555 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
/* Orders by region id for determinism.  */
558 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
560 const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
561 const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
563 return region::cmp_ids (b1->get_region (), b2->get_region ());
566 /* The store is oblivious to the types of the svalues bound within
567 it: any type can get bound at any location.
568 Simplify any casts before binding.
570 For example, if we have:
571 struct big { int ia[1024]; };
573 memcpy (&dst, &src, sizeof (struct big));
574 this reaches us in gimple form as:
575 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
576 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
577 Using cast_region when handling the MEM_REF would give us:
578 INIT_VAL(CAST_REG(unsigned char[4096], src))
579 as rhs_sval, but we can fold that into a cast svalue:
580 CAST(unsigned char[4096], INIT_VAL(src))
581 We can discard that cast from the svalue when binding it in
582 the store for "dst", and simply store:
584 key: {kind: direct, start: 0, size: 32768, next: 32768}
585 value: ‘struct big’ {INIT_VAL(src)}. */
587 static const svalue *
/* Strip a single cast wrapper (if any) from SVAL before binding.
   NOTE(review): the fall-through return of SVAL itself is elided
   from this listing.  */
588 simplify_for_binding (const svalue *sval)
590 if (const svalue *cast_sval = sval->maybe_undo_cast ())
595 /* class binding_map. */
597 /* binding_map's copy ctor. */
599 binding_map::binding_map (const binding_map &other)
600 : m_map (other.m_map)
604 /* binding_map's assignment operator. */
/* Key/value pairs are interned pointers, so a shallow put of each
   entry suffices.  */
607 binding_map::operator=(const binding_map &other)
609 /* For now, assume we only ever copy to an empty cluster. */
610 gcc_assert (m_map.elements () == 0);
611 for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
614 const binding_key *key = (*iter).first;
615 const svalue *sval = (*iter).second;
616 m_map.put (key, sval);
621 /* binding_map's equality operator. */
/* Equal iff same element count and every (key, sval) pair of this map
   appears identically (pointer equality) in OTHER.  */
624 binding_map::operator== (const binding_map &other) const
626 if (m_map.elements () != other.m_map.elements ())
629 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
631 const binding_key *key = (*iter).first;
632 const svalue *sval = (*iter).second;
633 const svalue **other_slot
634 = const_cast <map_t &> (other.m_map).get (key);
635 if (other_slot == NULL)
637 if (sval != *other_slot)
/* Sanity-check that equal maps hash equally.  */
640 gcc_checking_assert (hash () == other.hash ());
644 /* Generate a hash value for this binding_map. */
/* XOR of per-entry hashes makes the result independent of the
   hash-table iteration order (XOR is commutative).  */
647 binding_map::hash () const
649 hashval_t result = 0;
650 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
652 /* Use a new hasher for each key to avoid depending on the ordering
653 of keys when accumulating the result. */
654 inchash::hash hstate;
655 hstate.add_ptr ((*iter).first);
656 hstate.add_ptr ((*iter).second);
657 result ^= hstate.end ();
662 /* Dump a representation of this binding_map to PP.
663 SIMPLE controls how values and regions are to be printed.
664 If MULTILINE, then split the dump over multiple lines and
665 use whitespace for readability, otherwise put all on one line. */
/* Keys are collected and sorted (binding_key::cmp_ptrs) so dump output
   is deterministic regardless of hash ordering.  */
668 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
669 bool multiline) const
671 auto_vec <const binding_key *> binding_keys;
672 for (map_t::iterator iter = m_map.begin ();
673 iter != m_map.end (); ++iter)
675 const binding_key *key = (*iter).first;
676 binding_keys.safe_push (key);
678 binding_keys.qsort (binding_key::cmp_ptrs);
680 const binding_key *key;
682 FOR_EACH_VEC_ELT (binding_keys, i, key)
684 const svalue *value = *const_cast <map_t &> (m_map).get (key);
/* Multiline form: "key: {...}" / "value: 'type' {...}" blocks.  */
687 pp_string (pp, " key: {");
688 key->dump_to_pp (pp, simple);
691 pp_string (pp, " value: ");
692 if (tree t = value->get_type ())
693 dump_quoted_tree (pp, t);
694 pp_string (pp, " {");
695 value->dump_to_pp (pp, simple);
/* Single-line form: comma-separated "binding key: {...}, value: {...}".  */
702 pp_string (pp, ", ");
703 pp_string (pp, "binding key: {");
704 key->dump_to_pp (pp, simple);
705 pp_string (pp, "}, value: {");
706 value->dump_to_pp (pp, simple);
712 /* Dump a multiline representation of this binding_map to stderr. */
715 binding_map::dump (bool simple) const
718 pp_format_decoder (&pp) = default_tree_printer;
719 pp_show_color (&pp) = pp_show_color (global_dc->printer);
720 pp.buffer->stream = stderr;
721 dump_to_pp (&pp, simple, true);
726 /* Return a new json::object of the form
727 {KEY_DESC : SVALUE_DESC,
728 ...for the various key/value pairs in this binding_map}. */
/* Caller takes ownership of the returned json::object.  Keys are
   sorted for deterministic JSON output, mirroring dump_to_pp.  */
731 binding_map::to_json () const
733 json::object *map_obj = new json::object ();
735 auto_vec <const binding_key *> binding_keys;
736 for (map_t::iterator iter = m_map.begin ();
737 iter != m_map.end (); ++iter)
739 const binding_key *key = (*iter).first;
740 binding_keys.safe_push (key);
742 binding_keys.qsort (binding_key::cmp_ptrs);
744 const binding_key *key;
746 FOR_EACH_VEC_ELT (binding_keys, i, key)
748 const svalue *value = *const_cast <map_t &> (m_map).get (key);
749 label_text key_desc = key->get_desc ();
750 map_obj->set (key_desc.get (), value->to_json ());
756 /* Comparator for imposing an order on binding_maps. */
/* Order: by element count, then pairwise over sorted keys by
   binding_key::cmp, then by svalue::cmp_ptr on the bound values.
   Both key vectors are sorted the same way, so index i lines up.  */
759 binding_map::cmp (const binding_map &map1, const binding_map &map2)
761 if (int count_cmp = map1.elements () - map2.elements ())
764 auto_vec <const binding_key *> keys1 (map1.elements ());
765 for (map_t::iterator iter = map1.begin ();
766 iter != map1.end (); ++iter)
767 keys1.quick_push ((*iter).first);
768 keys1.qsort (binding_key::cmp_ptrs);
770 auto_vec <const binding_key *> keys2 (map2.elements ());
771 for (map_t::iterator iter = map2.begin ();
772 iter != map2.end (); ++iter)
773 keys2.quick_push ((*iter).first);
774 keys2.qsort (binding_key::cmp_ptrs);
776 for (size_t i = 0; i < keys1.length (); i++)
778 const binding_key *k1 = keys1[i];
779 const binding_key *k2 = keys2[i];
780 if (int key_cmp = binding_key::cmp (k1, k2))
/* Keys are interned, so equal-comparing keys must be identical.  */
782 gcc_assert (k1 == k2);
783 if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
790 /* Get the child region of PARENT_REG based upon INDEX within a
793 static const region *
/* Dispatch on the CONSTRUCTOR index's tree code: integer indices map to
   element regions (array-style), FIELD_DECLs map to field regions.
   NOTE(review): the case labels of this switch are elided from the
   listing.  */
794 get_subregion_within_ctor (const region *parent_reg, tree index,
795 region_model_manager *mgr)
797 switch (TREE_CODE (index))
803 const svalue *index_sval
804 = mgr->get_or_create_constant_svalue (index);
805 return mgr->get_element_region (parent_reg,
806 TREE_TYPE (parent_reg->get_type ()),
811 return mgr->get_field_region (parent_reg, index);
815 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
817 static const svalue *
/* Builds a throwaway region_model purely to reuse its get_rvalue
   machinery for converting the tree VAL to an svalue.  */
818 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
820 /* Reuse the get_rvalue logic from region_model. */
821 region_model m (mgr);
822 return m.get_rvalue (path_var (val, 0), NULL);
825 /* Bind values from CONSTRUCTOR to this map, relative to
826 PARENT_REG's relationship to its base region.
827 Return true if successful, false if there was a problem (e.g. due
828 to hitting a complexity limit). */
/* Walks the CONSTRUCTOR's (index, value) pairs.  Missing indices are
   synthesized: for RECORD_TYPEs by walking TYPE_FIELDS, otherwise from
   the element's ordinal IX.  RANGE_EXPR indices bind a whole run of
   elements at once.  */
831 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
832 region_model_manager *mgr)
834 gcc_assert (parent_reg);
835 gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
840 tree parent_type = parent_reg->get_type ();
842 if (TREE_CODE (parent_type) == RECORD_TYPE)
843 field = TYPE_FIELDS (parent_type);
846 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
850 /* If index is NULL, then iterate through the fields for
851 a RECORD_TYPE, or use an INTEGER_CST otherwise.
852 Compare with similar logic in output_constructor. */
856 field = DECL_CHAIN (field);
859 index = build_int_cst (integer_type_node, ix);
861 else if (TREE_CODE (index) == RANGE_EXPR)
863 tree min_index = TREE_OPERAND (index, 0);
864 tree max_index = TREE_OPERAND (index, 1);
/* A degenerate range [i, i] is treated as a single element.  */
865 if (min_index == max_index)
867 if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
873 if (!apply_ctor_val_to_range (parent_reg, mgr,
874 min_index, max_index, val))
879 if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
885 /* Bind the value VAL into the range of elements within PARENT_REF
886 from MIN_INDEX to MAX_INDEX (including endpoints).
887 For use in handling RANGE_EXPR within a CONSTRUCTOR.
888 Return true if successful, false if there was a problem (e.g. due
889 to hitting a complexity limit). */
/* Builds one concrete binding covering [min element start, max element
   end) and binds VAL to it.  Bails out (the elided paths presumably
   return false) whenever a symbolic offset/key makes the range
   non-concrete.  */
892 binding_map::apply_ctor_val_to_range (const region *parent_reg,
893 region_model_manager *mgr,
894 tree min_index, tree max_index,
897 gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
898 gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
900 /* Generate a binding key for the range. */
901 const region *min_element
902 = get_subregion_within_ctor (parent_reg, min_index, mgr);
903 const region *max_element
904 = get_subregion_within_ctor (parent_reg, max_index, mgr);
905 region_offset min_offset = min_element->get_offset (mgr);
906 if (min_offset.symbolic_p ())
908 bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
909 store_manager *smgr = mgr->get_store_manager ();
910 const binding_key *max_element_key = binding_key::make (smgr, max_element);
911 if (max_element_key->symbolic_p ())
913 const concrete_binding *max_element_ckey
914 = max_element_key->dyn_cast_concrete_binding ();
915 bit_size_t range_size_in_bits
916 = max_element_ckey->get_next_bit_offset () - start_bit_offset;
917 const concrete_binding *range_key
918 = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
919 if (range_key->symbolic_p ())
/* Nested CONSTRUCTOR values within a range are not handled here.  */
923 if (TREE_CODE (val) == CONSTRUCTOR)
925 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
927 /* Bind the value to the range. */
928 put (range_key, sval);
932 /* Bind the value VAL into INDEX within PARENT_REF.
933 For use in handling a pair of entries within a CONSTRUCTOR.
934 Return true if successful, false if there was a problem (e.g. due
935 to hitting a complexity limit). */
/* Nested CONSTRUCTORs recurse via apply_ctor_to_region; leaf values
   get an svalue and a binding key for the child region.  */
938 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
939 region_model_manager *mgr,
940 tree index, tree val)
942 const region *child_reg
943 = get_subregion_within_ctor (parent_reg, index, mgr);
944 if (TREE_CODE (val) == CONSTRUCTOR)
945 return apply_ctor_to_region (child_reg, val, mgr);
948 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
950 = binding_key::make (mgr->get_store_manager (), child_reg);
951 /* Handle the case where we have an unknown size for child_reg
952 (e.g. due to it being a trailing field with incomplete array
954 if (!k->concrete_p ())
956 /* Assume that sval has a well-defined size for this case. */
957 tree sval_type = sval->get_type ();
958 gcc_assert (sval_type);
959 HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
960 gcc_assert (sval_byte_size != -1);
961 bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
962 /* Get offset of child relative to base region. */
963 region_offset child_base_offset = child_reg->get_offset (mgr);
964 if (child_base_offset.symbolic_p ())
966 /* Convert to an offset relative to the parent region. */
967 region_offset parent_base_offset = parent_reg->get_offset (mgr);
968 gcc_assert (!parent_base_offset.symbolic_p ());
969 bit_offset_t child_parent_offset
970 = (child_base_offset.get_bit_offset ()
971 - parent_base_offset.get_bit_offset ());
972 /* Create a concrete key for the child within the parent. */
973 k = mgr->get_store_manager ()->get_concrete_binding
974 (child_parent_offset, sval_bit_size);
976 gcc_assert (k->concrete_p ());
982 /* Populate OUT with all bindings within this map that overlap KEY. */
/* Concrete-vs-concrete pairs use the precise overlaps_p test; any pair
   involving a symbolic key is conservatively assumed to overlap.  */
985 binding_map::get_overlapping_bindings (const binding_key *key,
986 auto_vec<const binding_key *> *out)
988 for (auto iter : *this)
990 const binding_key *iter_key = iter.first;
991 if (const concrete_binding *ckey
992 = key->dyn_cast_concrete_binding ())
994 if (const concrete_binding *iter_ckey
995 = iter_key->dyn_cast_concrete_binding ())
997 if (ckey->overlaps_p (*iter_ckey))
998 out->safe_push (iter_key);
1002 /* Assume overlap. */
1003 out->safe_push (iter_key);
1008 /* Assume overlap. */
1009 out->safe_push (iter_key);
1014 /* Remove, truncate, and/or split any bindings within this map that
1017 For example, if we have:
1019 +------------------------------------+
1021 +------------------------------------+
1023 which is to be overwritten with:
1025 .......+----------------------+.......
1026 .......| new binding |.......
1027 .......+----------------------+.......
1029 this function "cuts a hole" out of the old binding:
1031 +------+......................+------+
1032 |prefix| hole for new binding |suffix|
1033 +------+......................+------+
1035 into which the new binding can be added without
1036 overlapping the prefix or suffix.
1038 The prefix and suffix (if added) will be bound to the pertinent
1039 parts of the value of the old binding.
1046 void test_5 (struct s5 *p)
1051 then after the "f = *p;" we have:
1052 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
1053 and at the "f.arr[3] = 42;" we remove the bindings overlapping
1054 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
1058 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1060 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1061 punching a hole into which the new value can be written at byte 3:
1064 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1066 value: 'char' {(char)42}
1068 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1070 If UNCERTAINTY is non-NULL, use it to record any svalues that
1071 were removed, as being maybe-bound.
1073 If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1074 in the map, due to one or both of the underlying clusters being
1075 symbolic (but not the same symbolic region). Hence even if DROP_KEY is a
1076 concrete binding it could actually be referring to the same memory as
1077 distinct concrete bindings in the map. Remove all bindings, but
1078 register any svalues with *UNCERTAINTY. */
1081 binding_map::remove_overlapping_bindings (store_manager *mgr,
1082 const binding_key *drop_key,
1083 uncertainty_t *uncertainty,
1084 bool always_overlap)
1086 /* Get the bindings of interest within this map. */
/* ALWAYS_OVERLAP selects every binding; otherwise only those that
   (conservatively) overlap DROP_KEY.  */
1087 auto_vec<const binding_key *> bindings;
1089 for (auto iter : *this)
1090 bindings.safe_push (iter.first); /* Add all bindings. */
1092 /* Just add overlapping bindings. */
1093 get_overlapping_bindings (drop_key, &bindings);
1096 const binding_key *iter_binding;
1097 FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1099 /* Record any svalues that were removed to *UNCERTAINTY as being
1100 maybe-bound, provided at least some part of the binding is symbolic.
1102 Specifically, if at least one of the bindings is symbolic, or we
1103 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1104 regions, then we don't know that the svalue has been overwritten,
1105 and should record that to *UNCERTAINTY.
1107 However, if we have concrete keys accessing within the same symbolic
1108 region, then we *know* that the symbolic region has been overwritten,
1109 so we don't record it to *UNCERTAINTY, as this could be a genuine
1111 const svalue *old_sval = get (iter_binding);
1113 && (drop_key->symbolic_p ()
1114 || iter_binding->symbolic_p ()
1116 uncertainty->on_maybe_bound_sval (old_sval);
1118 /* Begin by removing the old binding. */
1119 m_map.remove (iter_binding);
1121 /* Don't attempt to handle prefixes/suffixes for the
1122 "always_overlap" case; everything's being removed. */
1126 /* Now potentially add the prefix and suffix. */
/* Prefix/suffix splitting is only possible when both keys are
   concrete; the retained parts are extracted from OLD_SVAL via
   extract_bit_range, re-based to the old binding's start.  */
1127 if (const concrete_binding *drop_ckey
1128 = drop_key->dyn_cast_concrete_binding ())
1129 if (const concrete_binding *iter_ckey
1130 = iter_binding->dyn_cast_concrete_binding ())
1132 gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1134 const bit_range &drop_bits = drop_ckey->get_bit_range ();
1135 const bit_range &iter_bits = iter_ckey->get_bit_range ();
1137 if (iter_bits.get_start_bit_offset ()
1138 < drop_bits.get_start_bit_offset ())
1140 /* We have a truncated prefix. */
1141 bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1142 (drop_bits.get_start_bit_offset ()
1143 - iter_bits.get_start_bit_offset ()));
1144 const concrete_binding *prefix_key
1145 = mgr->get_concrete_binding (prefix_bits);
1146 bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1147 const svalue *prefix_sval
1148 = old_sval->extract_bit_range (NULL_TREE,
1150 mgr->get_svalue_manager ());
1151 m_map.put (prefix_key, prefix_sval);
1154 if (iter_bits.get_next_bit_offset ()
1155 > drop_bits.get_next_bit_offset ())
1157 /* We have a truncated suffix. */
1158 bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1159 (iter_bits.get_next_bit_offset ()
1160 - drop_bits.get_next_bit_offset ()))
1161 const concrete_binding *suffix_key
1162 = mgr->get_concrete_binding (suffix_bits);
1163 bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1164 - iter_bits.get_start_bit_offset (),
1165 suffix_bits.m_size_in_bits);
1166 const svalue *suffix_sval
1167 = old_sval->extract_bit_range (NULL_TREE,
1169 mgr->get_svalue_manager ());
1170 m_map.put (suffix_key, suffix_sval);
1176 /* class binding_cluster. */
1178 binding_cluster::binding_cluster (const region *base_region)
1179 : m_base_region (base_region), m_map (),
1180 m_escaped (false), m_touched (false)
1184 /* binding_cluster's copy ctor. */
1186 binding_cluster::binding_cluster (const binding_cluster &other)
1187 : m_base_region (other.m_base_region), m_map (other.m_map),
1188 m_escaped (other.m_escaped), m_touched (other.m_touched)
1192 /* binding_cluster's assignment operator. */
1195 binding_cluster::operator= (const binding_cluster &other)
1197 gcc_assert (m_base_region == other.m_base_region);
1198 m_map = other.m_map;
1199 m_escaped = other.m_escaped;
1200 m_touched = other.m_touched;
1204 /* binding_cluster's equality operator. */
1207 binding_cluster::operator== (const binding_cluster &other) const
1209 if (m_map != other.m_map)
1212 if (m_base_region != other.m_base_region)
1215 if (m_escaped != other.m_escaped)
1218 if (m_touched != other.m_touched)
1221 gcc_checking_assert (hash () == other.hash ());
1226 /* Generate a hash value for this binding_cluster. */
1229 binding_cluster::hash () const
1231 return m_map.hash ();
1234 /* Return true if this binding_cluster is symbolic
1235 i.e. its base region is symbolic. */
1238 binding_cluster::symbolic_p () const
1240 return m_base_region->get_kind () == RK_SYMBOLIC;
1243 /* Dump a representation of this binding_cluster to PP.
1244 SIMPLE controls how values and regions are to be printed.
1245 If MULTILINE, then split the dump over multiple lines and
1246 use whitespace for readability, otherwise put all on one line. */
1249 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1250 bool multiline) const
1256 pp_string (pp, " ESCAPED");
1260 pp_string (pp, "(ESCAPED)");
1266 pp_string (pp, " TOUCHED");
1270 pp_string (pp, "(TOUCHED)");
1273 m_map.dump_to_pp (pp, simple, multiline);
1276 /* Dump a multiline representation of this binding_cluster to stderr. */
1279 binding_cluster::dump (bool simple) const
1282 pp_format_decoder (&pp) = default_tree_printer;
1283 pp_show_color (&pp) = pp_show_color (global_dc->printer);
1284 pp.buffer->stream = stderr;
1285 pp_string (&pp, " cluster for: ");
1286 m_base_region->dump_to_pp (&pp, simple);
1287 pp_string (&pp, ": ");
1289 dump_to_pp (&pp, simple, true);
1294 /* Assert that this object is valid. */
1297 binding_cluster::validate () const
/* Count the symbolic vs concrete keys in the map; the class invariant
   is at most one symbolic key, and never a mix of both kinds.  */
1299 int num_symbolic = 0;
1300 int num_concrete = 0;
1301 for (auto iter : m_map)
1303 if (iter.first->symbolic_p ())
1308 /* We shouldn't have more than one symbolic key per cluster
1309 (or one would have clobbered the other). */
1310 gcc_assert (num_symbolic < 2);
1311 /* We can't have both concrete and symbolic keys. */
1312 gcc_assert (num_concrete == 0 || num_symbolic == 0);
1315 /* Return a new json::object of the form
1316 {"escaped": true/false,
1317 "touched": true/false,
1318 "map" : object for the binding_map. */
1321 binding_cluster::to_json () const
/* Caller takes ownership of the returned object.  */
1323 json::object *cluster_obj = new json::object ();
1325 cluster_obj->set ("escaped", new json::literal (m_escaped));
1326 cluster_obj->set ("touched", new json::literal (m_touched));
1327 cluster_obj->set ("map", m_map.to_json ());
1332 /* Add a binding of SVAL of kind KIND to REG, unpacking SVAL if it is a
1336 binding_cluster::bind (store_manager *mgr,
1337 const region *reg, const svalue *sval)
/* Compound svalues are unpacked into their constituent bindings
   rather than bound wholesale.  */
1339 if (const compound_svalue *compound_sval
1340 = sval->dyn_cast_compound_svalue ())
1342 bind_compound_sval (mgr, reg, compound_sval);
/* Non-compound case: make a key for REG and bind directly.  */
1346 const binding_key *binding = binding_key::make (mgr, reg);
1347 bind_key (binding, sval);
1350 /* Bind SVAL to KEY.
1351 Unpacking of compound_svalues should already have been done by the
1352 time this is called. */
1355 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1357 gcc_assert (sval->get_kind () != SK_COMPOUND);
1359 m_map.put (key, sval);
/* A symbolic key write marks the cluster; the elided line presumably
   sets m_touched.  */
1360 if (key->symbolic_p ())
1364 /* Subroutine of binding_cluster::bind.
1365 Unpack compound_svals when binding them, so that we bind them
1369 binding_cluster::bind_compound_sval (store_manager *mgr,
1371 const compound_svalue *compound_sval)
1373 region_offset reg_offset
1374 = reg->get_offset (mgr->get_svalue_manager ());
/* A symbolic destination offset can't be decomposed into concrete
   per-field keys; just clobber the region instead.  */
1375 if (reg_offset.symbolic_p ())
1378 clobber_region (mgr, reg);
/* Rebase each concrete key of the compound value by REG's bit offset
   and bind the pieces individually.  */
1382 for (map_t::iterator iter = compound_sval->begin ();
1383 iter != compound_sval->end (); ++iter)
1385 const binding_key *iter_key = (*iter).first;
1386 const svalue *iter_sval = (*iter).second;
1388 if (const concrete_binding *concrete_key
1389 = iter_key->dyn_cast_concrete_binding ())
1391 bit_offset_t effective_start
1392 = (concrete_key->get_start_bit_offset ()
1393 + reg_offset.get_bit_offset ());
1394 const concrete_binding *effective_concrete_key
1395 = mgr->get_concrete_binding (effective_start,
1396 concrete_key->get_size_in_bits ());
1397 bind_key (effective_concrete_key, iter_sval);
1404 /* Remove all bindings overlapping REG within this cluster. */
1407 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
/* NULL uncertainty: removed values are not recorded as maybe-bound.  */
1409 remove_overlapping_bindings (mgr, reg, NULL);
1412 /* Remove any bindings for REG within this cluster. */
1415 binding_cluster::purge_region (store_manager *mgr, const region *reg)
/* Only declared regions are purged this way (exact-key removal,
   not overlap-based).  */
1417 gcc_assert (reg->get_kind () == RK_DECL);
1418 const binding_key *binding
1419 = binding_key::make (mgr, const_cast<region *> (reg));
1420 m_map.remove (binding);
1423 /* Clobber REG and fill it with repeated copies of SVAL. */
1426 binding_cluster::fill_region (store_manager *mgr,
/* First drop any overlapping bindings, then bind a repeated_svalue
   sized by REG's byte size.  */
1430 clobber_region (mgr, reg);
1432 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1433 const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1434 const svalue *fill_sval
1435 = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1436 byte_size_sval, sval);
1437 bind (mgr, reg, fill_sval);
1440 /* Clobber REG within this cluster and fill it with zeroes. */
1443 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
/* Implemented as fill_region with a (char)0 constant, matching
   memset-style semantics.  */
1445 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1446 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1447 fill_region (mgr, reg, zero_sval);
1450 /* Mark REG_TO_BIND within this cluster as being unknown.
1452 Remove any bindings overlapping REG_FOR_OVERLAP.
1453 If UNCERTAINTY is non-NULL, use it to record any svalues that
1454 had bindings to them removed, as being maybe-bound.
1456 REG_TO_BIND and REG_FOR_OVERLAP are the same for
1457 store::mark_region_as_unknown, but are different in
1458 store::set_value's alias handling, for handling the case where
1459 we have a write to a symbolic REG_FOR_OVERLAP. */
1462 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1463 const region *reg_to_bind,
1464 const region *reg_for_overlap,
1465 uncertainty_t *uncertainty)
/* Clear anything overlapping the (possibly aliasing) overlap region...  */
1467 remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty);
1469 /* Add a default binding to "unknown". */
1470 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1472 = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
1473 bind (mgr, reg_to_bind, sval);
1476 /* Purge state involving SVAL. */
1479 binding_cluster::purge_state_involving (const svalue *sval,
1480 region_model_manager *sval_mgr)
/* Two-phase purge: first collect the affected keys (we can't mutate
   m_map while iterating it), then apply removals/replacements.  */
1482 auto_vec<const binding_key *> to_remove;
1483 auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1484 for (auto iter : m_map)
1486 const binding_key *iter_key = iter.first;
/* Symbolic keys whose region involves SVAL are dropped entirely.  */
1487 if (const symbolic_binding *symbolic_key
1488 = iter_key->dyn_cast_symbolic_binding ())
1490 const region *reg = symbolic_key->get_region ();
1491 if (reg->involves_p (sval))
1492 to_remove.safe_push (iter_key);
/* Bound values that involve SVAL are replaced with "unknown" of the
   same type.  */
1494 const svalue *iter_sval = iter.second;
1495 if (iter_sval->involves_p (sval))
1496 to_make_unknown.safe_push (std::make_pair(iter_key,
1497 iter_sval->get_type ()));
1499 for (auto iter : to_remove)
1501 m_map.remove (iter);
1504 for (auto iter : to_make_unknown)
1506 const svalue *new_sval
1507 = sval_mgr->get_or_create_unknown_svalue (iter.second);
1508 m_map.put (iter.first, new_sval);
1512 /* Get any SVAL bound to REG within this cluster via kind KIND,
1513 without checking parent regions of REG. */
1516 binding_cluster::get_binding (store_manager *mgr,
1517 const region *reg) const
1519 const binding_key *reg_binding = binding_key::make (mgr, reg);
1520 const svalue *sval = m_map.get (reg_binding);
1523 /* If we have a struct with a single field, then the binding of
1524 the field will equal that of the struct, and looking up e.g.
1525 PARENT_REG.field within:
1526 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1527 will erroneously return INIT_VAL(OTHER_REG), rather than
1528 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1529 Fix this issue by iterating upwards whilst the bindings are equal,
1530 expressing the lookups as subvalues.
1531 We have to gather a list of subregion accesses, then walk it
1532 in reverse to get the subvalues. */
1533 auto_vec<const region *> regions;
/* Walk upwards while the parent's key and binding match REG's but the
   types disagree, recording each step.  */
1534 while (const region *parent_reg = reg->get_parent_region ())
1536 const binding_key *parent_reg_binding
1537 = binding_key::make (mgr, parent_reg);
1538 if (parent_reg_binding == reg_binding
1539 && sval->get_type ()
1541 && sval->get_type () != reg->get_type ())
1543 regions.safe_push (reg);
/* Once types agree, rebuild the value by taking subvalues in reverse
   (outermost-first) order.  */
1549 if (sval->get_type ()
1551 && sval->get_type () == reg->get_type ())
1554 const region *iter_reg;
1555 FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1557 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1558 sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1566 /* Get any SVAL bound to REG within this cluster,
1567 either directly for REG, or recursively checking for bindings within
1568 parent regions and extracting subvalues if need be. */
1571 binding_cluster::get_binding_recursive (store_manager *mgr,
1572 const region *reg) const
/* Prefer a direct binding for REG itself...  */
1574 if (const svalue *sval = get_binding (mgr, reg))
/* ...otherwise recurse to the parent (stopping at the base region)
   and carve out REG's portion as a sub_svalue.  */
1576 if (reg != m_base_region)
1577 if (const region *parent_reg = reg->get_parent_region ())
1578 if (const svalue *parent_sval
1579 = get_binding_recursive (mgr, parent_reg))
1581 /* Extract child svalue from parent svalue. */
1582 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1583 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1589 /* Get any value bound for REG within this cluster. */
1592 binding_cluster::get_any_binding (store_manager *mgr,
1593 const region *reg) const
1595 /* Look for a direct binding. */
1596 if (const svalue *direct_sval
1597 = get_binding_recursive (mgr, reg))
1600 /* If we had a write to a cluster of unknown size, we might
1601 have a self-binding of the whole base region with an svalue,
1602 where the base region is symbolic.
1603 Handle such cases by returning sub_svalue instances. */
1604 if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
1606 /* Extract child svalue from parent svalue. */
1607 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1608 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1612 /* If this cluster has been touched by a symbolic write, then the content
1613 of any subregion not currently specifically bound is "UNKNOWN". */
1616 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1617 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1620 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1621 then we don't know if we're reading those values or not, so the result
1622 is also "UNKNOWN". */
1623 if (reg->get_offset (mgr->get_svalue_manager ()).symbolic_p ()
1624 && m_map.elements () > 0)
1626 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1627 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
/* Try assembling a compound value from the pieces affecting REG.  */
1630 if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1631 return compound_sval;
1633 /* Otherwise, the initial value, or uninitialized. */
1637 /* Attempt to get a compound_svalue for the bindings within the cluster
1638 affecting REG (which could be the base region itself).
1640 Create a compound_svalue with the subset of bindings the affect REG,
1641 offsetting them so that the offsets are relative to the start of REG
1644 For example, REG could be one element within an array of structs.
1646 Return the resulting compound_svalue, or NULL if there's a problem. */
1649 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1650 const region *reg) const
/* Symbolic offsets (of either the cluster base or REG) can't be
   rebased concretely; bail out.  */
1652 region_offset cluster_offset
1653 = m_base_region->get_offset (mgr->get_svalue_manager ());
1654 if (cluster_offset.symbolic_p ())
1656 region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
1657 if (reg_offset.symbolic_p ())
1660 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1662 /* We will a build the result map in two parts:
1663 (a) result_map, holding the concrete keys from this cluster,
1665 (b) default_map, holding the initial values for the region
1666 (e.g. uninitialized, initializer values, or zero), unless this
1667 cluster has been touched.
1669 We will populate (a), and as we do, clobber (b), trimming and
1670 splitting its bindings as necessary.
1671 Finally, we will merge (b) into (a), giving a concrete map
1672 that merges both the initial values and the bound values from
1673 the binding_cluster.
1674 Doing it this way reduces N for the O(N^2) intersection-finding,
1675 perhaps we should have a spatial-organized data structure for
1676 concrete keys, though. */
1678 binding_map result_map;
1679 binding_map default_map;
1681 /* Set up default values in default_map. */
1682 const svalue *default_sval;
1684 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1686 default_sval = sval_mgr->get_or_create_initial_value (reg);
1687 const binding_key *default_key = binding_key::make (mgr, reg);
1688 default_map.put (default_key, default_sval);
1690 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1692 const binding_key *key = (*iter).first;
1693 const svalue *sval = (*iter).second;
1695 if (const concrete_binding *concrete_key
1696 = key->dyn_cast_concrete_binding ())
1698 const bit_range &bound_range = concrete_key->get_bit_range ();
1700 bit_size_t reg_bit_size;
/* FIX: "&reg_bit_size" had been mangled to "(R)_bit_size" by an
   HTML-entity pass ("&reg" -> U+00AE); restored.  */
1701 if (!reg->get_bit_size (&reg_bit_size))
1704 bit_range reg_range (reg_offset.get_bit_offset (),
1707 /* Skip bindings that are outside the bit range of REG. */
1708 if (!bound_range.intersects_p (reg_range))
1711 /* We shouldn't have an exact match; that should have been
1713 gcc_assert (!(reg_range == bound_range));
1715 bit_range subrange (0, 0);
1716 if (reg_range.contains_p (bound_range, &subrange))
1718 /* We have a bound range fully within REG.
1719 Add it to map, offsetting accordingly. */
1721 /* Get offset of KEY relative to REG, rather than to
1723 const concrete_binding *offset_concrete_key
1724 = mgr->get_concrete_binding (subrange);
1725 result_map.put (offset_concrete_key, sval);
1727 /* Clobber default_map, removing/trimming/spliting where
1728 it overlaps with offset_concrete_key. */
1729 default_map.remove_overlapping_bindings (mgr,
1730 offset_concrete_key,
1733 else if (bound_range.contains_p (reg_range, &subrange))
1735 /* REG is fully within the bound range, but
1736 is not equal to it; we're extracting a subvalue. */
1737 return sval->extract_bit_range (reg->get_type (),
1739 mgr->get_svalue_manager ());
1743 /* REG and the bound range partially overlap. */
1744 bit_range reg_subrange (0, 0);
1745 bit_range bound_subrange (0, 0);
/* FIX: likewise restored "&reg_subrange" from the "(R)_subrange"
   mojibake.  */
1746 reg_range.intersects_p (bound_range,
1747 &reg_subrange, &bound_subrange);
1749 /* Get the bits from the bound value for the bits at the
1750 intersection (relative to the bound value). */
1751 const svalue *overlap_sval
1752 = sval->extract_bit_range (NULL_TREE,
1754 mgr->get_svalue_manager ());
1756 /* Get key for overlap, relative to the REG. */
1757 const concrete_binding *overlap_concrete_key
1758 = mgr->get_concrete_binding (reg_subrange);
1759 result_map.put (overlap_concrete_key, overlap_sval);
1761 /* Clobber default_map, removing/trimming/spliting where
1762 it overlaps with overlap_concrete_key. */
1763 default_map.remove_overlapping_bindings (mgr,
1764 overlap_concrete_key,
1769 /* Can't handle symbolic bindings. */
1773 if (result_map.elements () == 0)
1776 /* Merge any bindings from default_map into result_map. */
1777 for (auto iter : default_map)
1779 const binding_key *key = iter.first;
1780 const svalue *sval = iter.second;
1781 result_map.put (key, sval);
1784 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
1787 /* Remove, truncate, and/or split any bindings within this map that
1790 If REG's base region or this cluster is symbolic and they're different
1791 base regions, then remove everything in this cluster's map, on the
1792 grounds that REG could be referring to the same memory as anything
1795 If UNCERTAINTY is non-NULL, use it to record any svalues that
1796 were removed, as being maybe-bound. */
1799 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1801 uncertainty_t *uncertainty)
1803 const binding_key *reg_binding = binding_key::make (mgr, reg);
1805 const region *cluster_base_reg = get_base_region ();
1806 const region *other_base_reg = reg->get_base_region ();
1807 /* If at least one of the base regions involved is symbolic, and they're
1808 not the same base region, then consider everything in the map as
1809 potentially overlapping with reg_binding (even if it's a concrete
1810 binding and things in the map are concrete - they could be referring
1811 to the same memory when the symbolic base regions are taken into
1813 bool always_overlap = (cluster_base_reg != other_base_reg
1814 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
1815 || other_base_reg->get_kind () == RK_SYMBOLIC));
/* Delegate the actual trimming/splitting to the binding_map.  */
1816 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
1820 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1822 Return true if they can be merged, false otherwise. */
1825 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1826 const binding_cluster *cluster_b,
1827 binding_cluster *out_cluster,
1830 model_merger *merger)
1832 gcc_assert (out_cluster);
1834 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1835 true if either of the inputs is true. */
1836 if ((cluster_a && cluster_a->m_escaped)
1837 || (cluster_b && cluster_b->m_escaped))
1838 out_cluster->m_escaped = true;
1839 if ((cluster_a && cluster_a->m_touched)
1840 || (cluster_b && cluster_b->m_touched))
1841 out_cluster->m_touched = true;
1843 /* At least one of CLUSTER_A and CLUSTER_B are non-NULL, but either
1844 could be NULL. Handle these cases. */
1845 if (cluster_a == NULL)
1847 gcc_assert (cluster_b != NULL);
1848 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1849 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1852 if (cluster_b == NULL)
1854 gcc_assert (cluster_a != NULL);
1855 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1856 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1860 /* The "both inputs are non-NULL" case. */
1861 gcc_assert (cluster_a != NULL && cluster_b != NULL);
1862 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1863 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
/* Collect the union of keys bound in either cluster.  */
1865 hash_set<const binding_key *> keys;
1866 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1867 iter_a != cluster_a->m_map.end (); ++iter_a)
1869 const binding_key *key_a = (*iter_a).first;
1872 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1873 iter_b != cluster_b->m_map.end (); ++iter_b)
1875 const binding_key *key_b = (*iter_b).first;
/* Merge per-key, tracking key kinds so the invariant check at the
   bottom can detect illegal mixes.  */
1878 int num_symbolic_keys = 0;
1879 int num_concrete_keys = 0;
1880 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1881 iter != keys.end (); ++iter)
1883 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1884 const binding_key *key = *iter;
1885 const svalue *sval_a = cluster_a->get_any_value (key);
1886 const svalue *sval_b = cluster_b->get_any_value (key);
1888 if (key->symbolic_p ())
1889 num_symbolic_keys++;
1891 num_concrete_keys++;
1893 if (sval_a == sval_b)
1895 gcc_assert (sval_a);
1896 out_cluster->m_map.put (key, sval_a);
1899 else if (sval_a && sval_b)
1901 if (const svalue *merged_sval
1902 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1904 out_cluster->m_map.put (key, merged_sval);
1907 /* Merger of the svalues failed. Reject merger of the cluster. */
1911 /* If we get here, then one cluster binds this key and the other
1912 doesn't; merge them as "UNKNOWN". */
1913 gcc_assert (sval_a || sval_b);
1915 const svalue *bound_sval = sval_a ? sval_a : sval_b;
1916 tree type = bound_sval->get_type ();
1917 const svalue *unknown_sval
1918 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
1920 /* ...but reject the merger if this sval shouldn't be mergeable
1921 (e.g. reject merging svalues that have non-purgable sm-state,
1922 to avoid falsely reporting memory leaks by merging them
1923 with something else). */
1924 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
1927 out_cluster->m_map.put (key, unknown_sval);
1930 /* We can only have at most one symbolic key per cluster,
1931 and if we do, we can't have any concrete keys.
1932 If this happens, mark the cluster as touched, with no keys. */
1933 if (num_symbolic_keys >= 2
1934 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
1936 out_cluster->m_touched = true;
1937 out_cluster->m_map.empty ();
1940 /* We don't handle other kinds of overlaps yet. */
1945 /* Update this cluster to reflect an attempt to merge OTHER where there
1946 is no other cluster to merge with, and so we're notionally merging the
1947 bound values in OTHER with the initial value of the relevant regions.
1949 Any bound keys in OTHER should be bound to unknown in this. */
1952 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
1956 for (map_t::iterator iter = other->m_map.begin ();
1957 iter != other->m_map.end (); ++iter)
1959 const binding_key *iter_key = (*iter).first;
1960 const svalue *iter_sval = (*iter).second;
/* Replace each of OTHER's bindings with same-typed "unknown".  */
1961 const svalue *unknown_sval
1962 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
1963 (iter_sval->get_type ());
1964 m_map.put (iter_key, unknown_sval);
1966 /* For any pointers in OTHER, the merger means that the
1967 concrete pointer becomes an unknown value, which could
1968 show up as a false report of a leak when considering what
1969 pointers are live before vs after.
1970 Avoid this by marking the base regions they point to as having
1972 if (const region_svalue *region_sval
1973 = iter_sval->dyn_cast_region_svalue ())
1975 const region *base_reg
1976 = region_sval->get_pointee ()->get_base_region ();
1977 if (base_reg->tracked_p ()
1978 && !base_reg->symbolic_for_unknown_ptr_p ())
1980 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
1981 c->mark_as_escaped ();
1987 /* Mark this cluster as having escaped. */
/* The elided body presumably just sets m_escaped.  */
1990 binding_cluster::mark_as_escaped ()
1995 /* If this cluster has escaped (by this call, or by an earlier one, or
1996 by being an external param), then unbind all values and mark it
1997 as "touched", so that it has a conjured value, rather than an
1999 Use P to purge state involving conjured_svalues. */
2002 binding_cluster::on_unknown_fncall (const gcall *call,
2004 const conjured_purge &p)
2010 /* Bind it to a new "conjured" value using CALL. */
2012 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2013 (m_base_region->get_type (), call, m_base_region, p);
2014 bind (mgr, m_base_region, sval);
2020 /* Mark this cluster as having been clobbered by STMT.
2021 Use P to purge state involving conjured_svalues. */
2024 binding_cluster::on_asm (const gasm *stmt,
2026 const conjured_purge &p)
/* Same shape as on_unknown_fncall, but keyed by the asm stmt.  */
2030 /* Bind it to a new "conjured" value using CALL. */
2032 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2033 (m_base_region->get_type (), stmt, m_base_region, p);
2034 bind (mgr, m_base_region, sval);
2039 /* Return true if this cluster has escaped. */
2042 binding_cluster::escaped_p () const
2044 /* Consider the "errno" region to always have escaped. */
2045 if (m_base_region->get_kind () == RK_ERRNO)
2050 /* Return true if this binding_cluster has no information
2051 i.e. if there are no bindings, and it hasn't been marked as having
2052 escaped, or touched symbolically. */
2055 binding_cluster::redundant_p () const
/* Empty map is the first conjunct; the elided ones presumably test
   !m_escaped and !m_touched.  */
2057 return (m_map.elements () == 0
2062 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
2065 append_pathvar_with_type (path_var pv,
2067 auto_vec<path_var> *out_pvs)
2069 gcc_assert (pv.m_tree);
/* Wrap in a NOP_EXPR cast only when the tree's type differs.  */
2071 if (TREE_TYPE (pv.m_tree) != type)
2072 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
2074 out_pvs->safe_push (pv);
2077 /* Find representative path_vars for SVAL within this binding of BASE_REG,
2078 appending the results to OUT_PVS. */
2081 binding_cluster::get_representative_path_vars (const region_model *model,
2082 svalue_set *visited,
2083 const region *base_reg,
2085 auto_vec<path_var> *out_pvs)
2088 sval = simplify_for_binding (sval);
/* Scan every binding; for each that binds exactly SVAL, translate the
   key back into source-level path_vars.  */
2090 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2092 const binding_key *key = (*iter).first;
2093 const svalue *bound_sval = (*iter).second;
2094 if (bound_sval == sval)
/* Concrete keys: map the bit range back to subregions of BASE_REG.  */
2096 if (const concrete_binding *ckey
2097 = key->dyn_cast_concrete_binding ())
2099 auto_vec <const region *> subregions;
2100 base_reg->get_subregions_for_binding
2101 (model->get_manager (),
2102 ckey->get_start_bit_offset (),
2103 ckey->get_size_in_bits (),
2107 const region *subregion;
2108 FOR_EACH_VEC_ELT (subregions, i, subregion)
2111 = model->get_representative_path_var (subregion,
2113 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
/* Symbolic keys: use the key's region directly.  */
2118 const symbolic_binding *skey = (const symbolic_binding *)key;
2120 = model->get_representative_path_var (skey->get_region (),
2122 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2128 /* Get any svalue bound to KEY, or NULL. */
2131 binding_cluster::get_any_value (const binding_key *key) const
/* Thin wrapper over the map lookup.  */
2133 return m_map.get (key);
2136 /* If this cluster has a single direct binding for the whole of the region,
2138 For use in simplifying dumps. */
2141 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2143 /* Fail gracefully if MGR is NULL to make it easier to dump store
2144 instances in the debugger. */
/* Only the single-binding case qualifies as "simple".  */
2148 if (m_map.elements () != 1)
2151 const binding_key *key = binding_key::make (mgr, m_base_region);
2152 return get_any_value (key);
2155 /* class store_manager. */
/* Forward to the owning region_model_manager's logger.  */
2158 store_manager::get_logger () const
2160 return m_mgr->get_logger ();
2163 /* binding consolidation. */
/* Intern a concrete_binding: reuse a cached instance for equal keys so
   that pointer equality implies key equality.  */
2165 const concrete_binding *
2166 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
2167 bit_offset_t size_in_bits)
2169 concrete_binding b (start_bit_offset, size_in_bits);
2170 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2173 concrete_binding *to_save = new concrete_binding (b);
2174 m_concrete_binding_key_mgr.put (b, to_save);
/* Intern a symbolic_binding for REG, mirroring get_concrete_binding.  */
2178 const symbolic_binding *
2179 store_manager::get_symbolic_binding (const region *reg)
2181 symbolic_binding b (reg);
2182 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2185 symbolic_binding *to_save = new symbolic_binding (b);
2186 m_symbolic_binding_key_mgr.put (b, to_save);
2192 /* store's default ctor. */
/* Starts with an empty cluster map and the unknown-fn flag clear.  */
2195 : m_called_unknown_fn (false)
2199 /* store's copy ctor. */
2201 store::store (const store &other)
2202 : m_cluster_map (other.m_cluster_map.elements ()),
2203 m_called_unknown_fn (other.m_called_unknown_fn)
/* Deep-copy: each binding_cluster is heap-allocated and owned by the
   store, so clone each one rather than sharing pointers.  */
2205 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2206 iter != other.m_cluster_map.end ();
2209 const region *reg = (*iter).first;
2211 binding_cluster *c = (*iter).second;
2213 m_cluster_map.put (reg, new binding_cluster (*c));
/* NOTE(review): this loop frees the owned binding_cluster objects;
   it appears to be the body of store's destructor (the signature is
   elided in this excerpt) -- confirm against the full file.  */
2221 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2222 iter != m_cluster_map.end ();
2224 delete (*iter).second;
2227 /* store's assignment operator. */
2230 store::operator= (const store &other)
2232 /* Delete existing cluster map. */
2233 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2234 iter != m_cluster_map.end ();
2236 delete (*iter).second;
2237 m_cluster_map.empty ();
2239 m_called_unknown_fn = other.m_called_unknown_fn;
/* Deep-copy OTHER's clusters, as in the copy ctor.  */
2241 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2242 iter != other.m_cluster_map.end ();
2245 const region *reg = (*iter).first;
2247 binding_cluster *c = (*iter).second;
2249 m_cluster_map.put (reg, new binding_cluster (*c));
2254 /* store's equality operator. */
2257 store::operator== (const store &other) const
2259 if (m_called_unknown_fn != other.m_called_unknown_fn)
/* Cheap size check first, then per-region cluster comparison.  */
2262 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2265 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2266 iter != m_cluster_map.end ();
2269 const region *reg = (*iter).first;
2270 binding_cluster *c = (*iter).second;
2271 binding_cluster **other_slot
2272 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2273 if (other_slot == NULL)
2275 if (*c != **other_slot)
/* Equal stores must hash equally (checking build only).  */
2279 gcc_checking_assert (hash () == other.hash ());
2284 /* Get a hash value for this store. */
2287 store::hash () const
/* XOR the per-cluster hashes; XOR keeps the result independent of
   traversal order over the hash map.  */
2289 hashval_t result = 0;
2290 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2291 iter != m_cluster_map.end ();
2293 result ^= (*iter).second->hash ();
2297 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2298 removing duplicate parents. */
2301 get_sorted_parent_regions (auto_vec<const region *> *out,
2302 auto_vec<const region *> &in)
2304 /* Get the set of parent regions. */
2305 hash_set<const region *> parent_regions;
2306 const region *iter_reg;
2308 FOR_EACH_VEC_ELT (in, i, iter_reg)
2310 const region *parent_reg = iter_reg->get_parent_region ();
2311 gcc_assert (parent_reg);
2312 parent_regions.add (parent_reg);
/* Copy the de-duplicated set into OUT, then sort for determinism.  */
2316 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2317 iter != parent_regions.end(); ++iter)
2318 out->safe_push (*iter);
2321 out->qsort (region::cmp_ptr_ptr);
2324 /* Dump a representation of this store to PP, using SIMPLE to control how
2325 svalues and regions are printed.
2326 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2327 (to make it easier to use from the debugger). */
2330 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2331 store_manager *mgr) const
2333 /* Sort into some deterministic order. */
2334 auto_vec<const region *> base_regions;
2335 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2336 iter != m_cluster_map.end (); ++iter)
2338 const region *base_reg = (*iter).first;
2339 base_regions.safe_push (base_reg);
2341 base_regions.qsort (region::cmp_ptr_ptr);
2343 /* Gather clusters, organize by parent region, so that we can group
2344 together locals, globals, etc. */
2345 auto_vec<const region *> parent_regions;
2346 get_sorted_parent_regions (&parent_regions, base_regions);
2348 const region *parent_reg;
2350 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2352 gcc_assert (parent_reg);
2353 pp_string (pp, "clusters within ");
2354 parent_reg->dump_to_pp (pp, simple);
2358 pp_string (pp, " {");
2360 const region *base_reg;
2362 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2364 /* This is O(N * M), but N ought to be small. */
2365 if (base_reg->get_parent_region () != parent_reg)
2367 binding_cluster *cluster
2368 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2372 pp_string (pp, ", ");
2374 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2376 /* Special-case to simplify dumps for the common case where
2377 we just have one value directly bound to the whole of a
/* Multiline form of the simple-value special case.  */
2381 pp_string (pp, " cluster for: ");
2382 base_reg->dump_to_pp (pp, simple);
2383 pp_string (pp, ": ");
2384 sval->dump_to_pp (pp, simple);
2385 if (cluster->escaped_p ())
2386 pp_string (pp, " (ESCAPED)");
2387 if (cluster->touched_p ())
2388 pp_string (pp, " (TOUCHED)");
/* Single-line form of the simple-value special case.  */
2393 pp_string (pp, "region: {");
2394 base_reg->dump_to_pp (pp, simple);
2395 pp_string (pp, ", value: ");
2396 sval->dump_to_pp (pp, simple);
2397 if (cluster->escaped_p ())
2398 pp_string (pp, " (ESCAPED)");
2399 if (cluster->touched_p ())
2400 pp_string (pp, " (TOUCHED)");
2401 pp_string (pp, "}");
/* General case: dump the full cluster (multiline...).  */
2406 pp_string (pp, " cluster for: ");
2407 base_reg->dump_to_pp (pp, simple);
2409 cluster->dump_to_pp (pp, simple, multiline);
/* ...or single-line.  */
2413 pp_string (pp, "base region: {");
2414 base_reg->dump_to_pp (pp, simple);
2415 pp_string (pp, "} has cluster: {");
2416 cluster->dump_to_pp (pp, simple, multiline);
2417 pp_string (pp, "}");
2421 pp_string (pp, "}");
2423 pp_printf (pp, "m_called_unknown_fn: %s",
2424 m_called_unknown_fn ? "TRUE" : "FALSE");
2429 /* Dump a multiline representation of this store to stderr. */
2432 store::dump (bool simple) const
/* Same debugger-friendly stderr setup as binding_cluster::dump;
   NULL mgr disables the simple-value dump simplification.  */
2435 pp_format_decoder (&pp) = default_tree_printer;
2436 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2437 pp.buffer->stream = stderr;
2438 dump_to_pp (&pp, simple, true, NULL);
2443 /* Assert that this object is valid. */
2446 store::validate () const
/* Delegate to each cluster's own invariant checks.  */
2448 for (auto iter : m_cluster_map)
2449 iter.second->validate ();
2452 /* Return a new json::object of the form
2453 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2454 ... for each cluster within parent region},
2455 ...for each parent region,
2456 "called_unknown_fn": true/false}. */
2459 store::to_json () const
2461 json::object *store_obj = new json::object ();
2463 /* Sort into some deterministic order. */
2464 auto_vec<const region *> base_regions;
2465 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2466 iter != m_cluster_map.end (); ++iter)
2468 const region *base_reg = (*iter).first;
2469 base_regions.safe_push (base_reg);
2471 base_regions.qsort (region::cmp_ptr_ptr);
2473 /* Gather clusters, organize by parent region, so that we can group
2474 together locals, globals, etc. */
2475 auto_vec<const region *> parent_regions;
2476 get_sorted_parent_regions (&parent_regions, base_regions);
2478 const region *parent_reg;
2480 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2482 gcc_assert (parent_reg);
2484 json::object *clusters_in_parent_reg_obj = new json::object ();
2486 const region *base_reg;
2488 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2490 /* This is O(N * M), but N ought to be small. */
2491 if (base_reg->get_parent_region () != parent_reg)
2493 binding_cluster *cluster
2494 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg)
2495 label_text base_reg_desc = base_reg->get_desc ();
2496 clusters_in_parent_reg_obj->set (base_reg_desc.get (),
2497 cluster->to_json ());
2499 label_text parent_reg_desc = parent_reg->get_desc ();
2500 store_obj->set (parent_reg_desc.get (), clusters_in_parent_reg_obj);
2503 store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
2508 /* Get any svalue bound to REG, or NULL.
   Looks up the cluster for REG's base region and delegates to it;
   returns NULL if no such cluster exists.  */
2511 store::get_any_binding (store_manager *mgr, const region *reg) const
2513 const region *base_reg = reg->get_base_region ();
2514 binding_cluster **cluster_slot
2515 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg)
2518 return (*cluster_slot)->get_any_binding (mgr, reg);
2521 /* Set the value of LHS_REG to RHS_SVAL.
   If UNCERTAINTY is non-NULL, use it to record svalues displaced by the
   write as being maybe-bound.  Also invalidates other clusters that the
   write could alias.  */
2524 store::set_value (store_manager *mgr, const region *lhs_reg,
2525 const svalue *rhs_sval,
2526 uncertainty_t *uncertainty)
2528 logger *logger = mgr->get_logger ();
/* First remove any bindings that this write would overwrite.  */
2531 remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2533 if (lhs_reg->get_type ())
2534 rhs_sval = simplify_for_binding (rhs_sval);
2535 /* ...but if we have no type for the region, retain any cast. */
2537 const region *lhs_base_reg = lhs_reg->get_base_region ();
2538 binding_cluster *lhs_cluster;
2539 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2541 /* Reject attempting to bind values into a symbolic region
2542 for an unknown ptr; merely invalidate values below. */
2545 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2546 then treat the region being pointed to as having escaped. */
2547 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2549 const region *ptr_dst = ptr_sval->get_pointee ();
2550 const region *ptr_base_reg = ptr_dst->get_base_region ();
2551 mark_as_escaped (ptr_base_reg);
2554 else if (lhs_base_reg->tracked_p ())
/* The normal case: bind RHS_SVAL within the cluster for the LHS's
   base region, creating the cluster if necessary.  */
2556 lhs_cluster = get_or_create_cluster (lhs_base_reg);
2557 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2561 /* Reject attempting to bind values into an untracked region;
2562 merely invalidate values below. */
2566 /* Bindings to a cluster can affect other clusters if a symbolic
2567 base region is involved.
2568 Writes to concrete clusters can't affect other concrete clusters,
2569 but can affect symbolic clusters.
2570 Writes to symbolic clusters can affect both concrete and symbolic
2572 Invalidate our knowledge of other clusters that might have been
2573 affected by the write. */
2574 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2575 iter != m_cluster_map.end (); ++iter)
2577 const region *iter_base_reg = (*iter).first;
2578 binding_cluster *iter_cluster = (*iter).second;
2579 if (iter_base_reg != lhs_base_reg
2580 && (lhs_cluster == NULL
2581 || lhs_cluster->symbolic_p ()
2582 || iter_cluster->symbolic_p ()))
2584 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2585 switch (t_alias.get_value ())
2590 case tristate::TS_UNKNOWN:
/* Possible aliasing: log it (when logging is enabled), then
   conservatively wipe our knowledge of the other cluster.  */
2593 pretty_printer *pp = logger->get_printer ();
2594 logger->start_log_line ();
2595 logger->log_partial ("possible aliasing of ");
2596 iter_base_reg->dump_to_pp (pp, true);
2597 logger->log_partial (" when writing SVAL: ");
2598 rhs_sval->dump_to_pp (pp, true);
2599 logger->log_partial (" to LHS_REG: ");
2600 lhs_reg->dump_to_pp (pp, true);
2601 logger->end_log_line ();
2603 /* Mark all of iter_cluster's iter_base_reg as unknown,
2604 using LHS_REG when considering overlaps, to handle
2605 symbolic vs concrete issues. */
2606 iter_cluster->mark_region_as_unknown
2608 iter_base_reg, /* reg_to_bind */
2609 lhs_reg, /* reg_for_overlap */
2613 case tristate::TS_TRUE:
2617 case tristate::TS_FALSE:
2618 /* If they can't be aliases, then don't invalidate this
2626 /* Determine if BASE_REG_A could be an alias of BASE_REG_B.
   Returns TS_FALSE if they definitely can't alias, otherwise
   TS_UNKNOWN; never returns TS_TRUE.  */
2629 store::eval_alias (const region *base_reg_a,
2630 const region *base_reg_b) const
2632 /* SSA names can't alias. */
2633 tree decl_a = base_reg_a->maybe_get_decl ();
2634 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2635 return tristate::TS_FALSE;
2636 tree decl_b = base_reg_b->maybe_get_decl ();
2637 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2638 return tristate::TS_FALSE;
2640 /* Try both ways, for symmetry. */
2641 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2642 if (ts_ab.is_false ())
2643 return tristate::TS_FALSE;
2644 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2645 if (ts_ba.is_false ())
2646 return tristate::TS_FALSE;
2647 return tristate::TS_UNKNOWN;
2650 /* Half of store::eval_alias; called twice for symmetry.
   Only detects definite non-aliasing (TS_FALSE); anything else is
   reported as TS_UNKNOWN.  All the checks here only fire when
   BASE_REG_A is a symbolic region.  */
2653 store::eval_alias_1 (const region *base_reg_a,
2654 const region *base_reg_b) const
2656 if (const symbolic_region *sym_reg_a
2657 = base_reg_a->dyn_cast_symbolic_region ())
2659 const svalue *sval_a = sym_reg_a->get_pointer ();
2660 if (tree decl_b = base_reg_b->maybe_get_decl ())
/* A decl whose address is never taken can't be reached through
   a symbolic pointer.  */
2662 if (!may_be_aliased (decl_b))
2663 return tristate::TS_FALSE;
2664 if (sval_a->get_kind () == SK_INITIAL)
2665 if (!is_global_var (decl_b))
2667 /* The initial value of a pointer can't point to a local. */
2668 return tristate::TS_FALSE;
2671 if (sval_a->get_kind () == SK_INITIAL
2672 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2674 /* The initial value of a pointer can't point to a
2675 region that was allocated on the heap after the beginning of the
2677 return tristate::TS_FALSE;
2679 if (const widening_svalue *widening_sval_a
2680 = sval_a->dyn_cast_widening_svalue ())
2682 const svalue *base = widening_sval_a->get_base_svalue ();
2683 if (const region_svalue *region_sval
2684 = base->dyn_cast_region_svalue ())
2686 const region *pointee = region_sval->get_pointee ();
2687 /* If we have sval_a is WIDENING(&REGION, OP), and
2688 B can't alias REGION, then B can't alias A either.
2689 For example, A might arise from
2690 for (ptr = &REGION; ...; ptr++)
2691 where sval_a is ptr in the 2nd iteration of the loop.
2692 We want to ensure that "*ptr" can only clobber things
2693 within REGION's base region. */
2694 tristate ts = eval_alias (pointee->get_base_region (),
2697 return tristate::TS_FALSE;
2701 return tristate::TS_UNKNOWN;
2704 /* Remove all bindings overlapping REG within this store.
   A no-op if there is no cluster for REG's base region.  */
2707 store::clobber_region (store_manager *mgr, const region *reg)
2709 const region *base_reg = reg->get_base_region ();
2710 binding_cluster **slot = m_cluster_map.get (base_reg);
2713 binding_cluster *cluster = *slot;
2714 cluster->clobber_region (mgr, reg);
/* Drop the cluster entirely once it carries no information.  */
2715 if (cluster->redundant_p ())
2718 m_cluster_map.remove (base_reg);
2722 /* Remove any bindings for REG within this store.
   A no-op if there is no cluster for REG's base region.  */
2725 store::purge_region (store_manager *mgr, const region *reg)
2727 const region *base_reg = reg->get_base_region ();
2728 binding_cluster **slot = m_cluster_map.get (base_reg);
2731 binding_cluster *cluster = *slot;
2732 cluster->purge_region (mgr, reg);
/* Drop the cluster entirely once it carries no information.  */
2733 if (cluster->redundant_p ())
2736 m_cluster_map.remove (base_reg);
2740 /* Fill REG with SVAL.
   Bails out (without binding anything) for base regions that are
   untracked or that represent dereferences of an unknown pointer.  */
2743 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
2745 const region *base_reg = reg->get_base_region ();
2746 if (base_reg->symbolic_for_unknown_ptr_p ()
2747 || !base_reg->tracked_p ())
2749 binding_cluster *cluster = get_or_create_cluster (base_reg);
2750 cluster->fill_region (mgr, reg, sval);
2753 /* Zero-fill REG, by filling it with a (char)0 constant.  */
2756 store::zero_fill_region (store_manager *mgr, const region *reg)
2758 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2759 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2760 fill_region (mgr, reg, zero_sval);
2763 /* Mark REG as having unknown content.
   If UNCERTAINTY is non-NULL, use it to record displaced svalues as
   maybe-bound.  Bails out for untracked base regions and for
   dereferences of an unknown pointer.  */
2766 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2767 uncertainty_t *uncertainty)
2769 const region *base_reg = reg->get_base_region ();
2770 if (base_reg->symbolic_for_unknown_ptr_p ()
2771 || !base_reg->tracked_p ())
2773 binding_cluster *cluster = get_or_create_cluster (base_reg);
/* REG serves as both the region to bind and the region used for
   overlap computation here.  */
2774 cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty);
2777 /* Purge state involving SVAL:
   - remove entire clusters whose base region involves SVAL;
   - scrub SVAL out of the bindings of the remaining clusters.
   Cluster removal is deferred until after the iteration so the map
   isn't mutated while being walked.  */
2780 store::purge_state_involving (const svalue *sval,
2781 region_model_manager *sval_mgr)
2783 auto_vec <const region *> base_regs_to_purge;
2784 for (auto iter : m_cluster_map)
2786 const region *base_reg = iter.first;
2787 if (base_reg->involves_p (sval))
2788 base_regs_to_purge.safe_push (base_reg);
2791 binding_cluster *cluster = iter.second;
2792 cluster->purge_state_involving (sval, sval_mgr);
2796 for (auto iter : base_regs_to_purge)
2797 purge_cluster (iter);
2800 /* Get the cluster for BASE_REG, or NULL (const version).
   BASE_REG must itself be a base region.  */
2802 const binding_cluster *
2803 store::get_cluster (const region *base_reg) const
2805 gcc_assert (base_reg);
2806 gcc_assert (base_reg->get_base_region () == base_reg);
/* const_cast needed only because hash_map::get is non-const.  */
2807 if (binding_cluster **slot
2808 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2814 /* Get the cluster for BASE_REG, or NULL (non-const version).
   BASE_REG must itself be a base region.  */
2817 store::get_cluster (const region *base_reg)
2819 gcc_assert (base_reg);
2820 gcc_assert (base_reg->get_base_region () == base_reg);
2821 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2827 /* Get the cluster for BASE_REG, creating it if doesn't already exist.
   The store retains ownership of the new cluster via m_cluster_map.  */
2830 store::get_or_create_cluster (const region *base_reg)
2832 gcc_assert (base_reg);
2833 gcc_assert (base_reg->get_base_region () == base_reg);
2835 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2836 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2838 /* We shouldn't create clusters for base regions that aren't trackable. */
2839 gcc_assert (base_reg->tracked_p ());
2841 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2844 binding_cluster *cluster = new binding_cluster (base_reg);
2845 m_cluster_map.put (base_reg, cluster);
2850 /* Remove any cluster for BASE_REG, for use by
2851 region_model::unbind_region_and_descendents
2852 when popping stack frames and handling deleted heap regions.
   A no-op if BASE_REG has no cluster.  */
2855 store::purge_cluster (const region *base_reg)
2857 gcc_assert (base_reg->get_base_region () == base_reg);
2858 binding_cluster **slot = m_cluster_map.get (base_reg);
2861 binding_cluster *cluster = *slot;
2863 m_cluster_map.remove (base_reg);
2866 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
2867 Return true if successful, or false if the stores can't be merged.
   The merge proceeds cluster-by-cluster over the union of the base
   regions of both stores, delegating to binding_cluster::can_merge_p;
   the first cluster that can't be merged aborts the whole merge.  */
2870 store::can_merge_p (const store *store_a, const store *store_b,
2871 store *out_store, store_manager *mgr,
2872 model_merger *merger)
/* "Called an unknown fn" is sticky across a merge.  */
2874 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
2875 out_store->m_called_unknown_fn = true;
2877 /* Get the union of all base regions for STORE_A and STORE_B. */
2878 hash_set<const region *> base_regions;
2879 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
2880 iter_a != store_a->m_cluster_map.end (); ++iter_a)
2882 const region *base_reg_a = (*iter_a).first;
2883 base_regions.add (base_reg_a);
2885 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
2886 iter_b != store_b->m_cluster_map.end (); ++iter_b)
2888 const region *base_reg_b = (*iter_b).first;
2889 base_regions.add (base_reg_b);
2892 /* Sort the base regions before considering them. This ought not to
2893 affect the results, but can affect which types UNKNOWN_REGIONs are
2894 created for in a run; sorting them thus avoids minor differences
2896 auto_vec<const region *> vec_base_regions (base_regions.elements ());
2897 for (hash_set<const region *>::iterator iter = base_regions.begin ();
2898 iter != base_regions.end (); ++iter)
2899 vec_base_regions.quick_push (*iter);
2900 vec_base_regions.qsort (region::cmp_ptr_ptr);
2902 const region *base_reg;
2903 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
2905 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
2906 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
2907 /* At least one of cluster_a and cluster_b must be non-NULL. */
2908 binding_cluster *out_cluster
2909 = out_store->get_or_create_cluster (base_reg);
2910 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
2911 out_cluster, out_store, mgr, merger))
2917 /* Mark the cluster for BASE_REG as having escaped.
2918 For use when handling an unrecognized function call, and
2919 for params to "top-level" calls.
2920 Further unknown function calls could touch it, even if the cluster
2921 isn't reachable from args of those calls.
   Bails out for untracked base regions and for dereferences of an
   unknown pointer.  */
2924 store::mark_as_escaped (const region *base_reg)
2926 gcc_assert (base_reg);
2927 gcc_assert (base_reg->get_base_region () == base_reg);
2929 if (base_reg->symbolic_for_unknown_ptr_p ()
2930 || !base_reg->tracked_p ())
2933 binding_cluster *cluster = get_or_create_cluster (base_reg);
2934 cluster->mark_as_escaped ();
2937 /* Handle an unknown fncall by updating any clusters that have escaped
2938 (either in this fncall, or in a prior one).
   P describes how to purge/conjure values for the affected clusters.  */
2941 store::on_unknown_fncall (const gcall *call, store_manager *mgr,
2942 const conjured_purge &p)
/* Record that an unknown function has been called, so that e.g.
   merging and summary replay can take it into account.  */
2944 m_called_unknown_fn = true;
2946 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2947 iter != m_cluster_map.end (); ++iter)
2948 (*iter).second->on_unknown_fncall (call, mgr, p);
2951 /* Return true if a non-const pointer to BASE_REG (or something within it)
2952 has escaped to code outside of the TU being analyzed.  */
2955 store::escaped_p (const region *base_reg) const
2957 gcc_assert (base_reg);
2958 gcc_assert (base_reg->get_base_region () == base_reg);
2960 /* "errno" can always be modified by external code. */
2961 if (base_reg->get_kind () == RK_ERRNO)
/* Otherwise, defer to the per-cluster escape flag, if a cluster
   exists for BASE_REG.  */
2964 if (binding_cluster **cluster_slot
2965 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
2966 return (*cluster_slot)->escaped_p ();
2970 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
2971 this store, using VISITED to ensure the traversal terminates.  */
2974 store::get_representative_path_vars (const region_model *model,
2975 svalue_set *visited,
2977 auto_vec<path_var> *out_pvs) const
2981 /* Find all bindings that reference SVAL. */
2982 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2983 iter != m_cluster_map.end (); ++iter)
2985 const region *base_reg = (*iter).first;
2986 binding_cluster *cluster = (*iter).second;
2987 cluster->get_representative_path_vars (model, visited, base_reg, sval,
/* Additionally, an INITIAL_SVALUE can be described via a path_var for
   the region whose initial value it is.  */
2991 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
2993 const region *reg = init_sval->get_region ();
2994 if (path_var pv = model->get_representative_path_var (reg,
2996 out_pvs->safe_push (pv);
3000 /* Remove all bindings overlapping REG within this store, removing
3001 any clusters that become redundant.
3003 If UNCERTAINTY is non-NULL, use it to record any svalues that
3004 were removed, as being maybe-bound.  */
3007 store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
3008 uncertainty_t *uncertainty)
3010 const region *base_reg = reg->get_base_region ();
3011 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
3013 binding_cluster *cluster = *cluster_slot;
/* If REG is the whole base region and nothing has escaped, the
   entire cluster can simply be dropped...  */
3014 if (reg == base_reg && !escaped_p (base_reg))
3016 /* Remove whole cluster. */
3017 m_cluster_map.remove (base_reg);
/* ...otherwise only remove the overlapping bindings within it.  */
3021 cluster->remove_overlapping_bindings (mgr, reg, uncertainty);
3025 /* Subclass of visitor that accumulates a hash_set of the regions that
   are visited; used by store::canonicalize to find which regions are
   referenced by bound values.  */
3028 struct region_finder : public visitor
/* Record each visited region in m_regs.
   NOTE(review): the insertion into m_regs is on a line elided from this
   listing — confirm against the full source.  */
3030 void visit_region (const region *reg) final override
3035 hash_set<const region *> m_regs;
3038 /* Canonicalize this store, to maximize the chance of equality between
   store instances that are semantically equivalent.  In particular, if
   we have e.g.:
3045 cluster for: HEAP_ALLOCATED_REGION(543)
3048 where the heap region is empty and unreferenced, then purge that
3049 cluster, to avoid unbounded state chains involving these. */
3042 store::canonicalize (store_manager *mgr)
3051 /* Find regions that are referenced by bound values in the store. */
3053 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3054 iter != m_cluster_map.end (); ++iter)
3056 binding_cluster *cluster = (*iter).second;
3057 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3058 bind_iter != cluster->m_map.end (); ++bind_iter)
3059 (*bind_iter).second->accept (&s);
3062 /* Locate heap-allocated regions that have empty bindings that weren't
3064 hash_set<const region *> purgeable_regions;
3065 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3066 iter != m_cluster_map.end (); ++iter)
3068 const region *base_reg = (*iter).first;
3069 binding_cluster *cluster = (*iter).second;
3070 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
3072 if (cluster->empty_p ())
3073 if (!s.m_regs.contains (base_reg))
3074 purgeable_regions.add (base_reg);
3076 /* Also cover the UNKNOWN case. */
3077 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
3078 if (sval->get_kind () == SK_UNKNOWN)
3079 if (!s.m_regs.contains (base_reg))
3080 purgeable_regions.add (base_reg);
/* Purge them in a second pass, so the map isn't mutated while being
   iterated over.  */
3085 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
3086 iter != purgeable_regions.end (); ++iter)
3088 const region *base_reg = *iter;
3089 purge_cluster (base_reg);
3093 /* Subroutine for use by exploded_path::feasible_p.
3095 We need to deal with state differences between:
3096 (a) when the exploded_graph is being initially constructed and
3097 (b) when replaying the state changes along a specific path in
3098 exploded_path::feasible_p.
3100 In (a), state merging happens, so when exploring a loop
3101 for (i = 0; i < 1024; i++)
3102 on successive iterations we have i == 0, then i == WIDENING.
3104 In (b), no state merging happens, so naively replaying the path
3105 that goes twice through the loop then exits it
3106 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3107 that exits the loop, which would be found to be infeasible as i == 1,
3108 and the path would be rejected.
3110 We need to fix up state during replay. This subroutine is
3111 called whenever we enter a supernode that we've already
3112 visited along this exploded_path, passing in OTHER_STORE
3113 from the destination enode's state.
3115 Find bindings to widening values in OTHER_STORE.
3116 For all that are found, update the binding in this store to UNKNOWN. */
3119 store::loop_replay_fixup (const store *other_store,
3120 region_model_manager *mgr)
3122 gcc_assert (other_store);
3123 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3124 iter != other_store->m_cluster_map.end (); ++iter)
3126 const region *base_reg = (*iter).first;
3127 binding_cluster *cluster = (*iter).second;
3128 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3129 bind_iter != cluster->m_map.end (); ++bind_iter)
3131 const binding_key *key = (*bind_iter).first;
3132 const svalue *sval = (*bind_iter).second;
3133 if (sval->get_kind () == SK_WIDENING)
/* Rebind the same key in *this* store to an unknown svalue of
   the widening value's type.  */
3135 binding_cluster *this_cluster
3136 = get_or_create_cluster (base_reg);
3137 const svalue *unknown
3138 = mgr->get_or_create_unknown_svalue (sval->get_type ());
3139 this_cluster->bind_key (key, unknown);
3145 /* Use R to replay the bindings from SUMMARY into this object,
   cluster by cluster, in a deterministic (sorted) order.  */
3148 store::replay_call_summary (call_summary_replay &r,
3149 const store &summary)
3151 if (summary.m_called_unknown_fn)
3153 /* A call to an external function occurred in the summary.
3154 Hence we need to invalidate our knowledge of globals,
3155 escaped regions, etc. */
3156 on_unknown_fncall (r.get_call_stmt (),
3157 r.get_store_manager (),
3158 conjured_purge (r.get_caller_model (),
/* Sort the summary's base regions so that replay order (and hence any
   values conjured during replay) is deterministic.  */
3162 auto_vec<const region *> keys (summary.m_cluster_map.elements ());
3163 for (auto kv : summary.m_cluster_map)
3164 keys.quick_push (kv.first);
3165 keys.qsort (region::cmp_ptr_ptr);
3166 for (auto base_reg : keys)
3167 replay_call_summary_cluster (r, summary, base_reg);
3170 /* Use R and SUMMARY to replay the bindings in SUMMARY_CLUSTER
3171 into this object.
   First propagates the cluster's "escaped"/"touched" flags into the
   caller's corresponding cluster, then dispatches on the kind of the
   summary's base region to decide how (or whether) to copy the
   bindings across.  */
3174 store::replay_call_summary_cluster (call_summary_replay &r,
3175 const store &summary,
3176 const region *summary_base_reg)
3178 const call_details &cd = r.get_call_details ();
3179 region_model_manager *reg_mgr = r.get_manager ();
3180 store_manager *mgr = reg_mgr->get_store_manager ();
3181 const binding_cluster *summary_cluster
3182 = summary.get_cluster (summary_base_reg);
3184 /* Handle "ESCAPED" and "TOUCHED" flags. */
3185 if (summary_cluster->escaped_p () || summary_cluster->touched_p ())
3186 if (const region *caller_reg
3187 = r.convert_region_from_summary (summary_base_reg))
3189 const region *caller_base_reg = caller_reg->get_base_region ();
3190 if (caller_base_reg->tracked_p ()
3191 && !caller_base_reg->symbolic_for_unknown_ptr_p ())
3193 binding_cluster *caller_cluster
3194 = get_or_create_cluster (caller_base_reg);
3195 if (summary_cluster->escaped_p ())
3196 caller_cluster->mark_as_escaped ();
3197 if (summary_cluster->touched_p ())
3198 caller_cluster->m_touched = true;
3202 switch (summary_base_reg->get_kind ())
3204 /* Top-level regions. */
3210 case RK_THREAD_LOCAL:
3212 /* Child regions. */
3219 /* Other regions. */
3222 /* These should never be the base region of a binding cluster. */
3229 /* These can be marked as escaping. */
/* Symbolic region: convert the pointer svalue from the summary into
   the caller's frame, dereference it, and copy the binding across.  */
3234 const symbolic_region *summary_symbolic_reg
3235 = as_a <const symbolic_region *> (summary_base_reg);
3236 const svalue *summary_ptr_sval = summary_symbolic_reg->get_pointer ();
3237 const svalue *caller_ptr_sval
3238 = r.convert_svalue_from_summary (summary_ptr_sval);
3239 if (!caller_ptr_sval)
3241 const region *caller_dest_reg
3242 = cd.get_model ()->deref_rvalue (caller_ptr_sval,
3245 const svalue *summary_sval
3246 = summary.get_any_binding (mgr, summary_base_reg);
3249 const svalue *caller_sval
3250 = r.convert_svalue_from_summary (summary_sval);
/* Fall back to an unknown svalue if conversion failed.  */
3253 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3254 set_value (mgr, caller_dest_reg,
3255 caller_sval, NULL /* uncertainty_t * */);
3259 case RK_HEAP_ALLOCATED:
/* Heap/decl region: convert the region itself, then copy across
   either its simple binding or a compound svalue for its map.  */
3263 const region *caller_dest_reg
3264 = r.convert_region_from_summary (summary_base_reg);
3265 if (!caller_dest_reg)
3267 const svalue *summary_sval
3268 = summary.get_any_binding (mgr, summary_base_reg);
3270 summary_sval = reg_mgr->get_or_create_compound_svalue
3271 (summary_base_reg->get_type (),
3272 summary_cluster->get_map ());
3273 const svalue *caller_sval
3274 = r.convert_svalue_from_summary (summary_sval);
/* Fall back to an unknown svalue if conversion failed.  */
3277 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3278 set_value (mgr, caller_dest_reg,
3279 caller_sval, NULL /* uncertainty_t * */);
3284 /* Ignore bindings of alloca regions in the summary. */
3291 namespace selftest {
3293 /* Verify that bit_range::intersects_p works as expected,
   both the simple boolean form and the overload that computes
   the intersection relative to each range's start.  */
3296 test_bit_range_intersects_p ()
3298 bit_range b0 (0, 1);
3299 bit_range b1 (1, 1);
3300 bit_range b2 (2, 1);
3301 bit_range b3 (3, 1);
3302 bit_range b4 (4, 1);
3303 bit_range b5 (5, 1);
3304 bit_range b6 (6, 1);
3305 bit_range b7 (7, 1);
3306 bit_range b1_to_6 (1, 6);
3307 bit_range b0_to_7 (0, 8);
3308 bit_range b3_to_5 (3, 3);
3309 bit_range b6_to_7 (6, 2);
3311 /* self-intersection is true. */
3312 ASSERT_TRUE (b0.intersects_p (b0));
3313 ASSERT_TRUE (b7.intersects_p (b7));
3314 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3315 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3317 ASSERT_FALSE (b0.intersects_p (b1));
3318 ASSERT_FALSE (b1.intersects_p (b0));
3319 ASSERT_FALSE (b0.intersects_p (b7));
3320 ASSERT_FALSE (b7.intersects_p (b0));
3322 ASSERT_TRUE (b0_to_7.intersects_p (b0));
3323 ASSERT_TRUE (b0_to_7.intersects_p (b7));
3324 ASSERT_TRUE (b0.intersects_p (b0_to_7));
3325 ASSERT_TRUE (b7.intersects_p (b0_to_7));
3327 ASSERT_FALSE (b0.intersects_p (b1_to_6));
3328 ASSERT_FALSE (b1_to_6.intersects_p (b0));
3329 ASSERT_TRUE (b1.intersects_p (b1_to_6));
3330 ASSERT_TRUE (b1_to_6.intersects_p (b1));
3331 ASSERT_TRUE (b1_to_6.intersects_p (b6));
3332 ASSERT_FALSE (b1_to_6.intersects_p (b7));
3334 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3335 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3337 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3338 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
/* Check the out-param overload: r1/r2 receive the intersection
   expressed relative to "this" and to the other range respectively.  */
3342 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3343 ASSERT_EQ (r1.get_start_bit_offset (), 0);
3344 ASSERT_EQ (r1.m_size_in_bits, 6);
3345 ASSERT_EQ (r2.get_start_bit_offset (), 1);
3346 ASSERT_EQ (r2.m_size_in_bits, 6);
3348 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3349 ASSERT_EQ (r1.get_start_bit_offset (), 1);
3350 ASSERT_EQ (r1.m_size_in_bits, 6);
3351 ASSERT_EQ (r2.get_start_bit_offset (), 0);
3352 ASSERT_EQ (r2.m_size_in_bits, 6);
3355 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ.
   Check that bit_range::from_mask (MASK) succeeds and yields EXPECTED,
   reporting failures at LOC.  */
3358 assert_bit_range_from_mask_eq (const location &loc,
3359 unsigned HOST_WIDE_INT mask,
3360 const bit_range &expected)
3362 bit_range actual (0, 0);
3363 bool ok = bit_range::from_mask (mask, &actual);
3364 ASSERT_TRUE_AT (loc, ok);
3365 ASSERT_EQ_AT (loc, actual, expected);
3368 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3369 out EXPECTED_BIT_RANGE. */
3371 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3372 SELFTEST_BEGIN_STMT \
3373 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3374 EXPECTED_BIT_RANGE); \
3377 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK.
   Check that bit_range::from_mask (MASK) fails, reporting at LOC.  */
3380 assert_no_bit_range_from_mask_eq (const location &loc,
3381 unsigned HOST_WIDE_INT mask)
3383 bit_range actual (0, 0);
3384 bool ok = bit_range::from_mask (mask, &actual);
3385 ASSERT_FALSE_AT (loc, ok);
3388 /* Assert that bit_range::from_mask (MASK) returns false. */
3390 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3391 SELFTEST_BEGIN_STMT \
3392 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3395 /* Verify that bit_range::from_mask works as expected:
   it should accept exactly the masks that consist of a single
   contiguous run of set bits.  */
3398 test_bit_range_from_mask ()
3400 /* Should fail on zero. */
3401 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3403 /* Verify 1-bit masks. */
3404 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3405 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3406 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3407 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3408 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3409 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3410 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3411 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3413 /* Verify N-bit masks starting at bit 0. */
3414 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3415 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3416 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3417 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3418 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3419 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3420 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3421 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3423 /* Various other tests. */
3424 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3425 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3426 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3428 /* Multiple ranges of set bits should fail. */
3429 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3430 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3433 /* Implementation detail of ASSERT_OVERLAP.
   Check B1/B2 overlap in both directions, reporting at LOC.  */
3436 assert_overlap (const location &loc,
3437 const concrete_binding *b1,
3438 const concrete_binding *b2)
3440 ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3441 ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3444 /* Implementation detail of ASSERT_DISJOINT.
   Check B1/B2 do not overlap in either direction, reporting at LOC.  */
3447 assert_disjoint (const location &loc,
3448 const concrete_binding *b1,
3449 const concrete_binding *b2)
3451 ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3452 ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3455 /* Assert that B1 and B2 overlap, checking both ways. */
3457 #define ASSERT_OVERLAP(B1, B2) \
3458 SELFTEST_BEGIN_STMT \
3459 assert_overlap (SELFTEST_LOCATION, B1, B2); \
3462 /* Assert that B1 and B2 do not overlap, checking both ways. */
3464 #define ASSERT_DISJOINT(B1, B2) \
3465 SELFTEST_BEGIN_STMT \
3466 assert_disjoint (SELFTEST_LOCATION, B1, B2); \
3469 /* Verify that concrete_binding::overlaps_p works as expected,
   using bindings named by their inclusive bit ranges
   (e.g. cb_0_7 covers bits 0..7).  */
3472 test_binding_key_overlap ()
3474 store_manager mgr (NULL);
3476 /* Various 8-bit bindings. */
3477 const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3478 const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3479 const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3480 const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3482 /* 16-bit bindings. */
3483 const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3484 const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3485 const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3487 /* 32-bit binding. */
3488 const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3490 /* Everything should self-overlap. */
3491 ASSERT_OVERLAP (cb_0_7, cb_0_7);
3492 ASSERT_OVERLAP (cb_8_15, cb_8_15);
3493 ASSERT_OVERLAP (cb_16_23, cb_16_23);
3494 ASSERT_OVERLAP (cb_24_31, cb_24_31);
3495 ASSERT_OVERLAP (cb_0_15, cb_0_15);
3496 ASSERT_OVERLAP (cb_8_23, cb_8_23);
3497 ASSERT_OVERLAP (cb_16_31, cb_16_31);
3498 ASSERT_OVERLAP (cb_0_31, cb_0_31);
3500 /* Verify the 8-bit bindings that don't overlap each other. */
3501 ASSERT_DISJOINT (cb_0_7, cb_8_15);
3502 ASSERT_DISJOINT (cb_8_15, cb_16_23);
3504 /* Check for overlap of differently-sized bindings. */
3505 ASSERT_OVERLAP (cb_0_7, cb_0_31);
3506 /* ...and with differing start points. */
3507 ASSERT_OVERLAP (cb_8_15, cb_0_31);
3508 ASSERT_DISJOINT (cb_8_15, cb_16_31);
3509 ASSERT_OVERLAP (cb_16_23, cb_0_31);
3510 ASSERT_OVERLAP (cb_16_31, cb_0_31);
3512 ASSERT_DISJOINT (cb_0_7, cb_8_23);
3513 ASSERT_OVERLAP (cb_8_23, cb_16_23);
3514 ASSERT_OVERLAP (cb_8_23, cb_16_31);
3515 ASSERT_DISJOINT (cb_8_23, cb_24_31);
3518 /* Run all of the selftests within this file. */
3521 analyzer_store_cc_tests ()
3523 test_bit_range_intersects_p ();
3524 test_bit_range_from_mask ();
3525 test_binding_key_overlap ();
3528 } // namespace selftest
3530 #endif /* CHECKING_P */
3534 #endif /* #if ENABLE_ANALYZER */