1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2022 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
22 #define INCLUDE_MEMORY
24 #include "coretypes.h"
27 #include "basic-block.h"
29 #include "gimple-iterator.h"
30 #include "diagnostic-core.h"
35 #include "stringpool.h"
38 #include "fold-const.h"
39 #include "tree-pretty-print.h"
40 #include "diagnostic-color.h"
41 #include "diagnostic-metadata.h"
44 #include "analyzer/analyzer.h"
45 #include "analyzer/analyzer-logging.h"
46 #include "ordered-hash-map.h"
49 #include "analyzer/supergraph.h"
51 #include "analyzer/call-string.h"
52 #include "analyzer/program-point.h"
53 #include "analyzer/store.h"
54 #include "analyzer/region-model.h"
55 #include "analyzer/call-summary.h"
56 #include "analyzer/analyzer-selftests.h"
57 #include "stor-layout.h"
63 /* Dump SVALS to PP, sorting them to ensure determinism. */
66 dump_svalue_set (const hash_set <const svalue *> &svals,
67 pretty_printer *pp, bool simple)
69 auto_vec <const svalue *> v;
70 for (hash_set<const svalue *>::iterator iter = svals.begin ();
71 iter != svals.end (); ++iter)
75 v.qsort (svalue::cmp_ptr_ptr);
77 pp_character (pp, '{');
80 FOR_EACH_VEC_ELT (v, i, sval)
84 sval->dump_to_pp (pp, simple);
86 pp_character (pp, '}');
89 /* class uncertainty_t. */
91 /* Dump this object to PP. */
94 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
96 pp_string (pp, "{m_maybe_bound_svals: ");
97 dump_svalue_set (m_maybe_bound_svals, pp, simple);
99 pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
100 dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
104 /* Dump this object to stderr. */
107 uncertainty_t::dump (bool simple) const
110 pp_format_decoder (&pp) = default_tree_printer;
111 pp_show_color (&pp) = pp_show_color (global_dc->printer);
112 pp.buffer->stream = stderr;
113 dump_to_pp (&pp, simple);
118 /* class binding_key. */
121 binding_key::make (store_manager *mgr, const region *r)
123 region_offset offset = r->get_offset (mgr->get_svalue_manager ());
124 if (offset.symbolic_p ())
125 return mgr->get_symbolic_binding (r);
129 if (r->get_bit_size (&bit_size))
130 return mgr->get_concrete_binding (offset.get_bit_offset (),
133 return mgr->get_symbolic_binding (r);
137 /* Dump this binding_key to stderr. */
140 binding_key::dump (bool simple) const
143 pp_format_decoder (&pp) = default_tree_printer;
144 pp_show_color (&pp) = pp_show_color (global_dc->printer);
145 pp.buffer->stream = stderr;
146 dump_to_pp (&pp, simple);
151 /* Get a description of this binding_key. */
154 binding_key::get_desc (bool simple) const
157 pp_format_decoder (&pp) = default_tree_printer;
158 dump_to_pp (&pp, simple);
159 return label_text::take (xstrdup (pp_formatted_text (&pp)));
162 /* qsort callback. */
165 binding_key::cmp_ptrs (const void *p1, const void *p2)
167 const binding_key * const *pk1 = (const binding_key * const *)p1;
168 const binding_key * const *pk2 = (const binding_key * const *)p2;
169 return cmp (*pk1, *pk2);
172 /* Comparator for binding_keys. */
175 binding_key::cmp (const binding_key *k1, const binding_key *k2)
177 int concrete1 = k1->concrete_p ();
178 int concrete2 = k2->concrete_p ();
179 if (int concrete_cmp = concrete1 - concrete2)
183 const concrete_binding *b1 = (const concrete_binding *)k1;
184 const concrete_binding *b2 = (const concrete_binding *)k2;
185 if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
186 b2->get_start_bit_offset (),
189 return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
194 const symbolic_binding *s1 = (const symbolic_binding *)k1;
195 const symbolic_binding *s2 = (const symbolic_binding *)k2;
204 /* struct bit_range. */
207 bit_range::dump_to_pp (pretty_printer *pp) const
209 byte_range bytes (0, 0);
210 if (as_byte_range (&bytes))
211 bytes.dump_to_pp (pp);
214 pp_string (pp, "start: ");
215 pp_wide_int (pp, m_start_bit_offset, SIGNED);
216 pp_string (pp, ", size: ");
217 pp_wide_int (pp, m_size_in_bits, SIGNED);
218 pp_string (pp, ", next: ");
219 pp_wide_int (pp, get_next_bit_offset (), SIGNED);
223 /* Dump this object to stderr. */
226 bit_range::dump () const
229 pp.buffer->stream = stderr;
235 /* If OTHER is a subset of this, return true and write
236 to *OUT the relative range of OTHER within this.
237 Otherwise return false. */
240 bit_range::contains_p (const bit_range &other, bit_range *out) const
242 if (contains_p (other.get_start_bit_offset ())
243 && contains_p (other.get_last_bit_offset ()))
245 out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
246 out->m_size_in_bits = other.m_size_in_bits;
253 /* If OTHER intersects this, return true and write
254 the relative range of OTHER within THIS to *OUT_THIS,
255 and the relative range of THIS within OTHER to *OUT_OTHER.
256 Otherwise return false. */
259 bit_range::intersects_p (const bit_range &other,
261 bit_range *out_other) const
263 if (get_start_bit_offset () < other.get_next_bit_offset ()
264 && other.get_start_bit_offset () < get_next_bit_offset ())
266 bit_offset_t overlap_start
267 = MAX (get_start_bit_offset (),
268 other.get_start_bit_offset ());
269 bit_offset_t overlap_next
270 = MIN (get_next_bit_offset (),
271 other.get_next_bit_offset ());
272 gcc_assert (overlap_next > overlap_start);
273 bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
274 *out_this = abs_overlap_bits - get_start_bit_offset ();
275 *out_other = abs_overlap_bits - other.get_start_bit_offset ();
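/* Illustrative sketch added for this section (not from the upstream
   sources): a selftest-style example of the relative ranges produced by
   bit_range::contains_p and bit_range::intersects_p, assuming the
   ASSERT_* macros from the selftest framework are available.  Guarded
   out, since it is only meant as an illustration.  */
#if 0
static void
sketch_bit_range_overlap ()
{
  bit_range whole (0, 8);	/* Bits [0, 8).  */
  bit_range tail (4, 8);	/* Bits [4, 12).  */

  /* [4, 12) is not a subset of [0, 8)...  */
  bit_range rel (0, 0);
  ASSERT_FALSE (whole.contains_p (tail, &rel));

  /* ...but the two ranges overlap on bits [4, 8): relative to WHOLE that
     is offset 4, size 4; relative to TAIL it is offset 0, size 4.  */
  bit_range in_whole (0, 0);
  bit_range in_tail (0, 0);
  ASSERT_TRUE (whole.intersects_p (tail, &in_whole, &in_tail));
  ASSERT_EQ (in_whole, bit_range (4, 4));
  ASSERT_EQ (in_tail, bit_range (0, 4));
}
#endif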
283 bit_range::cmp (const bit_range &br1, const bit_range &br2)
285 if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
286 br2.m_start_bit_offset))
289 return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
292 /* Offset this range downwards by OFFSET (subtracting OFFSET from the start). */
295 bit_range::operator- (bit_offset_t offset) const
297 return bit_range (m_start_bit_offset - offset, m_size_in_bits);
300 /* If MASK is a contiguous range of set bits, write them
301 to *OUT and return true.
302 Otherwise return false. */
305 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
307 unsigned iter_bit_idx = 0;
308 unsigned HOST_WIDE_INT iter_bit_mask = 1;
310 /* Find the first contiguous run of set bits in MASK. */
312 /* Find first set bit in MASK. */
313 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
315 if (mask & iter_bit_mask)
320 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
324 unsigned first_set_iter_bit_idx = iter_bit_idx;
325 unsigned num_set_bits = 1;
329 /* Find next unset bit in MASK. */
330 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
332 if (!(mask & iter_bit_mask))
338 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
340 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
344 /* We now have the first contiguous run of set bits in MASK.
345 Fail if any other bits are set. */
346 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
348 if (mask & iter_bit_mask)
354 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
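/* Illustrative sketch added for this section (not from the upstream
   sources): bit_range::from_mask only succeeds when the set bits of MASK
   are contiguous; 0x3c has bits 2..5 set, giving {start: 2, size: 4},
   whereas 0x5 has a gap and is rejected.  Assumes the selftest ASSERT_*
   macros; guarded out as an example only.  */
#if 0
static void
sketch_bit_range_from_mask ()
{
  bit_range br (0, 0);
  ASSERT_TRUE (bit_range::from_mask (0x3c, &br));
  ASSERT_EQ (br, bit_range (2, 4));
  ASSERT_FALSE (bit_range::from_mask (0x5, &br));
}
#endif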
358 /* Attempt to convert this bit_range to a byte_range.
359 Return true if it is possible, writing the result to *OUT.
360 Otherwise return false. */
363 bit_range::as_byte_range (byte_range *out) const
365 if (m_start_bit_offset % BITS_PER_UNIT == 0
366 && m_size_in_bits % BITS_PER_UNIT == 0)
368 out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
369 out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
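/* Illustrative sketch added for this section (not from the upstream
   sources): a bit_range converts to a byte_range only when both its start
   and size are byte-aligned, e.g. bits [16, 48) become bytes [2, 6),
   whereas a 4-bit-wide range is rejected.  Assumes the selftest ASSERT_*
   macros; guarded out as an example only.  */
#if 0
static void
sketch_bit_range_as_byte_range ()
{
  byte_range bytes (0, 0);
  ASSERT_TRUE (bit_range (16, 32).as_byte_range (&bytes));
  ASSERT_EQ (bytes.m_start_byte_offset, 2);
  ASSERT_EQ (bytes.m_size_in_bytes, 4);
  ASSERT_FALSE (bit_range (16, 4).as_byte_range (&bytes));
}
#endif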
375 /* Dump this object to PP. */
378 byte_range::dump_to_pp (pretty_printer *pp) const
380 if (m_size_in_bytes == 0)
382 pp_string (pp, "empty");
384 else if (m_size_in_bytes == 1)
386 pp_string (pp, "byte ");
387 pp_wide_int (pp, m_start_byte_offset, SIGNED);
391 pp_string (pp, "bytes ");
392 pp_wide_int (pp, m_start_byte_offset, SIGNED);
394 pp_wide_int (pp, get_last_byte_offset (), SIGNED);
398 /* Dump this object to stderr. */
401 byte_range::dump () const
404 pp.buffer->stream = stderr;
410 /* If OTHER is a subset of this, return true and write
411 to *OUT the relative range of OTHER within this.
412 Otherwise return false. */
415 byte_range::contains_p (const byte_range &other, byte_range *out) const
417 if (contains_p (other.get_start_byte_offset ())
418 && contains_p (other.get_last_byte_offset ()))
420 out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
421 out->m_size_in_bytes = other.m_size_in_bytes;
428 /* Return true if THIS and OTHER intersect and write the number
429 of bytes both buffers overlap to *OUT_NUM_OVERLAP_BYTES.
431 Otherwise return false. */
434 byte_range::intersects_p (const byte_range &other,
435 byte_size_t *out_num_overlap_bytes) const
437 if (get_start_byte_offset () < other.get_next_byte_offset ()
438 && other.get_start_byte_offset () < get_next_byte_offset ())
440 byte_offset_t overlap_start = MAX (get_start_byte_offset (),
441 other.get_start_byte_offset ());
442 byte_offset_t overlap_next = MIN (get_next_byte_offset (),
443 other.get_next_byte_offset ());
444 gcc_assert (overlap_next > overlap_start);
445 *out_num_overlap_bytes = overlap_next - overlap_start;
452 /* Return true if THIS exceeds OTHER and write the overhanging
453 byte range to OUT_OVERHANGING_BYTE_RANGE. */
456 byte_range::exceeds_p (const byte_range &other,
457 byte_range *out_overhanging_byte_range) const
459 gcc_assert (!empty_p ());
461 if (other.get_next_byte_offset () < get_next_byte_offset ())
463 /* THIS definitely exceeds OTHER. */
464 byte_offset_t start = MAX (get_start_byte_offset (),
465 other.get_next_byte_offset ());
466 byte_offset_t size = get_next_byte_offset () - start;
467 gcc_assert (size > 0);
468 out_overhanging_byte_range->m_start_byte_offset = start;
469 out_overhanging_byte_range->m_size_in_bytes = size;
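/* Illustrative sketch added for this section (not from the upstream
   sources): e.g. a 10-byte write into an 8-byte buffer "exceeds" the
   buffer by the 2 bytes at offsets 8 and 9, which is the overhanging
   range reported to the caller (useful for overflow diagnostics).
   Assumes the selftest ASSERT_* macros; guarded out as an example only.  */
#if 0
static void
sketch_byte_range_exceeds_p ()
{
  byte_range write_bytes (0, 10);	/* Bytes [0, 10).  */
  byte_range buffer_bytes (0, 8);	/* Bytes [0, 8).  */
  byte_range overhang (0, 0);
  ASSERT_TRUE (write_bytes.exceeds_p (buffer_bytes, &overhang));
  ASSERT_EQ (overhang.get_start_byte_offset (), 8);
  ASSERT_EQ (overhang.m_size_in_bytes, 2);
}
#endif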
476 /* Return true if THIS falls short of OFFSET and write the
477 byte range by which it falls short to OUT_FALL_SHORT_BYTES. */
480 byte_range::falls_short_of_p (byte_offset_t offset,
481 byte_range *out_fall_short_bytes) const
483 gcc_assert (!empty_p ());
485 if (get_start_byte_offset () < offset)
487 /* THIS falls short of OFFSET. */
488 byte_offset_t start = get_start_byte_offset ();
489 byte_offset_t size = MIN (offset, get_next_byte_offset ()) - start;
490 gcc_assert (size > 0);
491 out_fall_short_bytes->m_start_byte_offset = start;
492 out_fall_short_bytes->m_size_in_bytes = size;
499 /* qsort comparator for byte ranges. */
502 byte_range::cmp (const byte_range &br1, const byte_range &br2)
504 /* Order first by offset. */
505 if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
506 br2.m_start_byte_offset))
509 /* ...then by size. */
510 return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
513 /* class concrete_binding : public binding_key. */
515 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
518 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
520 m_bit_range.dump_to_pp (pp);
523 /* Return true if this binding overlaps with OTHER. */
526 concrete_binding::overlaps_p (const concrete_binding &other) const
528 if (get_start_bit_offset () < other.get_next_bit_offset ()
529 && get_next_bit_offset () > other.get_start_bit_offset ())
534 /* Comparator for use by vec<const concrete_binding *>::qsort. */
537 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
539 const concrete_binding *b1 = *(const concrete_binding * const *)p1;
540 const concrete_binding *b2 = *(const concrete_binding * const *)p2;
542 return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
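/* Illustrative sketch added for this section (not from the upstream
   sources): concrete bindings cover half-open bit ranges, so [0, 32)
   overlaps [16, 48) but merely abuts [32, 64).  Assumes the selftest
   ASSERT_* macros; guarded out as an example only.  */
#if 0
static void
sketch_concrete_binding_overlap ()
{
  concrete_binding b0_32 (0, 32);
  concrete_binding b16_48 (16, 32);
  concrete_binding b32_64 (32, 32);
  ASSERT_TRUE (b0_32.overlaps_p (b16_48));
  ASSERT_FALSE (b0_32.overlaps_p (b32_64));
}
#endif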
545 /* class symbolic_binding : public binding_key. */
548 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
550 //binding_key::dump_to_pp (pp, simple);
551 pp_string (pp, "region: ");
552 m_region->dump_to_pp (pp, simple);
555 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
558 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
560 const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
561 const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
563 return region::cmp_ids (b1->get_region (), b2->get_region ());
566 /* The store is oblivious to the types of the svalues bound within
567 it: any type can get bound at any location.
568 Simplify any casts before binding.
570 For example, if we have:
571 struct big { int ia[1024]; };
573 memcpy (&dst, &src, sizeof (struct big));
574 this reaches us in gimple form as:
575 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
576 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
577 Using cast_region when handling the MEM_REF would give us:
578 INIT_VAL(CAST_REG(unsigned char[4096], src))
579 as rhs_sval, but we can fold that into a cast svalue:
580 CAST(unsigned char[4096], INIT_VAL(src))
581 We can discard that cast from the svalue when binding it in
582 the store for "dst", and simply store:
584 key: {kind: direct, start: 0, size: 32768, next: 32768}
585 value: 'struct big' {INIT_VAL(src)}. */
587 static const svalue *
588 simplify_for_binding (const svalue *sval)
590 if (const svalue *cast_sval = sval->maybe_undo_cast ())
595 /* class binding_map. */
597 /* binding_map's copy ctor. */
599 binding_map::binding_map (const binding_map &other)
600 : m_map (other.m_map)
604 /* binding_map's assignment operator. */
607 binding_map::operator=(const binding_map &other)
609 /* For now, assume we only ever copy to an empty cluster. */
610 gcc_assert (m_map.elements () == 0);
611 for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
614 const binding_key *key = (*iter).first;
615 const svalue *sval = (*iter).second;
616 m_map.put (key, sval);
621 /* binding_map's equality operator. */
624 binding_map::operator== (const binding_map &other) const
626 if (m_map.elements () != other.m_map.elements ())
629 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
631 const binding_key *key = (*iter).first;
632 const svalue *sval = (*iter).second;
633 const svalue **other_slot
634 = const_cast <map_t &> (other.m_map).get (key);
635 if (other_slot == NULL)
637 if (sval != *other_slot)
640 gcc_checking_assert (hash () == other.hash ());
644 /* Generate a hash value for this binding_map. */
647 binding_map::hash () const
649 hashval_t result = 0;
650 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
652 /* Use a new hasher for each key to avoid depending on the ordering
653 of keys when accumulating the result. */
654 inchash::hash hstate;
655 hstate.add_ptr ((*iter).first);
656 hstate.add_ptr ((*iter).second);
657 result ^= hstate.end ();
662 /* Dump a representation of this binding_map to PP.
663 SIMPLE controls how values and regions are to be printed.
664 If MULTILINE, then split the dump over multiple lines and
665 use whitespace for readability, otherwise put all on one line. */
668 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
669 bool multiline) const
671 auto_vec <const binding_key *> binding_keys;
672 for (map_t::iterator iter = m_map.begin ();
673 iter != m_map.end (); ++iter)
675 const binding_key *key = (*iter).first;
676 binding_keys.safe_push (key);
678 binding_keys.qsort (binding_key::cmp_ptrs);
680 const binding_key *key;
682 FOR_EACH_VEC_ELT (binding_keys, i, key)
684 const svalue *value = *const_cast <map_t &> (m_map).get (key);
687 pp_string (pp, " key: {");
688 key->dump_to_pp (pp, simple);
691 pp_string (pp, " value: ");
692 if (tree t = value->get_type ())
693 dump_quoted_tree (pp, t);
694 pp_string (pp, " {");
695 value->dump_to_pp (pp, simple);
702 pp_string (pp, ", ");
703 pp_string (pp, "binding key: {");
704 key->dump_to_pp (pp, simple);
705 pp_string (pp, "}, value: {");
706 value->dump_to_pp (pp, simple);
712 /* Dump a multiline representation of this binding_map to stderr. */
715 binding_map::dump (bool simple) const
718 pp_format_decoder (&pp) = default_tree_printer;
719 pp_show_color (&pp) = pp_show_color (global_dc->printer);
720 pp.buffer->stream = stderr;
721 dump_to_pp (&pp, simple, true);
726 /* Return a new json::object of the form
727 {KEY_DESC : SVALUE_DESC,
728 ...for the various key/value pairs in this binding_map}. */
731 binding_map::to_json () const
733 json::object *map_obj = new json::object ();
735 auto_vec <const binding_key *> binding_keys;
736 for (map_t::iterator iter = m_map.begin ();
737 iter != m_map.end (); ++iter)
739 const binding_key *key = (*iter).first;
740 binding_keys.safe_push (key);
742 binding_keys.qsort (binding_key::cmp_ptrs);
744 const binding_key *key;
746 FOR_EACH_VEC_ELT (binding_keys, i, key)
748 const svalue *value = *const_cast <map_t &> (m_map).get (key);
749 label_text key_desc = key->get_desc ();
750 map_obj->set (key_desc.get (), value->to_json ());
756 /* Comparator for imposing an order on binding_maps. */
759 binding_map::cmp (const binding_map &map1, const binding_map &map2)
761 if (int count_cmp = map1.elements () - map2.elements ())
764 auto_vec <const binding_key *> keys1 (map1.elements ());
765 for (map_t::iterator iter = map1.begin ();
766 iter != map1.end (); ++iter)
767 keys1.quick_push ((*iter).first);
768 keys1.qsort (binding_key::cmp_ptrs);
770 auto_vec <const binding_key *> keys2 (map2.elements ());
771 for (map_t::iterator iter = map2.begin ();
772 iter != map2.end (); ++iter)
773 keys2.quick_push ((*iter).first);
774 keys2.qsort (binding_key::cmp_ptrs);
776 for (size_t i = 0; i < keys1.length (); i++)
778 const binding_key *k1 = keys1[i];
779 const binding_key *k2 = keys2[i];
780 if (int key_cmp = binding_key::cmp (k1, k2))
782 gcc_assert (k1 == k2);
783 if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
790 /* Get the child region of PARENT_REG based upon INDEX within a
793 static const region *
794 get_subregion_within_ctor (const region *parent_reg, tree index,
795 region_model_manager *mgr)
797 switch (TREE_CODE (index))
803 const svalue *index_sval
804 = mgr->get_or_create_constant_svalue (index);
805 return mgr->get_element_region (parent_reg,
806 TREE_TYPE (parent_reg->get_type ()),
811 return mgr->get_field_region (parent_reg, index);
815 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
817 static const svalue *
818 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
820 /* Reuse the get_rvalue logic from region_model. */
821 region_model m (mgr);
822 return m.get_rvalue (path_var (val, 0), NULL);
825 /* Bind values from CONSTRUCTOR to this map, relative to
826 PARENT_REG's relationship to its base region.
827 Return true if successful, false if there was a problem (e.g. due
828 to hitting a complexity limit). */
831 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
832 region_model_manager *mgr)
834 gcc_assert (parent_reg);
835 gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
840 tree parent_type = parent_reg->get_type ();
842 if (TREE_CODE (parent_type) == RECORD_TYPE)
843 field = TYPE_FIELDS (parent_type);
846 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
850 /* If index is NULL, then iterate through the fields for
851 a RECORD_TYPE, or use an INTEGER_CST otherwise.
852 Compare with similar logic in output_constructor. */
856 field = DECL_CHAIN (field);
859 index = build_int_cst (integer_type_node, ix);
861 else if (TREE_CODE (index) == RANGE_EXPR)
863 tree min_index = TREE_OPERAND (index, 0);
864 tree max_index = TREE_OPERAND (index, 1);
865 if (min_index == max_index)
867 if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
873 if (!apply_ctor_val_to_range (parent_reg, mgr,
874 min_index, max_index, val))
879 if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
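/* Illustrative sketch added for this section (not from the upstream
   sources): for a global such as

     struct coord { int x; int y; };
     struct coord c = { 3, 4 };

   applying the CONSTRUCTOR for "c" (assuming 32-bit int) populates this
   map with two concrete bindings, roughly:

     key: {start: 0, size: 32, next: 32}    value: (int)3
     key: {start: 32, size: 32, next: 64}   value: (int)4

   The exact offsets and sizes depend on the target's layout.  */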
885 /* Bind the value VAL into the range of elements within PARENT_REG
886 from MIN_INDEX to MAX_INDEX (including endpoints).
887 For use in handling RANGE_EXPR within a CONSTRUCTOR.
888 Return true if successful, false if there was a problem (e.g. due
889 to hitting a complexity limit). */
892 binding_map::apply_ctor_val_to_range (const region *parent_reg,
893 region_model_manager *mgr,
894 tree min_index, tree max_index,
897 gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
898 gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
900 /* Generate a binding key for the range. */
901 const region *min_element
902 = get_subregion_within_ctor (parent_reg, min_index, mgr);
903 const region *max_element
904 = get_subregion_within_ctor (parent_reg, max_index, mgr);
905 region_offset min_offset = min_element->get_offset (mgr);
906 if (min_offset.symbolic_p ())
908 bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
909 store_manager *smgr = mgr->get_store_manager ();
910 const binding_key *max_element_key = binding_key::make (smgr, max_element);
911 if (max_element_key->symbolic_p ())
913 const concrete_binding *max_element_ckey
914 = max_element_key->dyn_cast_concrete_binding ();
915 bit_size_t range_size_in_bits
916 = max_element_ckey->get_next_bit_offset () - start_bit_offset;
917 const concrete_binding *range_key
918 = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
919 if (range_key->symbolic_p ())
923 if (TREE_CODE (val) == CONSTRUCTOR)
925 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
927 /* Bind the value to the range. */
928 put (range_key, sval);
932 /* Bind the value VAL at INDEX within PARENT_REG.
933 For use in handling a pair of entries within a CONSTRUCTOR.
934 Return true if successful, false if there was a problem (e.g. due
935 to hitting a complexity limit). */
938 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
939 region_model_manager *mgr,
940 tree index, tree val)
942 const region *child_reg
943 = get_subregion_within_ctor (parent_reg, index, mgr);
944 if (TREE_CODE (val) == CONSTRUCTOR)
945 return apply_ctor_to_region (child_reg, val, mgr);
948 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
950 = binding_key::make (mgr->get_store_manager (), child_reg);
951 /* Handle the case where we have an unknown size for child_reg
952 (e.g. due to it being a trailing field with incomplete array
954 if (!k->concrete_p ())
956 /* Assume that sval has a well-defined size for this case. */
957 tree sval_type = sval->get_type ();
958 gcc_assert (sval_type);
959 HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
960 gcc_assert (sval_byte_size != -1);
961 bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
962 /* Get offset of child relative to base region. */
963 region_offset child_base_offset = child_reg->get_offset (mgr);
964 if (child_base_offset.symbolic_p ())
966 /* Convert to an offset relative to the parent region. */
967 region_offset parent_base_offset = parent_reg->get_offset (mgr);
968 gcc_assert (!parent_base_offset.symbolic_p ());
969 bit_offset_t child_parent_offset
970 = (child_base_offset.get_bit_offset ()
971 - parent_base_offset.get_bit_offset ());
972 /* Create a concrete key for the child within the parent. */
973 k = mgr->get_store_manager ()->get_concrete_binding
974 (child_parent_offset, sval_bit_size);
976 gcc_assert (k->concrete_p ());
982 /* Populate OUT with all bindings within this map that overlap KEY. */
985 binding_map::get_overlapping_bindings (const binding_key *key,
986 auto_vec<const binding_key *> *out)
988 for (auto iter : *this)
990 const binding_key *iter_key = iter.first;
991 if (const concrete_binding *ckey
992 = key->dyn_cast_concrete_binding ())
994 if (const concrete_binding *iter_ckey
995 = iter_key->dyn_cast_concrete_binding ())
997 if (ckey->overlaps_p (*iter_ckey))
998 out->safe_push (iter_key);
1002 /* Assume overlap. */
1003 out->safe_push (iter_key);
1008 /* Assume overlap. */
1009 out->safe_push (iter_key);
1014 /* Remove, truncate, and/or split any bindings within this map that
1017 For example, if we have:
1019 +------------------------------------+
1021 +------------------------------------+
1023 which is to be overwritten with:
1025 .......+----------------------+.......
1026 .......| new binding |.......
1027 .......+----------------------+.......
1029 this function "cuts a hole" out of the old binding:
1031 +------+......................+------+
1032 |prefix| hole for new binding |suffix|
1033 +------+......................+------+
1035 into which the new binding can be added without
1036 overlapping the prefix or suffix.
1038 The prefix and suffix (if added) will be bound to the pertinent
1039 parts of the value of the old binding.
1046 void test_5 (struct s5 *p)
1051 then after the "f = *p;" we have:
1052 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
1053 and at the "f.arr[3] = 42;" we remove the bindings overlapping
1054 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
1058 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1060 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1061 punching a hole into which the new value can be written at byte 3:
1064 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1066 value: 'char' {(char)42}
1068 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1070 If UNCERTAINTY is non-NULL, use it to record any svalues that
1071 were removed, as being maybe-bound.
1073 If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1074 in the map, due to one or both of the underlying clusters being
1075 symbolic (but not the same symbolic region). Hence even if DROP_KEY is a
1076 concrete binding it could actually be referring to the same memory as
1077 distinct concrete bindings in the map. Remove all bindings, but
1078 register any svalues with *UNCERTAINTY. */
1081 binding_map::remove_overlapping_bindings (store_manager *mgr,
1082 const binding_key *drop_key,
1083 uncertainty_t *uncertainty,
1084 bool always_overlap)
1086 /* Get the bindings of interest within this map. */
1087 auto_vec<const binding_key *> bindings;
1089 for (auto iter : *this)
1090 bindings.safe_push (iter.first); /* Add all bindings. */
1092 /* Just add overlapping bindings. */
1093 get_overlapping_bindings (drop_key, &bindings);
1096 const binding_key *iter_binding;
1097 FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1099 /* Record any svalues that were removed to *UNCERTAINTY as being
1100 maybe-bound, provided at least some part of the binding is symbolic.
1102 Specifically, if at least one of the bindings is symbolic, or we
1103 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1104 regions, then we don't know that the svalue has been overwritten,
1105 and should record that to *UNCERTAINTY.
1107 However, if we have concrete keys accessing within the same symbolic
1108 region, then we *know* that the symbolic region has been overwritten,
1109 so we don't record it to *UNCERTAINTY, as this could be a genuine
1111 const svalue *old_sval = get (iter_binding);
1113 && (drop_key->symbolic_p ()
1114 || iter_binding->symbolic_p ()
1116 uncertainty->on_maybe_bound_sval (old_sval);
1118 /* Begin by removing the old binding. */
1119 m_map.remove (iter_binding);
1121 /* Don't attempt to handle prefixes/suffixes for the
1122 "always_overlap" case; everything's being removed. */
1126 /* Now potentially add the prefix and suffix. */
1127 if (const concrete_binding *drop_ckey
1128 = drop_key->dyn_cast_concrete_binding ())
1129 if (const concrete_binding *iter_ckey
1130 = iter_binding->dyn_cast_concrete_binding ())
1132 gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1134 const bit_range &drop_bits = drop_ckey->get_bit_range ();
1135 const bit_range &iter_bits = iter_ckey->get_bit_range ();
1137 if (iter_bits.get_start_bit_offset ()
1138 < drop_bits.get_start_bit_offset ())
1140 /* We have a truncated prefix. */
1141 bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1142 (drop_bits.get_start_bit_offset ()
1143 - iter_bits.get_start_bit_offset ()));
1144 const concrete_binding *prefix_key
1145 = mgr->get_concrete_binding (prefix_bits);
1146 bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1147 const svalue *prefix_sval
1148 = old_sval->extract_bit_range (NULL_TREE,
1150 mgr->get_svalue_manager ());
1151 m_map.put (prefix_key, prefix_sval);
1154 if (iter_bits.get_next_bit_offset ()
1155 > drop_bits.get_next_bit_offset ())
1157 /* We have a truncated suffix. */
1158 bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1159 (iter_bits.get_next_bit_offset ()
1160 - drop_bits.get_next_bit_offset ()));
1161 const concrete_binding *suffix_key
1162 = mgr->get_concrete_binding (suffix_bits);
1163 bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1164 - iter_bits.get_start_bit_offset (),
1165 suffix_bits.m_size_in_bits);
1166 const svalue *suffix_sval
1167 = old_sval->extract_bit_range (NULL_TREE,
1169 mgr->get_svalue_manager ());
1170 m_map.put (suffix_key, suffix_sval);
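/* Illustrative sketch added for this section (not from the upstream
   sources): if an existing concrete binding covers bits [0, 32) and
   DROP_KEY covers bits [8, 16), the old binding is removed and replaced
   by:
     - a prefix at bits [0, 8), bound to bits [0, 8) of the old value
     - a suffix at bits [16, 32), bound to bits [16, 32) of the old value
   (both obtained via svalue::extract_bit_range relative to the old
   value), leaving bits [8, 16) free for the incoming binding.  */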
1176 /* class binding_cluster. */
1178 binding_cluster::binding_cluster (const region *base_region)
1179 : m_base_region (base_region), m_map (),
1180 m_escaped (false), m_touched (false)
1184 /* binding_cluster's copy ctor. */
1186 binding_cluster::binding_cluster (const binding_cluster &other)
1187 : m_base_region (other.m_base_region), m_map (other.m_map),
1188 m_escaped (other.m_escaped), m_touched (other.m_touched)
1192 /* binding_cluster's assignment operator. */
1195 binding_cluster::operator= (const binding_cluster &other)
1197 gcc_assert (m_base_region == other.m_base_region);
1198 m_map = other.m_map;
1199 m_escaped = other.m_escaped;
1200 m_touched = other.m_touched;
1204 /* binding_cluster's equality operator. */
1207 binding_cluster::operator== (const binding_cluster &other) const
1209 if (m_map != other.m_map)
1212 if (m_base_region != other.m_base_region)
1215 if (m_escaped != other.m_escaped)
1218 if (m_touched != other.m_touched)
1221 gcc_checking_assert (hash () == other.hash ());
1226 /* Generate a hash value for this binding_cluster. */
1229 binding_cluster::hash () const
1231 return m_map.hash ();
1234 /* Return true if this binding_cluster is symbolic
1235 i.e. its base region is symbolic. */
1238 binding_cluster::symbolic_p () const
1240 return m_base_region->get_kind () == RK_SYMBOLIC;
1243 /* Dump a representation of this binding_cluster to PP.
1244 SIMPLE controls how values and regions are to be printed.
1245 If MULTILINE, then split the dump over multiple lines and
1246 use whitespace for readability, otherwise put all on one line. */
1249 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1250 bool multiline) const
1256 pp_string (pp, " ESCAPED");
1260 pp_string (pp, "(ESCAPED)");
1266 pp_string (pp, " TOUCHED");
1270 pp_string (pp, "(TOUCHED)");
1273 m_map.dump_to_pp (pp, simple, multiline);
1276 /* Dump a multiline representation of this binding_cluster to stderr. */
1279 binding_cluster::dump (bool simple) const
1282 pp_format_decoder (&pp) = default_tree_printer;
1283 pp_show_color (&pp) = pp_show_color (global_dc->printer);
1284 pp.buffer->stream = stderr;
1285 pp_string (&pp, " cluster for: ");
1286 m_base_region->dump_to_pp (&pp, simple);
1287 pp_string (&pp, ": ");
1289 dump_to_pp (&pp, simple, true);
1294 /* Assert that this object is valid. */
1297 binding_cluster::validate () const
1299 int num_symbolic = 0;
1300 int num_concrete = 0;
1301 for (auto iter : m_map)
1303 if (iter.first->symbolic_p ())
1308 /* We shouldn't have more than one symbolic key per cluster
1309 (or one would have clobbered the other). */
1310 gcc_assert (num_symbolic < 2);
1311 /* We can't have both concrete and symbolic keys. */
1312 gcc_assert (num_concrete == 0 || num_symbolic == 0);
1315 /* Return a new json::object of the form
1316 {"escaped": true/false,
1317 "touched": true/false,
1318 "map" : object for the binding_map}. */
1321 binding_cluster::to_json () const
1323 json::object *cluster_obj = new json::object ();
1325 cluster_obj->set ("escaped", new json::literal (m_escaped));
1326 cluster_obj->set ("touched", new json::literal (m_touched));
1327 cluster_obj->set ("map", m_map.to_json ());
1332 /* Add a binding of SVAL to REG, unpacking SVAL if it is a
1336 binding_cluster::bind (store_manager *mgr,
1337 const region *reg, const svalue *sval)
1339 if (const compound_svalue *compound_sval
1340 = sval->dyn_cast_compound_svalue ())
1342 bind_compound_sval (mgr, reg, compound_sval);
1346 const binding_key *binding = binding_key::make (mgr, reg);
1347 bind_key (binding, sval);
1350 /* Bind SVAL to KEY.
1351 Unpacking of compound_svalues should already have been done by the
1352 time this is called. */
1355 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1357 gcc_assert (sval->get_kind () != SK_COMPOUND);
1359 m_map.put (key, sval);
1360 if (key->symbolic_p ())
1364 /* Subroutine of binding_cluster::bind.
1365 Unpack compound_svals when binding them, so that we bind them
1369 binding_cluster::bind_compound_sval (store_manager *mgr,
1371 const compound_svalue *compound_sval)
1373 region_offset reg_offset
1374 = reg->get_offset (mgr->get_svalue_manager ());
1375 if (reg_offset.symbolic_p ())
1378 clobber_region (mgr, reg);
1382 for (map_t::iterator iter = compound_sval->begin ();
1383 iter != compound_sval->end (); ++iter)
1385 const binding_key *iter_key = (*iter).first;
1386 const svalue *iter_sval = (*iter).second;
1388 if (const concrete_binding *concrete_key
1389 = iter_key->dyn_cast_concrete_binding ())
1391 bit_offset_t effective_start
1392 = (concrete_key->get_start_bit_offset ()
1393 + reg_offset.get_bit_offset ());
1394 const concrete_binding *effective_concrete_key
1395 = mgr->get_concrete_binding (effective_start,
1396 concrete_key->get_size_in_bits ());
1397 bind_key (effective_concrete_key, iter_sval);
1404 /* Remove all bindings overlapping REG within this cluster. */
1407 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
1409 remove_overlapping_bindings (mgr, reg, NULL);
1412 /* Remove any bindings for REG within this cluster. */
1415 binding_cluster::purge_region (store_manager *mgr, const region *reg)
1417 gcc_assert (reg->get_kind () == RK_DECL);
1418 const binding_key *binding
1419 = binding_key::make (mgr, const_cast<region *> (reg));
1420 m_map.remove (binding);
1423 /* Clobber REG and fill it with repeated copies of SVAL. */
1426 binding_cluster::fill_region (store_manager *mgr,
1430 clobber_region (mgr, reg);
1432 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1433 const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1434 const svalue *fill_sval
1435 = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1436 byte_size_sval, sval);
1437 bind (mgr, reg, fill_sval);
1440 /* Clobber REG within this cluster and fill it with zeroes. */
1443 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
1445 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1446 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1447 fill_region (mgr, reg, zero_sval);
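/* Illustrative sketch added for this section (not from the upstream
   sources): for e.g. "char buf[16]", zero_fill_region clobbers any
   existing bindings for BUF and binds the whole region to a single
   repeated value, conceptually:

     REPEATED(outer size: 16 bytes, inner value: (char)0)

   rather than creating sixteen separate one-byte bindings.  */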
1450 /* Mark REG_TO_BIND within this cluster as being unknown.
1452 Remove any bindings overlapping REG_FOR_OVERLAP.
1453 If UNCERTAINTY is non-NULL, use it to record any svalues that
1454 had bindings to them removed, as being maybe-bound.
1456 REG_TO_BIND and REG_FOR_OVERLAP are the same for
1457 store::mark_region_as_unknown, but are different in
1458 store::set_value's alias handling, for handling the case where
1459 we have a write to a symbolic REG_FOR_OVERLAP. */
1462 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1463 const region *reg_to_bind,
1464 const region *reg_for_overlap,
1465 uncertainty_t *uncertainty)
1467 remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty);
1469 /* Add a default binding to "unknown". */
1470 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1472 = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
1473 bind (mgr, reg_to_bind, sval);
1476 /* Purge state involving SVAL. */
1479 binding_cluster::purge_state_involving (const svalue *sval,
1480 region_model_manager *sval_mgr)
1482 auto_vec<const binding_key *> to_remove;
1483 auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1484 for (auto iter : m_map)
1486 const binding_key *iter_key = iter.first;
1487 if (const symbolic_binding *symbolic_key
1488 = iter_key->dyn_cast_symbolic_binding ())
1490 const region *reg = symbolic_key->get_region ();
1491 if (reg->involves_p (sval))
1492 to_remove.safe_push (iter_key);
1494 const svalue *iter_sval = iter.second;
1495 if (iter_sval->involves_p (sval))
1496 to_make_unknown.safe_push (std::make_pair(iter_key,
1497 iter_sval->get_type ()));
1499 for (auto iter : to_remove)
1501 m_map.remove (iter);
1504 for (auto iter : to_make_unknown)
1506 const svalue *new_sval
1507 = sval_mgr->get_or_create_unknown_svalue (iter.second);
1508 m_map.put (iter.first, new_sval);
1512 /* Get any SVAL bound to REG within this cluster,
1513 without checking parent regions of REG. */
1516 binding_cluster::get_binding (store_manager *mgr,
1517 const region *reg) const
1519 const binding_key *reg_binding = binding_key::make (mgr, reg);
1520 const svalue *sval = m_map.get (reg_binding);
1523 /* If we have a struct with a single field, then the binding of
1524 the field will equal that of the struct, and looking up e.g.
1525 PARENT_REG.field within:
1526 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1527 will erroneously return INIT_VAL(OTHER_REG), rather than
1528 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1529 Fix this issue by iterating upwards whilst the bindings are equal,
1530 expressing the lookups as subvalues.
1531 We have to gather a list of subregion accesses, then walk it
1532 in reverse to get the subvalues. */
1533 auto_vec<const region *> regions;
1534 while (const region *parent_reg = reg->get_parent_region ())
1536 const binding_key *parent_reg_binding
1537 = binding_key::make (mgr, parent_reg);
1538 if (parent_reg_binding == reg_binding
1539 && sval->get_type ()
1541 && sval->get_type () != reg->get_type ())
1543 regions.safe_push (reg);
1549 if (sval->get_type ()
1551 && sval->get_type () == reg->get_type ())
1554 const region *iter_reg;
1555 FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1557 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1558 sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1566 /* Get any SVAL bound to REG within this cluster,
1567 either directly for REG, or recursively checking for bindings within
1568 parent regions and extracting subvalues if need be. */
1571 binding_cluster::get_binding_recursive (store_manager *mgr,
1572 const region *reg) const
1574 if (const svalue *sval = get_binding (mgr, reg))
1576 if (reg != m_base_region)
1577 if (const region *parent_reg = reg->get_parent_region ())
1578 if (const svalue *parent_sval
1579 = get_binding_recursive (mgr, parent_reg))
1581 /* Extract child svalue from parent svalue. */
1582 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1583 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1589 /* Get any value bound for REG within this cluster. */
1592 binding_cluster::get_any_binding (store_manager *mgr,
1593 const region *reg) const
1595 /* Look for a direct binding. */
1596 if (const svalue *direct_sval
1597 = get_binding_recursive (mgr, reg))
1600 /* If we had a write to a cluster of unknown size, we might
1601 have a self-binding of the whole base region with an svalue,
1602 where the base region is symbolic.
1603 Handle such cases by returning sub_svalue instances. */
1604 if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
1606 /* Extract child svalue from parent svalue. */
1607 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1608 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1612 /* If this cluster has been touched by a symbolic write, then the content
1613 of any subregion not currently specifically bound is "UNKNOWN". */
1616 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1617 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1620 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1621 then we don't know if we're reading those values or not, so the result
1622 is also "UNKNOWN". */
1623 if (reg->get_offset (mgr->get_svalue_manager ()).symbolic_p ()
1624 && m_map.elements () > 0)
1626 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1627 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1630 if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1631 return compound_sval;
1633 /* Otherwise, the initial value, or uninitialized. */
1637 /* Attempt to get a compound_svalue for the bindings within the cluster
1638 affecting REG (which could be the base region itself).
1640 Create a compound_svalue with the subset of bindings that affect REG,
1641 offsetting them so that the offsets are relative to the start of REG
1644 For example, REG could be one element within an array of structs.
1646 Return the resulting compound_svalue, or NULL if there's a problem. */
1649 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1650 const region *reg) const
1652 region_offset cluster_offset
1653 = m_base_region->get_offset (mgr->get_svalue_manager ());
1654 if (cluster_offset.symbolic_p ())
1656 region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
1657 if (reg_offset.symbolic_p ())
1660 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1662 /* We will build the result map in two parts:
1663 (a) result_map, holding the concrete keys from this cluster,
1665 (b) default_map, holding the initial values for the region
1666 (e.g. uninitialized, initializer values, or zero), unless this
1667 cluster has been touched.
1669 We will populate (a), and as we do, clobber (b), trimming and
1670 splitting its bindings as necessary.
1671 Finally, we will merge (b) into (a), giving a concrete map
1672 that merges both the initial values and the bound values from
1673 the binding_cluster.
1674 Doing it this way reduces N for the O(N^2) intersection-finding;
1675 perhaps we should have a spatially-organized data structure for
1676 concrete keys, though. */
1678 binding_map result_map;
1679 binding_map default_map;
1681 /* Set up default values in default_map. */
1682 const svalue *default_sval;
1684 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1686 default_sval = sval_mgr->get_or_create_initial_value (reg);
1687 const binding_key *default_key = binding_key::make (mgr, reg);
1688 default_map.put (default_key, default_sval);
1690 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1692 const binding_key *key = (*iter).first;
1693 const svalue *sval = (*iter).second;
1695 if (const concrete_binding *concrete_key
1696 = key->dyn_cast_concrete_binding ())
1698 const bit_range &bound_range = concrete_key->get_bit_range ();
1700 bit_size_t reg_bit_size;
1701 if (!reg->get_bit_size (&reg_bit_size))
1704 bit_range reg_range (reg_offset.get_bit_offset (),
1707 /* Skip bindings that are outside the bit range of REG. */
1708 if (!bound_range.intersects_p (reg_range))
1711 /* We shouldn't have an exact match; that should have been
1713 gcc_assert (!(reg_range == bound_range));
1715 bit_range subrange (0, 0);
1716 if (reg_range.contains_p (bound_range, &subrange))
1718 /* We have a bound range fully within REG.
1719 Add it to map, offsetting accordingly. */
1721 /* Get offset of KEY relative to REG, rather than to
1723 const concrete_binding *offset_concrete_key
1724 = mgr->get_concrete_binding (subrange);
1725 result_map.put (offset_concrete_key, sval);
1727 /* Clobber default_map, removing/trimming/splitting where
1728 it overlaps with offset_concrete_key. */
1729 default_map.remove_overlapping_bindings (mgr,
1730 offset_concrete_key,
1733 else if (bound_range.contains_p (reg_range, &subrange))
1735 /* REG is fully within the bound range, but
1736 is not equal to it; we're extracting a subvalue. */
1737 return sval->extract_bit_range (reg->get_type (),
1739 mgr->get_svalue_manager ());
1743 /* REG and the bound range partially overlap. */
1744 bit_range reg_subrange (0, 0);
1745 bit_range bound_subrange (0, 0);
1746 reg_range.intersects_p (bound_range,
1747 &reg_subrange, &bound_subrange);
1749 /* Get the bits from the bound value for the bits at the
1750 intersection (relative to the bound value). */
1751 const svalue *overlap_sval
1752 = sval->extract_bit_range (NULL_TREE,
1754 mgr->get_svalue_manager ());
1756 /* Get key for overlap, relative to the REG. */
1757 const concrete_binding *overlap_concrete_key
1758 = mgr->get_concrete_binding (reg_subrange);
1759 result_map.put (overlap_concrete_key, overlap_sval);
1761 /* Clobber default_map, removing/trimming/splitting where
1762 it overlaps with overlap_concrete_key. */
1763 default_map.remove_overlapping_bindings (mgr,
1764 overlap_concrete_key,
1769 /* Can't handle symbolic bindings. */
1773 if (result_map.elements () == 0)
1776 /* Merge any bindings from default_map into result_map. */
1777 for (auto iter : default_map)
1779 const binding_key *key = iter.first;
1780 const svalue *sval = iter.second;
1781 result_map.put (key, sval);
1784 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
1787 /* Remove, truncate, and/or split any bindings within this map that
1790 If REG's base region or this cluster is symbolic and they're different
1791 base regions, then remove everything in this cluster's map, on the
1792 grounds that REG could be referring to the same memory as anything
1795 If UNCERTAINTY is non-NULL, use it to record any svalues that
1796 were removed, as being maybe-bound. */
1799 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1801 uncertainty_t *uncertainty)
1803 const binding_key *reg_binding = binding_key::make (mgr, reg);
1805 const region *cluster_base_reg = get_base_region ();
1806 const region *other_base_reg = reg->get_base_region ();
1807 /* If at least one of the base regions involved is symbolic, and they're
1808 not the same base region, then consider everything in the map as
1809 potentially overlapping with reg_binding (even if it's a concrete
1810 binding and things in the map are concrete - they could be referring
1811 to the same memory when the symbolic base regions are taken into
1813 bool always_overlap = (cluster_base_reg != other_base_reg
1814 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
1815 || other_base_reg->get_kind () == RK_SYMBOLIC));
1816 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
1820 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1822 Return true if they can be merged, false otherwise. */
1825 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1826 const binding_cluster *cluster_b,
1827 binding_cluster *out_cluster,
1830 model_merger *merger)
1832 gcc_assert (out_cluster);
1834 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1835 true if either of the inputs is true. */
1836 if ((cluster_a && cluster_a->m_escaped)
1837 || (cluster_b && cluster_b->m_escaped))
1838 out_cluster->m_escaped = true;
1839 if ((cluster_a && cluster_a->m_touched)
1840 || (cluster_b && cluster_b->m_touched))
1841 out_cluster->m_touched = true;
1843 /* At least one of CLUSTER_A and CLUSTER_B are non-NULL, but either
1844 could be NULL. Handle these cases. */
1845 if (cluster_a == NULL)
1847 gcc_assert (cluster_b != NULL);
1848 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1849 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1852 if (cluster_b == NULL)
1854 gcc_assert (cluster_a != NULL);
1855 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1856 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1860 /* The "both inputs are non-NULL" case. */
1861 gcc_assert (cluster_a != NULL && cluster_b != NULL);
1862 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1863 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1865 hash_set<const binding_key *> keys;
1866 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1867 iter_a != cluster_a->m_map.end (); ++iter_a)
1869 const binding_key *key_a = (*iter_a).first;
1872 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1873 iter_b != cluster_b->m_map.end (); ++iter_b)
1875 const binding_key *key_b = (*iter_b).first;
1878 int num_symbolic_keys = 0;
1879 int num_concrete_keys = 0;
1880 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1881 iter != keys.end (); ++iter)
1883 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1884 const binding_key *key = *iter;
1885 const svalue *sval_a = cluster_a->get_any_value (key);
1886 const svalue *sval_b = cluster_b->get_any_value (key);
1888 if (key->symbolic_p ())
1889 num_symbolic_keys++;
1891 num_concrete_keys++;
1893 if (sval_a == sval_b)
1895 gcc_assert (sval_a);
1896 out_cluster->m_map.put (key, sval_a);
1899 else if (sval_a && sval_b)
1901 if (const svalue *merged_sval
1902 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1904 out_cluster->m_map.put (key, merged_sval);
1907 /* Merger of the svalues failed. Reject merger of the cluster. */
1911 /* If we get here, then one cluster binds this key and the other
1912 doesn't; merge them as "UNKNOWN". */
1913 gcc_assert (sval_a || sval_b);
1915 const svalue *bound_sval = sval_a ? sval_a : sval_b;
1916 tree type = bound_sval->get_type ();
1917 const svalue *unknown_sval
1918 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
1920 /* ...but reject the merger if this sval shouldn't be mergeable
1921 (e.g. reject merging svalues that have non-purgable sm-state,
1922 to avoid falsely reporting memory leaks by merging them
1923 with something else). */
1924 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
1927 out_cluster->m_map.put (key, unknown_sval);
1930 /* We can have at most one symbolic key per cluster,
1931 and if we do, we can't have any concrete keys.
1932 If this happens, mark the cluster as touched, with no keys. */
1933 if (num_symbolic_keys >= 2
1934 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
1936 out_cluster->m_touched = true;
1937 out_cluster->m_map.empty ();
1940 /* We don't handle other kinds of overlaps yet. */
1945 /* Update this cluster to reflect an attempt to merge OTHER where there
1946 is no other cluster to merge with, and so we're notionally merging the
1947 bound values in OTHER with the initial value of the relevant regions.
1949 Any bound keys in OTHER should be bound to unknown in this. */
1952 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
1956 for (map_t::iterator iter = other->m_map.begin ();
1957 iter != other->m_map.end (); ++iter)
1959 const binding_key *iter_key = (*iter).first;
1960 const svalue *iter_sval = (*iter).second;
1961 const svalue *unknown_sval
1962 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
1963 (iter_sval->get_type ());
1964 m_map.put (iter_key, unknown_sval);
1966 /* For any pointers in OTHER, the merger means that the
1967 concrete pointer becomes an unknown value, which could
1968 show up as a false report of a leak when considering what
1969 pointers are live before vs after.
1970 Avoid this by marking the base regions they point to as having
1972 if (const region_svalue *region_sval
1973 = iter_sval->dyn_cast_region_svalue ())
1975 const region *base_reg
1976 = region_sval->get_pointee ()->get_base_region ();
1977 if (base_reg->tracked_p ()
1978 && !base_reg->symbolic_for_unknown_ptr_p ())
1980 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
1981 c->mark_as_escaped ();
1987 /* Mark this cluster as having escaped. */
1990 binding_cluster::mark_as_escaped ()
1995 /* If this cluster has escaped (by this call, or by an earlier one, or
1996 by being an external param), then unbind all values and mark it
1997 as "touched", so that it has a conjured value, rather than an
1999 Use P to purge state involving conjured_svalues. */
2002 binding_cluster::on_unknown_fncall (const gcall *call,
2004 const conjured_purge &p)
2010 /* Bind it to a new "conjured" value using CALL. */
2012 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2013 (m_base_region->get_type (), call, m_base_region, p);
2014 bind (mgr, m_base_region, sval);
2020 /* Mark this cluster as having been clobbered by STMT.
2021 Use P to purge state involving conjured_svalues. */
2024 binding_cluster::on_asm (const gasm *stmt,
2026 const conjured_purge &p)
2030 /* Bind it to a new "conjured" value using STMT. */
2032 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2033 (m_base_region->get_type (), stmt, m_base_region, p);
2034 bind (mgr, m_base_region, sval);
2039 /* Return true if this binding_cluster has no information
2040 i.e. if there are no bindings, and it hasn't been marked as having
2041 escaped, or touched symbolically. */
2044 binding_cluster::redundant_p () const
2046 return (m_map.elements () == 0
2051 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
2054 append_pathvar_with_type (path_var pv,
2056 auto_vec<path_var> *out_pvs)
2058 gcc_assert (pv.m_tree);
2060 if (TREE_TYPE (pv.m_tree) != type)
2061 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
2063 out_pvs->safe_push (pv);
2066 /* Find representative path_vars for SVAL within this binding of BASE_REG,
2067 appending the results to OUT_PVS. */
2070 binding_cluster::get_representative_path_vars (const region_model *model,
2071 svalue_set *visited,
2072 const region *base_reg,
2074 auto_vec<path_var> *out_pvs)
2077 sval = simplify_for_binding (sval);
2079 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2081 const binding_key *key = (*iter).first;
2082 const svalue *bound_sval = (*iter).second;
2083 if (bound_sval == sval)
2085 if (const concrete_binding *ckey
2086 = key->dyn_cast_concrete_binding ())
2088 auto_vec <const region *> subregions;
2089 base_reg->get_subregions_for_binding
2090 (model->get_manager (),
2091 ckey->get_start_bit_offset (),
2092 ckey->get_size_in_bits (),
2096 const region *subregion;
2097 FOR_EACH_VEC_ELT (subregions, i, subregion)
2100 = model->get_representative_path_var (subregion,
2102 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2107 const symbolic_binding *skey = (const symbolic_binding *)key;
2109 = model->get_representative_path_var (skey->get_region (),
2111 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2117 /* Get any svalue bound to KEY, or NULL. */
2120 binding_cluster::get_any_value (const binding_key *key) const
2122 return m_map.get (key);
2125 /* If this cluster has a single direct binding for the whole of the region,
2127 For use in simplifying dumps. */
2130 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2132 /* Fail gracefully if MGR is NULL to make it easier to dump store
2133 instances in the debugger. */
2137 if (m_map.elements () != 1)
2140 const binding_key *key = binding_key::make (mgr, m_base_region);
2141 return get_any_value (key);
2144 /* class store_manager. */
2147 store_manager::get_logger () const
2149 return m_mgr->get_logger ();
2152 /* binding consolidation. */
2154 const concrete_binding *
2155 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
2156 bit_offset_t size_in_bits)
2158 concrete_binding b (start_bit_offset, size_in_bits);
2159 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2162 concrete_binding *to_save = new concrete_binding (b);
2163 m_concrete_binding_key_mgr.put (b, to_save);
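/* Illustrative sketch added for this section (not from the upstream
   sources): because binding keys are consolidated by the store_manager,
   asking for the same concrete range twice yields the same instance, so
   keys can be compared by pointer.  Assumes the selftest ASSERT_* macros;
   guarded out as an example only.  */
#if 0
static void
sketch_binding_key_consolidation (store_manager *mgr)
{
  const concrete_binding *k1 = mgr->get_concrete_binding (0, 32);
  const concrete_binding *k2 = mgr->get_concrete_binding (0, 32);
  ASSERT_EQ (k1, k2);	/* Same consolidated instance.  */
}
#endif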
2167 const symbolic_binding *
2168 store_manager::get_symbolic_binding (const region *reg)
2170 symbolic_binding b (reg);
2171 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2174 symbolic_binding *to_save = new symbolic_binding (b);
2175 m_symbolic_binding_key_mgr.put (b, to_save);
2181 /* store's default ctor. */
2184 : m_called_unknown_fn (false)
2188 /* store's copy ctor. */
2190 store::store (const store &other)
2191 : m_cluster_map (other.m_cluster_map.elements ()),
2192 m_called_unknown_fn (other.m_called_unknown_fn)
2194 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2195 iter != other.m_cluster_map.end ();
2198 const region *reg = (*iter).first;
2200 binding_cluster *c = (*iter).second;
2202 m_cluster_map.put (reg, new binding_cluster (*c));
2210 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2211 iter != m_cluster_map.end ();
2213 delete (*iter).second;
2216 /* store's assignment operator. */
2219 store::operator= (const store &other)
2221 /* Delete existing cluster map. */
2222 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2223 iter != m_cluster_map.end ();
2225 delete (*iter).second;
2226 m_cluster_map.empty ();
2228 m_called_unknown_fn = other.m_called_unknown_fn;
2230 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2231 iter != other.m_cluster_map.end ();
2234 const region *reg = (*iter).first;
2236 binding_cluster *c = (*iter).second;
2238 m_cluster_map.put (reg, new binding_cluster (*c));
2243 /* store's equality operator. */
2246 store::operator== (const store &other) const
2248 if (m_called_unknown_fn != other.m_called_unknown_fn)
2251 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2254 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2255 iter != m_cluster_map.end ();
2258 const region *reg = (*iter).first;
2259 binding_cluster *c = (*iter).second;
2260 binding_cluster **other_slot
2261 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2262 if (other_slot == NULL)
2264 if (*c != **other_slot)
2268 gcc_checking_assert (hash () == other.hash ());
2273 /* Get a hash value for this store. */
2276 store::hash () const
2278 hashval_t result = 0;
2279 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2280 iter != m_cluster_map.end ();
2282 result ^= (*iter).second->hash ();
2286 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2287 removing duplicate parents. */
2290 get_sorted_parent_regions (auto_vec<const region *> *out,
2291 auto_vec<const region *> &in)
2293 /* Get the set of parent regions. */
2294 hash_set<const region *> parent_regions;
2295 const region *iter_reg;
2297 FOR_EACH_VEC_ELT (in, i, iter_reg)
2299 const region *parent_reg = iter_reg->get_parent_region ();
2300 gcc_assert (parent_reg);
2301 parent_regions.add (parent_reg);
2305 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2306 iter != parent_regions.end(); ++iter)
2307 out->safe_push (*iter);
2310 out->qsort (region::cmp_ptr_ptr);
2313 /* Dump a representation of this store to PP, using SIMPLE to control how
2314 svalues and regions are printed.
2315 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2316 (to make it easier to use from the debugger). */
2319 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2320 store_manager *mgr) const
2322 /* Sort into some deterministic order. */
2323 auto_vec<const region *> base_regions;
2324 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2325 iter != m_cluster_map.end (); ++iter)
2327 const region *base_reg = (*iter).first;
2328 base_regions.safe_push (base_reg);
2330 base_regions.qsort (region::cmp_ptr_ptr);
2332 /* Gather clusters, organize by parent region, so that we can group
2333 together locals, globals, etc. */
2334 auto_vec<const region *> parent_regions;
2335 get_sorted_parent_regions (&parent_regions, base_regions);
2337 const region *parent_reg;
2339 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2341 gcc_assert (parent_reg);
2342 pp_string (pp, "clusters within ");
2343 parent_reg->dump_to_pp (pp, simple);
2347 pp_string (pp, " {");
2349 const region *base_reg;
2351 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2353 /* This is O(N * M), but N ought to be small. */
2354 if (base_reg->get_parent_region () != parent_reg)
2356 binding_cluster *cluster
2357 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2361 pp_string (pp, ", ");
2363 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2365 /* Special-case to simplify dumps for the common case where
2366 we just have one value directly bound to the whole of a base region. */
2370 pp_string (pp, " cluster for: ");
2371 base_reg->dump_to_pp (pp, simple);
2372 pp_string (pp, ": ");
2373 sval->dump_to_pp (pp, simple);
2374 if (cluster->escaped_p ())
2375 pp_string (pp, " (ESCAPED)");
2376 if (cluster->touched_p ())
2377 pp_string (pp, " (TOUCHED)");
2382 pp_string (pp, "region: {");
2383 base_reg->dump_to_pp (pp, simple);
2384 pp_string (pp, ", value: ");
2385 sval->dump_to_pp (pp, simple);
2386 if (cluster->escaped_p ())
2387 pp_string (pp, " (ESCAPED)");
2388 if (cluster->touched_p ())
2389 pp_string (pp, " (TOUCHED)");
2390 pp_string (pp, "}");
2395 pp_string (pp, " cluster for: ");
2396 base_reg->dump_to_pp (pp, simple);
2398 cluster->dump_to_pp (pp, simple, multiline);
2402 pp_string (pp, "base region: {");
2403 base_reg->dump_to_pp (pp, simple);
2404 pp_string (pp, "} has cluster: {");
2405 cluster->dump_to_pp (pp, simple, multiline);
2406 pp_string (pp, "}");
2410 pp_string (pp, "}");
2412 pp_printf (pp, "m_called_unknown_fn: %s",
2413 m_called_unknown_fn ? "TRUE" : "FALSE");
2418 /* Dump a multiline representation of this store to stderr. */
2421 store::dump (bool simple) const
2424 pp_format_decoder (&pp) = default_tree_printer;
2425 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2426 pp.buffer->stream = stderr;
2427 dump_to_pp (&pp, simple, true, NULL);
2432 /* Assert that this object is valid. */
2435 store::validate () const
2437 for (auto iter : m_cluster_map)
2438 iter.second->validate ();
2441 /* Return a new json::object of the form
2442 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2443 ... for each cluster within parent region},
2444 ...for each parent region,
2445 "called_unknown_fn": true/false}. */
2448 store::to_json () const
2450 json::object *store_obj = new json::object ();
2452 /* Sort into some deterministic order. */
2453 auto_vec<const region *> base_regions;
2454 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2455 iter != m_cluster_map.end (); ++iter)
2457 const region *base_reg = (*iter).first;
2458 base_regions.safe_push (base_reg);
2460 base_regions.qsort (region::cmp_ptr_ptr);
2462 /* Gather clusters, organize by parent region, so that we can group
2463 together locals, globals, etc. */
2464 auto_vec<const region *> parent_regions;
2465 get_sorted_parent_regions (&parent_regions, base_regions);
2467 const region *parent_reg;
2469 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2471 gcc_assert (parent_reg);
2473 json::object *clusters_in_parent_reg_obj = new json::object ();
2475 const region *base_reg;
2477 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2479 /* This is O(N * M), but N ought to be small. */
2480 if (base_reg->get_parent_region () != parent_reg)
2482 binding_cluster *cluster
2483 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2484 label_text base_reg_desc = base_reg->get_desc ();
2485 clusters_in_parent_reg_obj->set (base_reg_desc.get (),
2486 cluster->to_json ());
2488 label_text parent_reg_desc = parent_reg->get_desc ();
2489 store_obj->set (parent_reg_desc.get (), clusters_in_parent_reg_obj);
2492 store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
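/* For instance (illustrative only; the exact region descriptions below are
   hypothetical), a store with a single global "g" bound to 42 might
   serialize along the lines of:

     {"globals": {"g": {"bits 0-31": "(int)42"}},
      "called_unknown_fn": false}
*/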
2497 /* Get any svalue bound to REG, or NULL. */
2500 store::get_any_binding (store_manager *mgr, const region *reg) const
2502 const region *base_reg = reg->get_base_region ();
2503 binding_cluster **cluster_slot
2504 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2507 return (*cluster_slot)->get_any_binding (mgr, reg);
2510 /* Set the value of LHS_REG to RHS_SVAL. */
2513 store::set_value (store_manager *mgr, const region *lhs_reg,
2514 const svalue *rhs_sval,
2515 uncertainty_t *uncertainty)
2517 logger *logger = mgr->get_logger ();
2520 remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2522 if (lhs_reg->get_type ())
2523 rhs_sval = simplify_for_binding (rhs_sval);
2524 /* ...but if we have no type for the region, retain any cast. */
2526 const region *lhs_base_reg = lhs_reg->get_base_region ();
2527 binding_cluster *lhs_cluster;
2528 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2530 /* Reject attempting to bind values into a symbolic region
2531 for an unknown ptr; merely invalidate values below. */
2534 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2535 then treat the region being pointed to as having escaped. */
2536 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2538 const region *ptr_dst = ptr_sval->get_pointee ();
2539 const region *ptr_base_reg = ptr_dst->get_base_region ();
2540 mark_as_escaped (ptr_base_reg);
2543 else if (lhs_base_reg->tracked_p ())
2545 lhs_cluster = get_or_create_cluster (lhs_base_reg);
2546 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2550 /* Reject attempting to bind values into an untracked region;
2551 merely invalidate values below. */
2555 /* Bindings to a cluster can affect other clusters if a symbolic
2556 base region is involved.
2557 Writes to concrete clusters can't affect other concrete clusters,
2558 but can affect symbolic clusters.
2559 Writes to symbolic clusters can affect both concrete and symbolic clusters.
2561 Invalidate our knowledge of other clusters that might have been
2562 affected by the write. */
2563 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2564 iter != m_cluster_map.end (); ++iter)
2566 const region *iter_base_reg = (*iter).first;
2567 binding_cluster *iter_cluster = (*iter).second;
2568 if (iter_base_reg != lhs_base_reg
2569 && (lhs_cluster == NULL
2570 || lhs_cluster->symbolic_p ()
2571 || iter_cluster->symbolic_p ()))
2573 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2574 switch (t_alias.get_value ())
2579 case tristate::TS_UNKNOWN:
2582 pretty_printer *pp = logger->get_printer ();
2583 logger->start_log_line ();
2584 logger->log_partial ("possible aliasing of ");
2585 iter_base_reg->dump_to_pp (pp, true);
2586 logger->log_partial (" when writing SVAL: ");
2587 rhs_sval->dump_to_pp (pp, true);
2588 logger->log_partial (" to LHS_REG: ");
2589 lhs_reg->dump_to_pp (pp, true);
2590 logger->end_log_line ();
2592 /* Mark all of iter_cluster's iter_base_reg as unknown,
2593 using LHS_REG when considering overlaps, to handle
2594 symbolic vs concrete issues. */
2595 iter_cluster->mark_region_as_unknown
2597 iter_base_reg, /* reg_to_bind */
2598 lhs_reg, /* reg_for_overlap */
2602 case tristate::TS_TRUE:
2606 case tristate::TS_FALSE:
2607 /* If they can't be aliases, then don't invalidate this cluster. */
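/* An illustrative example of the invalidation above (hypothetical analyzed
   code, not from the original sources):

     int g;
     void f (int *p)
     {
       g = 1;
       *p = 2;
     }

   The write through "*p" goes to a symbolic cluster, and "p" might point at
   "g", so the concrete cluster for "g" is marked as unknown rather than
   retaining the now-unreliable binding g == 1.  */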
2615 /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2618 store::eval_alias (const region *base_reg_a,
2619 const region *base_reg_b) const
2621 /* SSA names can't alias. */
2622 tree decl_a = base_reg_a->maybe_get_decl ();
2623 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2624 return tristate::TS_FALSE;
2625 tree decl_b = base_reg_b->maybe_get_decl ();
2626 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2627 return tristate::TS_FALSE;
2629 /* Try both ways, for symmetry. */
2630 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2631 if (ts_ab.is_false ())
2632 return tristate::TS_FALSE;
2633 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2634 if (ts_ba.is_false ())
2635 return tristate::TS_FALSE;
2636 return tristate::TS_UNKNOWN;
2639 /* Half of store::eval_alias; called twice for symmetry. */
2642 store::eval_alias_1 (const region *base_reg_a,
2643 const region *base_reg_b) const
2645 if (const symbolic_region *sym_reg_a
2646 = base_reg_a->dyn_cast_symbolic_region ())
2648 const svalue *sval_a = sym_reg_a->get_pointer ();
2649 if (tree decl_b = base_reg_b->maybe_get_decl ())
2651 if (!may_be_aliased (decl_b))
2652 return tristate::TS_FALSE;
2653 if (sval_a->get_kind () == SK_INITIAL)
2654 if (!is_global_var (decl_b))
2656 /* The initial value of a pointer can't point to a local. */
2657 return tristate::TS_FALSE;
2660 if (sval_a->get_kind () == SK_INITIAL
2661 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2663 /* The initial value of a pointer can't point to a
2664 region that was allocated on the heap after the beginning of the path. */
2666 return tristate::TS_FALSE;
2668 if (const widening_svalue *widening_sval_a
2669 = sval_a->dyn_cast_widening_svalue ())
2671 const svalue *base = widening_sval_a->get_base_svalue ();
2672 if (const region_svalue *region_sval
2673 = base->dyn_cast_region_svalue ())
2675 const region *pointee = region_sval->get_pointee ();
2676 /* If we have sval_a is WIDENING(&REGION, OP), and
2677 B can't alias REGION, then B can't alias A either.
2678 For example, A might arise from
2679 for (ptr = &REGION; ...; ptr++)
2680 where sval_a is ptr in the 2nd iteration of the loop.
2681 We want to ensure that "*ptr" can only clobber things
2682 within REGION's base region. */
2683 tristate ts = eval_alias (pointee->get_base_region (),
2686 return tristate::TS_FALSE;
2690 return tristate::TS_UNKNOWN;
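/* Illustrative example of the rules above (hypothetical analyzed code):

     static int *p;
     void f (void)
     {
       int local;
       int *q = (int *) __builtin_malloc (sizeof (int));
       *p = 1;
     }

   The initial value of "p" can't point at "local" (initial pointer values
   can't point to locals) nor at "*q" (that heap allocation happens after
   the start of the path), so the write "*p = 1" need not invalidate the
   clusters for "local" or for the heap-allocated region.  */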
2693 /* Remove all bindings overlapping REG within this store. */
2696 store::clobber_region (store_manager *mgr, const region *reg)
2698 const region *base_reg = reg->get_base_region ();
2699 binding_cluster **slot = m_cluster_map.get (base_reg);
2702 binding_cluster *cluster = *slot;
2703 cluster->clobber_region (mgr, reg);
2704 if (cluster->redundant_p ())
2707 m_cluster_map.remove (base_reg);
2711 /* Remove any bindings for REG within this store. */
2714 store::purge_region (store_manager *mgr, const region *reg)
2716 const region *base_reg = reg->get_base_region ();
2717 binding_cluster **slot = m_cluster_map.get (base_reg);
2720 binding_cluster *cluster = *slot;
2721 cluster->purge_region (mgr, reg);
2722 if (cluster->redundant_p ())
2725 m_cluster_map.remove (base_reg);
2729 /* Fill REG with SVAL. */
2732 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
2734 const region *base_reg = reg->get_base_region ();
2735 if (base_reg->symbolic_for_unknown_ptr_p ()
2736 || !base_reg->tracked_p ())
2738 binding_cluster *cluster = get_or_create_cluster (base_reg);
2739 cluster->fill_region (mgr, reg, sval);
2742 /* Zero-fill REG. */
2745 store::zero_fill_region (store_manager *mgr, const region *reg)
2747 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2748 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2749 fill_region (mgr, reg, zero_sval);
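/* For instance (an assumption about typical usage, not taken from this
   file): zeroing an object in the analyzed code, e.g.

     struct st s;
     __builtin_memset (&s, 0, sizeof (s));

   can be modeled by zero_fill_region on the region for "s", i.e. by filling
   the whole region with the char-typed constant 0.  */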
2752 /* Mark REG as having unknown content. */
2755 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2756 uncertainty_t *uncertainty)
2758 const region *base_reg = reg->get_base_region ();
2759 if (base_reg->symbolic_for_unknown_ptr_p ()
2760 || !base_reg->tracked_p ())
2762 binding_cluster *cluster = get_or_create_cluster (base_reg);
2763 cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty);
2766 /* Purge state involving SVAL. */
2769 store::purge_state_involving (const svalue *sval,
2770 region_model_manager *sval_mgr)
2772 auto_vec <const region *> base_regs_to_purge;
2773 for (auto iter : m_cluster_map)
2775 const region *base_reg = iter.first;
2776 if (base_reg->involves_p (sval))
2777 base_regs_to_purge.safe_push (base_reg);
2780 binding_cluster *cluster = iter.second;
2781 cluster->purge_state_involving (sval, sval_mgr);
2785 for (auto iter : base_regs_to_purge)
2786 purge_cluster (iter);
2789 /* Get the cluster for BASE_REG, or NULL (const version). */
2791 const binding_cluster *
2792 store::get_cluster (const region *base_reg) const
2794 gcc_assert (base_reg);
2795 gcc_assert (base_reg->get_base_region () == base_reg);
2796 if (binding_cluster **slot
2797 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2803 /* Get the cluster for BASE_REG, or NULL (non-const version). */
2806 store::get_cluster (const region *base_reg)
2808 gcc_assert (base_reg);
2809 gcc_assert (base_reg->get_base_region () == base_reg);
2810 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2816 /* Get the cluster for BASE_REG, creating it if doesn't already exist. */
2819 store::get_or_create_cluster (const region *base_reg)
2821 gcc_assert (base_reg);
2822 gcc_assert (base_reg->get_base_region () == base_reg);
2824 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2825 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2827 /* We shouldn't create clusters for base regions that aren't trackable. */
2828 gcc_assert (base_reg->tracked_p ());
2830 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2833 binding_cluster *cluster = new binding_cluster (base_reg);
2834 m_cluster_map.put (base_reg, cluster);
2839 /* Remove any cluster for BASE_REG, for use by
2840 region_model::unbind_region_and_descendents
2841 when popping stack frames and handling deleted heap regions. */
2844 store::purge_cluster (const region *base_reg)
2846 gcc_assert (base_reg->get_base_region () == base_reg);
2847 binding_cluster **slot = m_cluster_map.get (base_reg);
2850 binding_cluster *cluster = *slot;
2852 m_cluster_map.remove (base_reg);
2855 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
2856 Return true if successful, or false if the stores can't be merged. */
2859 store::can_merge_p (const store *store_a, const store *store_b,
2860 store *out_store, store_manager *mgr,
2861 model_merger *merger)
2863 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
2864 out_store->m_called_unknown_fn = true;
2866 /* Get the union of all base regions for STORE_A and STORE_B. */
2867 hash_set<const region *> base_regions;
2868 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
2869 iter_a != store_a->m_cluster_map.end (); ++iter_a)
2871 const region *base_reg_a = (*iter_a).first;
2872 base_regions.add (base_reg_a);
2874 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
2875 iter_b != store_b->m_cluster_map.end (); ++iter_b)
2877 const region *base_reg_b = (*iter_b).first;
2878 base_regions.add (base_reg_b);
2881 /* Sort the base regions before considering them. This ought not to
2882 affect the results, but can affect which types UNKNOWN_REGIONs are
2883 created for in a run; sorting them thus avoids minor differences in logfiles. */
2885 auto_vec<const region *> vec_base_regions (base_regions.elements ());
2886 for (hash_set<const region *>::iterator iter = base_regions.begin ();
2887 iter != base_regions.end (); ++iter)
2888 vec_base_regions.quick_push (*iter);
2889 vec_base_regions.qsort (region::cmp_ptr_ptr);
2891 const region *base_reg;
2892 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
2894 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
2895 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
2896 /* At least one of cluster_a and cluster_b must be non-NULL. */
2897 binding_cluster *out_cluster
2898 = out_store->get_or_create_cluster (base_reg);
2899 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
2900 out_cluster, out_store, mgr, merger))
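/* Illustrative example (hypothetical analyzed code): when merging the
   states from the two arms of

     if (flag)
       x = 3;
     else
       x = 4;

   the clusters for "x" disagree, so the merged cluster typically ends up
   binding "x" to an unknown/widened value rather than making the whole
   merge fail; clusters that agree are carried over unchanged.  */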
2906 /* Mark the cluster for BASE_REG as having escaped.
2907 For use when handling an unrecognized function call, and
2908 for params to "top-level" calls.
2909 Further unknown function calls could touch it, even if the cluster
2910 isn't reachable from args of those calls. */
2913 store::mark_as_escaped (const region *base_reg)
2915 gcc_assert (base_reg);
2916 gcc_assert (base_reg->get_base_region () == base_reg);
2918 if (base_reg->symbolic_for_unknown_ptr_p ()
2919 || !base_reg->tracked_p ())
2922 binding_cluster *cluster = get_or_create_cluster (base_reg);
2923 cluster->mark_as_escaped ();
2926 /* Handle an unknown fncall by updating any clusters that have escaped
2927 (either in this fncall, or in a prior one). */
2930 store::on_unknown_fncall (const gcall *call, store_manager *mgr,
2931 const conjured_purge &p)
2933 m_called_unknown_fn = true;
2935 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2936 iter != m_cluster_map.end (); ++iter)
2937 (*iter).second->on_unknown_fncall (call, mgr, p);
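/* Illustrative example (hypothetical analyzed code):

     extern void opaque (int *);
     void f (void)
     {
       int local = 1;
       opaque (&local);
       opaque (0);
     }

   The first call passes &local to unknown code, so the cluster for "local"
   is marked as escaped.  The second unknown call could therefore have
   modified "local" even though it takes no pointer to it, so
   on_unknown_fncall gives the escaped cluster fresh conjured/unknown
   contents rather than trusting "local == 1".  */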
2940 /* Return true if a non-const pointer to BASE_REG (or something within it)
2941 has escaped to code outside of the TU being analyzed. */
2944 store::escaped_p (const region *base_reg) const
2946 gcc_assert (base_reg);
2947 gcc_assert (base_reg->get_base_region () == base_reg);
2949 if (binding_cluster **cluster_slot
2950 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
2951 return (*cluster_slot)->escaped_p ();
2955 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
2956 this store, using VISITED to ensure the traversal terminates. */
2959 store::get_representative_path_vars (const region_model *model,
2960 svalue_set *visited,
2962 auto_vec<path_var> *out_pvs) const
2966 /* Find all bindings that reference SVAL. */
2967 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2968 iter != m_cluster_map.end (); ++iter)
2970 const region *base_reg = (*iter).first;
2971 binding_cluster *cluster = (*iter).second;
2972 cluster->get_representative_path_vars (model, visited, base_reg, sval,
2976 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
2978 const region *reg = init_sval->get_region ();
2979 if (path_var pv = model->get_representative_path_var (reg,
2981 out_pvs->safe_push (pv);
2985 /* Remove all bindings overlapping REG within this store, removing
2986 any clusters that become redundant.
2988 If UNCERTAINTY is non-NULL, use it to record any svalues that
2989 were removed, as being maybe-bound. */
2992 store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
2993 uncertainty_t *uncertainty)
2995 const region *base_reg = reg->get_base_region ();
2996 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
2998 binding_cluster *cluster = *cluster_slot;
2999 if (reg == base_reg && !escaped_p (base_reg))
3001 /* Remove whole cluster. */
3002 m_cluster_map.remove (base_reg);
3006 cluster->remove_overlapping_bindings (mgr, reg, uncertainty);
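/* Illustrative example (hypothetical analyzed code): given

     struct coord { int x; int y; } c;
     c.x = 1;
     c.y = 2;
     c.x = 3;

   the final write only overlaps the bits of "c.x", so the binding for
   "c.y" is kept while the stale binding for "c.x" is removed (and, if
   UNCERTAINTY is non-NULL, the removed svalue can be recorded as
   maybe-bound).  */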
3010 /* Subclass of visitor that accumulates a hash_set of the regions that were visited. */
3013 struct region_finder : public visitor
3015 void visit_region (const region *reg) final override
3020 hash_set<const region *> m_regs;
3023 /* Canonicalize this store, to maximize the chance of equality between region_model instances. */
3027 store::canonicalize (store_manager *mgr)
3030 /* If we have e.g. a cluster for: HEAP_ALLOCATED_REGION(543)
3033 where the heap region is empty and unreferenced, then purge that
3034 cluster, to avoid unbounded state chains involving these. */
3036 /* Find regions that are referenced by bound values in the store. */
3038 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3039 iter != m_cluster_map.end (); ++iter)
3041 binding_cluster *cluster = (*iter).second;
3042 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3043 bind_iter != cluster->m_map.end (); ++bind_iter)
3044 (*bind_iter).second->accept (&s);
3047 /* Locate heap-allocated regions that have empty bindings and that weren't found by the traversal above. */
3049 hash_set<const region *> purgeable_regions;
3050 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3051 iter != m_cluster_map.end (); ++iter)
3053 const region *base_reg = (*iter).first;
3054 binding_cluster *cluster = (*iter).second;
3055 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
3057 if (cluster->empty_p ())
3058 if (!s.m_regs.contains (base_reg))
3059 purgeable_regions.add (base_reg);
3061 /* Also cover the UNKNOWN case. */
3062 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
3063 if (sval->get_kind () == SK_UNKNOWN)
3064 if (!s.m_regs.contains (base_reg))
3065 purgeable_regions.add (base_reg);
3070 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
3071 iter != purgeable_regions.end (); ++iter)
3073 const region *base_reg = *iter;
3074 purge_cluster (base_reg);
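/* Illustrative example (hypothetical analyzed code):

     void test (void)
     {
       __builtin_malloc (16);
     }

   The result of the allocation is never stored, so its
   HEAP_ALLOCATED_REGION ends up with an empty, unreferenced cluster;
   purging such clusters keeps otherwise-identical stores comparing equal
   instead of accumulating leftover heap clusters.  */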
3078 /* Subroutine for use by exploded_path::feasible_p.
3080 We need to deal with state differences between:
3081 (a) when the exploded_graph is being initially constructed and
3082 (b) when replaying the state changes along a specific path in
3083 exploded_path::feasible_p.
3085 In (a), state merging happens, so when exploring a loop
3086 for (i = 0; i < 1024; i++)
3087 on successive iterations we have i == 0, then i == WIDENING.
3089 In (b), no state merging happens, so naively replaying the path
3090 that goes twice through the loop then exits it
3091 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3092 that exits the loop, which would be found to be infeasible as i == 1,
3093 and the path would be rejected.
3095 We need to fix up state during replay. This subroutine is
3096 called whenever we enter a supernode that we've already
3097 visited along this exploded_path, passing in OTHER_STORE
3098 from the destination enode's state.
3100 Find bindings to widening values in OTHER_STORE.
3101 For all that are found, update the binding in this store to UNKNOWN. */
3104 store::loop_replay_fixup (const store *other_store,
3105 region_model_manager *mgr)
3107 gcc_assert (other_store);
3108 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3109 iter != other_store->m_cluster_map.end (); ++iter)
3111 const region *base_reg = (*iter).first;
3112 binding_cluster *cluster = (*iter).second;
3113 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3114 bind_iter != cluster->m_map.end (); ++bind_iter)
3116 const binding_key *key = (*bind_iter).first;
3117 const svalue *sval = (*bind_iter).second;
3118 if (sval->get_kind () == SK_WIDENING)
3120 binding_cluster *this_cluster
3121 = get_or_create_cluster (base_reg);
3122 const svalue *unknown
3123 = mgr->get_or_create_unknown_svalue (sval->get_type ());
3124 this_cluster->bind_key (key, unknown);
3130 /* Use R to replay the bindings from SUMMARY into this object. */
3133 store::replay_call_summary (call_summary_replay &r,
3134 const store &summary)
3136 if (summary.m_called_unknown_fn)
3138 /* A call to an external function occurred in the summary.
3139 Hence we need to invalidate our knowledge of globals,
3140 escaped regions, etc. */
3141 on_unknown_fncall (r.get_call_stmt (),
3142 r.get_store_manager (),
3143 conjured_purge (r.get_caller_model (),
3147 auto_vec<const region *> keys (summary.m_cluster_map.elements ());
3148 for (auto kv : summary.m_cluster_map)
3149 keys.quick_push (kv.first);
3150 keys.qsort (region::cmp_ptr_ptr);
3151 for (auto base_reg : keys)
3152 replay_call_summary_cluster (r, summary, base_reg);
3155 /* Use R and SUMMARY to replay the bindings in SUMMARY_CLUSTER
3156 into this object. */
3159 store::replay_call_summary_cluster (call_summary_replay &r,
3160 const store &summary,
3161 const region *summary_base_reg)
3163 const call_details &cd = r.get_call_details ();
3164 region_model_manager *reg_mgr = r.get_manager ();
3165 store_manager *mgr = reg_mgr->get_store_manager ();
3166 const binding_cluster *summary_cluster
3167 = summary.get_cluster (summary_base_reg);
3169 /* Handle "ESCAPED" and "TOUCHED" flags. */
3170 if (summary_cluster->escaped_p () || summary_cluster->touched_p ())
3171 if (const region *caller_reg
3172 = r.convert_region_from_summary (summary_base_reg))
3174 const region *caller_base_reg = caller_reg->get_base_region ();
3175 if (caller_base_reg->tracked_p ()
3176 && !caller_base_reg->symbolic_for_unknown_ptr_p ())
3178 binding_cluster *caller_cluster
3179 = get_or_create_cluster (caller_base_reg);
3180 if (summary_cluster->escaped_p ())
3181 caller_cluster->mark_as_escaped ();
3182 if (summary_cluster->touched_p ())
3183 caller_cluster->m_touched = true;
3187 switch (summary_base_reg->get_kind ())
3189 /* Top-level regions. */
3196 /* Child regions. */
3203 /* Other regions. */
3206 /* These should never be the base region of a binding cluster. */
3213 /* These can be marked as escaping. */
3218 const symbolic_region *summary_symbolic_reg
3219 = as_a <const symbolic_region *> (summary_base_reg);
3220 const svalue *summary_ptr_sval = summary_symbolic_reg->get_pointer ();
3221 const svalue *caller_ptr_sval
3222 = r.convert_svalue_from_summary (summary_ptr_sval);
3223 if (!caller_ptr_sval)
3225 const region *caller_dest_reg
3226 = cd.get_model ()->deref_rvalue (caller_ptr_sval,
3229 const svalue *summary_sval
3230 = summary.get_any_binding (mgr, summary_base_reg);
3233 const svalue *caller_sval
3234 = r.convert_svalue_from_summary (summary_sval);
3237 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3238 set_value (mgr, caller_dest_reg,
3239 caller_sval, NULL /* uncertainty_t * */);
3243 case RK_HEAP_ALLOCATED:
3246 const region *caller_dest_reg
3247 = r.convert_region_from_summary (summary_base_reg);
3248 if (!caller_dest_reg)
3250 const svalue *summary_sval
3251 = summary.get_any_binding (mgr, summary_base_reg);
3253 summary_sval = reg_mgr->get_or_create_compound_svalue
3254 (summary_base_reg->get_type (),
3255 summary_cluster->get_map ());
3256 const svalue *caller_sval
3257 = r.convert_svalue_from_summary (summary_sval);
3260 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3261 set_value (mgr, caller_dest_reg,
3262 caller_sval, NULL /* uncertainty_t * */);
3267 /* Ignore bindings of alloca regions in the summary. */
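/* Sketch of the overall idea (hypothetical code, not from the sources):
   with a summarized callee such as

     void set_field (struct st *p)
     {
       p->field = 42;
     }

   the summary's binding lives in a cluster whose base region is symbolic
   ("*p" as seen within the summary).  Replaying the summary at a call like
   "set_field (&caller_obj);" converts the summary's pointer value back into
   the caller's terms, dereferences it to find "caller_obj", and rebinds the
   corresponding field there via set_value.  */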
3274 namespace selftest {
3276 /* Verify that bit_range::intersects_p works as expected. */
3279 test_bit_range_intersects_p ()
3281 bit_range b0 (0, 1);
3282 bit_range b1 (1, 1);
3283 bit_range b2 (2, 1);
3284 bit_range b3 (3, 1);
3285 bit_range b4 (4, 1);
3286 bit_range b5 (5, 1);
3287 bit_range b6 (6, 1);
3288 bit_range b7 (7, 1);
3289 bit_range b1_to_6 (1, 6);
3290 bit_range b0_to_7 (0, 8);
3291 bit_range b3_to_5 (3, 3);
3292 bit_range b6_to_7 (6, 2);
3294 /* self-intersection is true. */
3295 ASSERT_TRUE (b0.intersects_p (b0));
3296 ASSERT_TRUE (b7.intersects_p (b7));
3297 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3298 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3300 ASSERT_FALSE (b0.intersects_p (b1));
3301 ASSERT_FALSE (b1.intersects_p (b0));
3302 ASSERT_FALSE (b0.intersects_p (b7));
3303 ASSERT_FALSE (b7.intersects_p (b0));
3305 ASSERT_TRUE (b0_to_7.intersects_p (b0));
3306 ASSERT_TRUE (b0_to_7.intersects_p (b7));
3307 ASSERT_TRUE (b0.intersects_p (b0_to_7));
3308 ASSERT_TRUE (b7.intersects_p (b0_to_7));
3310 ASSERT_FALSE (b0.intersects_p (b1_to_6));
3311 ASSERT_FALSE (b1_to_6.intersects_p (b0));
3312 ASSERT_TRUE (b1.intersects_p (b1_to_6));
3313 ASSERT_TRUE (b1_to_6.intersects_p (b1));
3314 ASSERT_TRUE (b1_to_6.intersects_p (b6));
3315 ASSERT_FALSE (b1_to_6.intersects_p (b7));
3317 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3318 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3320 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3321 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3325 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3326 ASSERT_EQ (r1.get_start_bit_offset (), 0);
3327 ASSERT_EQ (r1.m_size_in_bits, 6);
3328 ASSERT_EQ (r2.get_start_bit_offset (), 1);
3329 ASSERT_EQ (r2.m_size_in_bits, 6);
3331 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3332 ASSERT_EQ (r1.get_start_bit_offset (), 1);
3333 ASSERT_EQ (r1.m_size_in_bits, 6);
3334 ASSERT_EQ (r2.get_start_bit_offset (), 0);
3335 ASSERT_EQ (r2.m_size_in_bits, 6);
3338 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
3341 assert_bit_range_from_mask_eq (const location &loc,
3342 unsigned HOST_WIDE_INT mask,
3343 const bit_range &expected)
3345 bit_range actual (0, 0);
3346 bool ok = bit_range::from_mask (mask, &actual);
3347 ASSERT_TRUE_AT (loc, ok);
3348 ASSERT_EQ_AT (loc, actual, expected);
3351 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3352 out EXPECTED_BIT_RANGE. */
3354 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3355 SELFTEST_BEGIN_STMT \
3356 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3357 EXPECTED_BIT_RANGE); \
3360 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
3363 assert_no_bit_range_from_mask_eq (const location &loc,
3364 unsigned HOST_WIDE_INT mask)
3366 bit_range actual (0, 0);
3367 bool ok = bit_range::from_mask (mask, &actual);
3368 ASSERT_FALSE_AT (loc, ok);
3371 /* Assert that bit_range::from_mask (MASK) returns false. */
3373 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3374 SELFTEST_BEGIN_STMT \
3375 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3378 /* Verify that bit_range::from_mask works as expected. */
3381 test_bit_range_from_mask ()
3383 /* Should fail on zero. */
3384 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3386 /* Verify 1-bit masks. */
3387 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3388 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3389 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3390 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3391 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3392 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3393 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3394 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3396 /* Verify N-bit masks starting at bit 0. */
3397 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3398 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3399 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3400 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3401 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3402 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3403 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3404 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3406 /* Various other tests. */
3407 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3408 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3409 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3411 /* Multiple ranges of set bits should fail. */
3412 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3413 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3416 /* Implementation detail of ASSERT_OVERLAP. */
3419 assert_overlap (const location &loc,
3420 const concrete_binding *b1,
3421 const concrete_binding *b2)
3423 ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3424 ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3427 /* Implementation detail of ASSERT_DISJOINT. */
3430 assert_disjoint (const location &loc,
3431 const concrete_binding *b1,
3432 const concrete_binding *b2)
3434 ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3435 ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3438 /* Assert that B1 and B2 overlap, checking both ways. */
3440 #define ASSERT_OVERLAP(B1, B2) \
3441 SELFTEST_BEGIN_STMT \
3442 assert_overlap (SELFTEST_LOCATION, B1, B2); \
3445 /* Assert that B1 and B2 do not overlap, checking both ways. */
3447 #define ASSERT_DISJOINT(B1, B2) \
3448 SELFTEST_BEGIN_STMT \
3449 assert_disjoint (SELFTEST_LOCATION, B1, B2); \
3452 /* Verify that concrete_binding::overlaps_p works as expected. */
3455 test_binding_key_overlap ()
3457 store_manager mgr (NULL);
3459 /* Various 8-bit bindings. */
3460 const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3461 const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3462 const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3463 const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3465 /* 16-bit bindings. */
3466 const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3467 const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3468 const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3470 /* 32-bit binding. */
3471 const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3473 /* Everything should self-overlap. */
3474 ASSERT_OVERLAP (cb_0_7, cb_0_7);
3475 ASSERT_OVERLAP (cb_8_15, cb_8_15);
3476 ASSERT_OVERLAP (cb_16_23, cb_16_23);
3477 ASSERT_OVERLAP (cb_24_31, cb_24_31);
3478 ASSERT_OVERLAP (cb_0_15, cb_0_15);
3479 ASSERT_OVERLAP (cb_8_23, cb_8_23);
3480 ASSERT_OVERLAP (cb_16_31, cb_16_31);
3481 ASSERT_OVERLAP (cb_0_31, cb_0_31);
3483 /* Verify the 8-bit bindings that don't overlap each other. */
3484 ASSERT_DISJOINT (cb_0_7, cb_8_15);
3485 ASSERT_DISJOINT (cb_8_15, cb_16_23);
3487 /* Check for overlap of differently-sized bindings. */
3488 ASSERT_OVERLAP (cb_0_7, cb_0_31);
3489 /* ...and with differing start points. */
3490 ASSERT_OVERLAP (cb_8_15, cb_0_31);
3491 ASSERT_DISJOINT (cb_8_15, cb_16_31);
3492 ASSERT_OVERLAP (cb_16_23, cb_0_31);
3493 ASSERT_OVERLAP (cb_16_31, cb_0_31);
3495 ASSERT_DISJOINT (cb_0_7, cb_8_23);
3496 ASSERT_OVERLAP (cb_8_23, cb_16_23);
3497 ASSERT_OVERLAP (cb_8_23, cb_16_31);
3498 ASSERT_DISJOINT (cb_8_23, cb_24_31);
3501 /* Run all of the selftests within this file. */
3504 analyzer_store_cc_tests ()
3506 test_bit_range_intersects_p ();
3507 test_bit_range_from_mask ();
3508 test_binding_key_overlap ();
3511 } // namespace selftest
3513 #endif /* CHECKING_P */
3517 #endif /* #if ENABLE_ANALYZER */