/* Data structure for the modref pass.
   Copyright (C) 2020-2022 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
26 #include "ipa-modref-tree.h"
28 #include "tree-ssa-alias.h"
31 #include "tree-streamer.h"
33 /* Return true if both accesses are the same. */
35 modref_access_node::operator == (modref_access_node &a) const
37 if (parm_index != a.parm_index)
39 if (parm_index != MODREF_UNKNOWN_PARM
40 && parm_index != MODREF_GLOBAL_MEMORY_PARM)
42 if (parm_offset_known != a.parm_offset_known)
45 && !known_eq (parm_offset, a.parm_offset))
48 if (range_info_useful_p () != a.range_info_useful_p ())
50 if (range_info_useful_p ()
51 && (!known_eq (a.offset, offset)
52 || !known_eq (a.size, size)
53 || !known_eq (a.max_size, max_size)))
58 /* Return true A is a subaccess. */
60 modref_access_node::contains (const modref_access_node &a) const
62 poly_int64 aoffset_adj = 0;
63 if (parm_index != MODREF_UNKNOWN_PARM)
65 if (parm_index != a.parm_index)
67 if (parm_offset_known)
69 if (!a.parm_offset_known)
71 /* Accesses are never below parm_offset, so look
73 If access ranges are known still allow merging
74 when bit offsets comparison passes. */
75 if (!known_le (parm_offset, a.parm_offset)
76 && !range_info_useful_p ())
78 /* We allow negative aoffset_adj here in case
79 there is an useful range. This is because adding
80 a.offset may result in non-negative offset again.
81 Ubsan fails on val << LOG_BITS_PER_UNIT where val
83 aoffset_adj = (a.parm_offset - parm_offset)
87 if (range_info_useful_p ())
89 if (!a.range_info_useful_p ())
91 /* Sizes of stores are used to check that object is big enough
92 to fit the store, so smaller or unknown store is more general
94 if (known_size_p (size)
95 && (!known_size_p (a.size)
96 || !known_le (size, a.size)))
98 if (known_size_p (max_size))
99 return known_subrange_p (a.offset + aoffset_adj,
100 a.max_size, offset, max_size);
102 return known_le (offset, a.offset + aoffset_adj);
107 /* Update access range to new parameters.
108 If RECORD_ADJUSTMENTS is true, record number of changes in the access
109 and if threshold is exceeded start dropping precision
110 so only constantly many updates are possible. This makes dataflow
113 modref_access_node::update (poly_int64 parm_offset1,
114 poly_int64 offset1, poly_int64 size1,
115 poly_int64 max_size1, bool record_adjustments)
117 if (known_eq (parm_offset, parm_offset1)
118 && known_eq (offset, offset1)
119 && known_eq (size, size1)
120 && known_eq (max_size, max_size1))
122 if (!record_adjustments
123 || (++adjustments) < param_modref_max_adjustments)
125 parm_offset = parm_offset1;
128 max_size = max_size1;
133 fprintf (dump_file, "--param modref-max-adjustments limit reached:");
134 if (!known_eq (parm_offset, parm_offset1))
137 fprintf (dump_file, " parm_offset cleared");
138 parm_offset_known = false;
140 if (!known_eq (size, size1))
144 fprintf (dump_file, " size cleared");
146 if (!known_eq (max_size, max_size1))
150 fprintf (dump_file, " max_size cleared");
152 if (!known_eq (offset, offset1))
156 fprintf (dump_file, " offset cleared");
159 fprintf (dump_file, "\n");
163 /* Merge in access A if it is possible to do without losing
164 precision. Return true if successful.
165 If RECORD_ADJUSTMENTs is true, remember how many interval
166 was prolonged and punt when there are too many. */
168 modref_access_node::merge (const modref_access_node &a,
169 bool record_adjustments)
171 poly_int64 offset1 = 0;
172 poly_int64 aoffset1 = 0;
173 poly_int64 new_parm_offset = 0;
175 /* We assume that containment was tested earlier. */
176 gcc_checking_assert (!contains (a) && !a.contains (*this));
177 if (parm_index != MODREF_UNKNOWN_PARM)
179 if (parm_index != a.parm_index)
181 if (parm_offset_known)
183 if (!a.parm_offset_known)
185 if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
189 /* See if we can merge ranges. */
190 if (range_info_useful_p ())
192 /* In this case we have containment that should be
194 gcc_checking_assert (a.range_info_useful_p ());
196 /* If a.size is less specified than size, merge only
197 if intervals are otherwise equivalent. */
198 if (known_size_p (size)
199 && (!known_size_p (a.size) || known_lt (a.size, size)))
201 if (((known_size_p (max_size) || known_size_p (a.max_size))
202 && !known_eq (max_size, a.max_size))
203 || !known_eq (offset1, aoffset1))
205 update (new_parm_offset, offset1, a.size, max_size,
209 /* If sizes are same, we can extend the interval. */
210 if ((known_size_p (size) || known_size_p (a.size))
211 && !known_eq (size, a.size))
213 if (known_le (offset1, aoffset1))
215 if (!known_size_p (max_size)
216 || known_ge (offset1 + max_size, aoffset1))
218 update2 (new_parm_offset, offset1, size, max_size,
219 aoffset1, a.size, a.max_size,
224 else if (known_le (aoffset1, offset1))
226 if (!known_size_p (a.max_size)
227 || known_ge (aoffset1 + a.max_size, offset1))
229 update2 (new_parm_offset, offset1, size, max_size,
230 aoffset1, a.size, a.max_size,
237 update (new_parm_offset, offset1,
238 size, max_size, record_adjustments);
242 /* Return true if A1 and B1 can be merged with lower information
244 Assume that no containment or lossless merging is possible. */
246 modref_access_node::closer_pair_p (const modref_access_node &a1,
247 const modref_access_node &b1,
248 const modref_access_node &a2,
249 const modref_access_node &b2)
251 /* Merging different parm indexes comes to complete loss
253 if (a1.parm_index != b1.parm_index)
255 if (a2.parm_index != b2.parm_index)
257 /* If parm is known and parm indexes are the same we should
258 already have containment. */
259 gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
260 gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);
262 /* First normalize offsets for parm offsets. */
263 poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
264 if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
265 || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
269 /* Now compute distance of the intervals. */
270 poly_offset_int dist1, dist2;
271 if (known_le (offseta1, offsetb1))
273 if (!known_size_p (a1.max_size))
276 dist1 = (poly_offset_int)offsetb1
277 - (poly_offset_int)offseta1
278 - (poly_offset_int)a1.max_size;
282 if (!known_size_p (b1.max_size))
285 dist1 = (poly_offset_int)offseta1
286 - (poly_offset_int)offsetb1
287 - (poly_offset_int)b1.max_size;
289 if (known_le (offseta2, offsetb2))
291 if (!known_size_p (a2.max_size))
294 dist2 = (poly_offset_int)offsetb2
295 - (poly_offset_int)offseta2
296 - (poly_offset_int)a2.max_size;
300 if (!known_size_p (b2.max_size))
304 - (poly_offset_int)offsetb2
305 - (poly_offset_int)b2.max_size;
307 /* It may happen that intervals overlap in case size
308 is different. Prefer the overlap to non-overlap. */
309 if (known_lt (dist1, 0) && known_ge (dist2, 0))
311 if (known_lt (dist2, 0) && known_ge (dist1, 0))
313 if (known_lt (dist1, 0))
314 /* If both overlaps minimize overlap. */
315 return known_le (dist2, dist1);
317 /* If both are disjoint look for smaller distance. */
318 return known_le (dist1, dist2);
321 /* Merge in access A while losing precision. */
323 modref_access_node::forced_merge (const modref_access_node &a,
324 bool record_adjustments)
326 if (parm_index != a.parm_index)
328 gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM);
329 parm_index = MODREF_UNKNOWN_PARM;
333 /* We assume that containment and lossless merging
334 was tested earlier. */
335 gcc_checking_assert (!contains (a) && !a.contains (*this)
336 && !merge (a, record_adjustments));
337 gcc_checking_assert (parm_offset_known && a.parm_offset_known);
339 poly_int64 new_parm_offset, offset1, aoffset1;
340 if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
342 parm_offset_known = false;
345 gcc_checking_assert (range_info_useful_p ()
346 && a.range_info_useful_p ());
347 if (record_adjustments)
348 adjustments += a.adjustments;
349 update2 (new_parm_offset,
350 offset1, size, max_size,
351 aoffset1, a.size, a.max_size,
355 /* Merge two ranges both starting at parm_offset1 and update THIS
358 modref_access_node::update2 (poly_int64 parm_offset1,
359 poly_int64 offset1, poly_int64 size1,
360 poly_int64 max_size1,
361 poly_int64 offset2, poly_int64 size2,
362 poly_int64 max_size2,
363 bool record_adjustments)
365 poly_int64 new_size = size1;
367 if (!known_size_p (size2)
368 || known_le (size2, size1))
371 gcc_checking_assert (known_le (size1, size2));
373 if (known_le (offset1, offset2))
375 else if (known_le (offset2, offset1))
377 std::swap (offset1, offset2);
378 std::swap (max_size1, max_size2);
383 poly_int64 new_max_size;
385 if (!known_size_p (max_size1))
386 new_max_size = max_size1;
387 else if (!known_size_p (max_size2))
388 new_max_size = max_size2;
391 poly_offset_int s = (poly_offset_int)max_size2
392 + (poly_offset_int)offset2
393 - (poly_offset_int)offset1;
394 if (s.to_shwi (&new_max_size))
396 if (known_le (new_max_size, max_size1))
397 new_max_size = max_size1;
403 update (parm_offset1, offset1,
404 new_size, new_max_size, record_adjustments);
407 /* Given access nodes THIS and A, return true if they
408 can be done with common parm_offsets. In this case
409 return parm offset in new_parm_offset, new_offset
410 which is start of range in THIS and new_aoffset that
411 is start of range in A. */
413 modref_access_node::combined_offsets (const modref_access_node &a,
414 poly_int64 *new_parm_offset,
415 poly_int64 *new_offset,
416 poly_int64 *new_aoffset) const
418 gcc_checking_assert (parm_offset_known && a.parm_offset_known);
419 if (known_le (a.parm_offset, parm_offset))
422 + ((parm_offset - a.parm_offset)
423 << LOG2_BITS_PER_UNIT);
424 *new_aoffset = a.offset;
425 *new_parm_offset = a.parm_offset;
428 else if (known_le (parm_offset, a.parm_offset))
430 *new_aoffset = a.offset
431 + ((a.parm_offset - parm_offset)
432 << LOG2_BITS_PER_UNIT);
433 *new_offset = offset;
434 *new_parm_offset = parm_offset;
441 /* Try to optimize the access ACCESSES list after entry INDEX was modified. */
443 modref_access_node::try_merge_with (vec <modref_access_node, va_gc> *&accesses,
448 for (i = 0; i < accesses->length ();)
451 bool found = false, restart = false;
452 modref_access_node *a = &(*accesses)[i];
453 modref_access_node *n = &(*accesses)[index];
455 if (n->contains (*a))
457 if (!found && n->merge (*a, false))
458 found = restart = true;
459 gcc_checking_assert (found || !a->merge (*n, false));
462 accesses->unordered_remove (i);
463 if (index == accesses->length ())
478 /* Stream out to OB. */
481 modref_access_node::stream_out (struct output_block *ob) const
483 streamer_write_hwi (ob, parm_index);
484 if (parm_index != MODREF_UNKNOWN_PARM)
486 streamer_write_uhwi (ob, parm_offset_known);
487 if (parm_offset_known)
489 streamer_write_poly_int64 (ob, parm_offset);
490 streamer_write_poly_int64 (ob, offset);
491 streamer_write_poly_int64 (ob, size);
492 streamer_write_poly_int64 (ob, max_size);
498 modref_access_node::stream_in (struct lto_input_block *ib)
500 int parm_index = streamer_read_hwi (ib);
501 bool parm_offset_known = false;
502 poly_int64 parm_offset = 0;
503 poly_int64 offset = 0;
504 poly_int64 size = -1;
505 poly_int64 max_size = -1;
507 if (parm_index != MODREF_UNKNOWN_PARM)
509 parm_offset_known = streamer_read_uhwi (ib);
510 if (parm_offset_known)
512 parm_offset = streamer_read_poly_int64 (ib);
513 offset = streamer_read_poly_int64 (ib);
514 size = streamer_read_poly_int64 (ib);
515 max_size = streamer_read_poly_int64 (ib);
518 return {offset, size, max_size, parm_offset, parm_index,
519 parm_offset_known, false};
522 /* Insert access with OFFSET and SIZE.
523 Collapse tree if it has more than MAX_ACCESSES entries.
524 If RECORD_ADJUSTMENTs is true avoid too many interval extensions.
525 Return true if record was changed.
527 Return 0 if nothing changed, 1 if insert was successful and -1
528 if entries should be collapsed. */
530 modref_access_node::insert (vec <modref_access_node, va_gc> *&accesses,
531 modref_access_node a, size_t max_accesses,
532 bool record_adjustments)
535 modref_access_node *a2;
537 /* Verify that list does not contain redundant accesses. */
541 modref_access_node *a, *a2;
543 FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
545 FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
547 gcc_assert (!a->contains (*a2));
551 FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
553 if (a2->contains (a))
555 if (a.contains (*a2))
558 a2->parm_index = a.parm_index;
559 a2->parm_offset_known = a.parm_offset_known;
560 a2->update (a.parm_offset, a.offset, a.size, a.max_size,
562 modref_access_node::try_merge_with (accesses, i);
565 if (a2->merge (a, record_adjustments))
567 modref_access_node::try_merge_with (accesses, i);
570 gcc_checking_assert (!(a == *a2));
573 /* If this base->ref pair has too many accesses stored, we will clear
574 all accesses and bail out. */
575 if (accesses && accesses->length () >= max_accesses)
577 if (max_accesses < 2)
579 /* Find least harmful merge and perform it. */
580 int best1 = -1, best2 = -1;
581 FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
583 for (j = i + 1; j < accesses->length (); j++)
585 || modref_access_node::closer_pair_p
586 (*a2, (*accesses)[j],
588 best2 < 0 ? a : (*accesses)[best2]))
593 if (modref_access_node::closer_pair_p
596 best2 < 0 ? a : (*accesses)[best2]))
602 (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
604 /* Check that merging indeed merged ranges. */
605 gcc_checking_assert ((*accesses)[best1].contains
606 (best2 < 0 ? a : (*accesses)[best2]));
607 if (!(*accesses)[best1].useful_p ())
609 if (dump_file && best2 >= 0)
611 "--param modref-max-accesses limit reached;"
612 " merging %i and %i\n", best1, best2);
615 "--param modref-max-accesses limit reached;"
616 " merging with %i\n", best1);
617 modref_access_node::try_merge_with (accesses, best1);
619 insert (accesses, a, max_accesses, record_adjustments);
623 vec_safe_push (accesses, a);
627 /* Return true if range info is useful. */
629 modref_access_node::range_info_useful_p () const
631 return parm_index != MODREF_UNKNOWN_PARM
632 && parm_index != MODREF_GLOBAL_MEMORY_PARM
634 && (known_size_p (size)
635 || known_size_p (max_size)
636 || known_ge (offset, 0));
639 /* Dump range to debug OUT. */
641 modref_access_node::dump (FILE *out)
643 if (parm_index != MODREF_UNKNOWN_PARM)
645 if (parm_index == MODREF_GLOBAL_MEMORY_PARM)
646 fprintf (out, " Base in global memory");
647 else if (parm_index >= 0)
648 fprintf (out, " Parm %i", parm_index);
649 else if (parm_index == MODREF_STATIC_CHAIN_PARM)
650 fprintf (out, " Static chain");
653 if (parm_offset_known)
655 fprintf (out, " param offset:");
656 print_dec ((poly_int64_pod)parm_offset, out, SIGNED);
659 if (range_info_useful_p ())
661 fprintf (out, " offset:");
662 print_dec ((poly_int64_pod)offset, out, SIGNED);
663 fprintf (out, " size:");
664 print_dec ((poly_int64_pod)size, out, SIGNED);
665 fprintf (out, " max_size:");
666 print_dec ((poly_int64_pod)max_size, out, SIGNED);
668 fprintf (out, " adjusted %i times", adjustments);
673 /* Return tree corresponding to parameter of the range in STMT. */
675 modref_access_node::get_call_arg (const gcall *stmt) const
677 if (parm_index == MODREF_UNKNOWN_PARM
678 || parm_index == MODREF_GLOBAL_MEMORY_PARM)
680 if (parm_index == MODREF_STATIC_CHAIN_PARM)
681 return gimple_call_chain (stmt);
682 /* MODREF_RETSLOT_PARM should not happen in access trees since the store
683 is seen explicitly in the caller. */
684 gcc_checking_assert (parm_index >= 0);
685 if (parm_index >= (int)gimple_call_num_args (stmt))
687 return gimple_call_arg (stmt, parm_index);
690 /* Return tree corresponding to parameter of the range in STMT. */
692 modref_access_node::get_ao_ref (const gcall *stmt, ao_ref *ref) const
696 if (!parm_offset_known
697 || !(arg = get_call_arg (stmt))
698 || !POINTER_TYPE_P (TREE_TYPE (arg)))
700 poly_offset_int off = (poly_offset_int)offset
701 + ((poly_offset_int)parm_offset << LOG2_BITS_PER_UNIT);
703 if (!off.to_shwi (&off2))
705 ao_ref_init_from_ptr_and_range (ref, arg, true, off2, size, max_size);
709 /* Return true A is a subkill. */
711 modref_access_node::contains_for_kills (const modref_access_node &a) const
713 poly_int64 aoffset_adj = 0;
715 gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM
716 && a.parm_index != MODREF_UNKNOWN_PARM);
717 if (parm_index != a.parm_index)
719 gcc_checking_assert (parm_offset_known && a.parm_offset_known);
720 aoffset_adj = (a.parm_offset - parm_offset)
722 gcc_checking_assert (range_info_useful_p () && a.range_info_useful_p ());
723 return known_subrange_p (a.offset + aoffset_adj,
724 a.max_size, offset, max_size);
727 /* Merge two ranges both starting at parm_offset1 and update THIS
730 modref_access_node::update_for_kills (poly_int64 parm_offset1,
732 poly_int64 max_size1,
734 poly_int64 max_size2,
735 bool record_adjustments)
737 if (known_le (offset1, offset2))
739 else if (known_le (offset2, offset1))
741 std::swap (offset1, offset2);
742 std::swap (max_size1, max_size2);
747 poly_int64 new_max_size = max_size2 + offset2 - offset1;
748 if (known_le (new_max_size, max_size1))
749 new_max_size = max_size1;
750 if (known_eq (parm_offset, parm_offset1)
751 && known_eq (offset, offset1)
752 && known_eq (size, new_max_size)
753 && known_eq (max_size, new_max_size))
756 if (!record_adjustments
757 || (++adjustments) < param_modref_max_adjustments)
759 parm_offset = parm_offset1;
761 max_size = new_max_size;
763 gcc_checking_assert (useful_for_kill_p ());
769 /* Merge in access A if it is possible to do without losing
770 precision. Return true if successful.
771 Unlike merge assume that both accesses are always executed
772 and merge size the same was as max_size. */
774 modref_access_node::merge_for_kills (const modref_access_node &a,
775 bool record_adjustments)
777 poly_int64 offset1 = 0;
778 poly_int64 aoffset1 = 0;
779 poly_int64 new_parm_offset = 0;
781 /* We assume that containment was tested earlier. */
782 gcc_checking_assert (!contains_for_kills (a) && !a.contains_for_kills (*this)
783 && useful_for_kill_p () && a.useful_for_kill_p ());
785 if (parm_index != a.parm_index
786 || !combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
789 if (known_le (offset1, aoffset1))
791 if (!known_size_p (max_size)
792 || known_ge (offset1 + max_size, aoffset1))
793 return update_for_kills (new_parm_offset, offset1, max_size,
794 aoffset1, a.max_size, record_adjustments);
796 else if (known_le (aoffset1, offset1))
798 if (!known_size_p (a.max_size)
799 || known_ge (aoffset1 + a.max_size, offset1))
800 return update_for_kills (new_parm_offset, offset1, max_size,
801 aoffset1, a.max_size, record_adjustments);
806 /* Insert new kill A into KILLS. If RECORD_ADJUSTMENTS is true limit number
807 of changes to each entry. Return true if something changed. */
810 modref_access_node::insert_kill (vec<modref_access_node> &kills,
811 modref_access_node &a, bool record_adjustments)
814 modref_access_node *a2;
817 gcc_checking_assert (a.useful_for_kill_p ());
819 /* See if we have corresponding entry already or we can merge with
820 neighboring entry. */
821 FOR_EACH_VEC_ELT (kills, index, a2)
823 if (a2->contains_for_kills (a))
825 if (a.contains_for_kills (*a2))
832 if (a2->merge_for_kills (a, record_adjustments))
838 /* If entry was not found, insert it. */
841 if ((int)kills.length () >= param_modref_max_accesses)
844 fprintf (dump_file, "--param modref-max-accesses limit reached:");
851 /* Extending range in an entry may make it possible to merge it with
855 for (i = 0; i < kills.length ();)
858 bool found = false, restart = false;
859 modref_access_node *a = &kills[i];
860 modref_access_node *n = &kills[index];
862 if (n->contains_for_kills (*a))
864 if (!found && n->merge_for_kills (*a, false))
865 found = restart = true;
866 gcc_checking_assert (found || !a->merge_for_kills (*n, false));
869 kills.unordered_remove (i);
870 if (index == kills.length ())
892 test_insert_search_collapse ()
894 modref_base_node<alias_set_type> *base_node;
895 modref_ref_node<alias_set_type> *ref_node;
896 modref_access_node a = unspecified_modref_access_node;
898 modref_tree<alias_set_type> *t = new modref_tree<alias_set_type>();
899 ASSERT_FALSE (t->every_base);
901 /* Insert into an empty tree. */
902 t->insert (1, 2, 2, 1, 2, a, false);
903 ASSERT_NE (t->bases, NULL);
904 ASSERT_EQ (t->bases->length (), 1);
905 ASSERT_FALSE (t->every_base);
906 ASSERT_EQ (t->search (2), NULL);
908 base_node = t->search (1);
909 ASSERT_NE (base_node, NULL);
910 ASSERT_EQ (base_node->base, 1);
911 ASSERT_NE (base_node->refs, NULL);
912 ASSERT_EQ (base_node->refs->length (), 1);
913 ASSERT_EQ (base_node->search (1), NULL);
915 ref_node = base_node->search (2);
916 ASSERT_NE (ref_node, NULL);
917 ASSERT_EQ (ref_node->ref, 2);
919 /* Insert when base exists but ref does not. */
920 t->insert (1, 2, 2, 1, 3, a, false);
921 ASSERT_NE (t->bases, NULL);
922 ASSERT_EQ (t->bases->length (), 1);
923 ASSERT_EQ (t->search (1), base_node);
924 ASSERT_EQ (t->search (2), NULL);
925 ASSERT_NE (base_node->refs, NULL);
926 ASSERT_EQ (base_node->refs->length (), 2);
928 ref_node = base_node->search (3);
929 ASSERT_NE (ref_node, NULL);
931 /* Insert when base and ref exist, but access is not dominated by nor
932 dominates other accesses. */
933 t->insert (1, 2, 2, 1, 2, a, false);
934 ASSERT_EQ (t->bases->length (), 1);
935 ASSERT_EQ (t->search (1), base_node);
937 ref_node = base_node->search (2);
938 ASSERT_NE (ref_node, NULL);
940 /* Insert when base and ref exist and access is dominated. */
941 t->insert (1, 2, 2, 1, 2, a, false);
942 ASSERT_EQ (t->search (1), base_node);
943 ASSERT_EQ (base_node->search (2), ref_node);
945 /* Insert ref to trigger ref list collapse for base 1. */
946 t->insert (1, 2, 2, 1, 4, a, false);
947 ASSERT_EQ (t->search (1), base_node);
948 ASSERT_EQ (base_node->refs, NULL);
949 ASSERT_EQ (base_node->search (2), NULL);
950 ASSERT_EQ (base_node->search (3), NULL);
951 ASSERT_TRUE (base_node->every_ref);
953 /* Further inserts to collapsed ref list are ignored. */
954 t->insert (1, 2, 2, 1, 5, a, false);
955 ASSERT_EQ (t->search (1), base_node);
956 ASSERT_EQ (base_node->refs, NULL);
957 ASSERT_EQ (base_node->search (2), NULL);
958 ASSERT_EQ (base_node->search (3), NULL);
959 ASSERT_TRUE (base_node->every_ref);
961 /* Insert base to trigger base list collapse. */
962 t->insert (1, 2, 2, 5, 0, a, false);
963 ASSERT_TRUE (t->every_base);
964 ASSERT_EQ (t->bases, NULL);
965 ASSERT_EQ (t->search (1), NULL);
967 /* Further inserts to collapsed base list are ignored. */
968 t->insert (1, 2, 2, 7, 8, a, false);
969 ASSERT_TRUE (t->every_base);
970 ASSERT_EQ (t->bases, NULL);
971 ASSERT_EQ (t->search (1), NULL);
979 modref_tree<alias_set_type> *t1, *t2;
980 modref_base_node<alias_set_type> *base_node;
981 modref_access_node a = unspecified_modref_access_node;
983 t1 = new modref_tree<alias_set_type>();
984 t1->insert (3, 4, 1, 1, 1, a, false);
985 t1->insert (3, 4, 1, 1, 2, a, false);
986 t1->insert (3, 4, 1, 1, 3, a, false);
987 t1->insert (3, 4, 1, 2, 1, a, false);
988 t1->insert (3, 4, 1, 3, 1, a, false);
990 t2 = new modref_tree<alias_set_type>();
991 t2->insert (10, 10, 10, 1, 2, a, false);
992 t2->insert (10, 10, 10, 1, 3, a, false);
993 t2->insert (10, 10, 10, 1, 4, a, false);
994 t2->insert (10, 10, 10, 3, 2, a, false);
995 t2->insert (10, 10, 10, 3, 3, a, false);
996 t2->insert (10, 10, 10, 3, 4, a, false);
997 t2->insert (10, 10, 10, 3, 5, a, false);
999 t1->merge (3, 4, 1, t2, NULL, NULL, false);
1001 ASSERT_FALSE (t1->every_base);
1002 ASSERT_NE (t1->bases, NULL);
1003 ASSERT_EQ (t1->bases->length (), 3);
1005 base_node = t1->search (1);
1006 ASSERT_NE (base_node->refs, NULL);
1007 ASSERT_FALSE (base_node->every_ref);
1008 ASSERT_EQ (base_node->refs->length (), 4);
1010 base_node = t1->search (2);
1011 ASSERT_NE (base_node->refs, NULL);
1012 ASSERT_FALSE (base_node->every_ref);
1013 ASSERT_EQ (base_node->refs->length (), 1);
1015 base_node = t1->search (3);
1016 ASSERT_EQ (base_node->refs, NULL);
1017 ASSERT_TRUE (base_node->every_ref);
1025 ipa_modref_tree_cc_tests ()
1027 test_insert_search_collapse ();
1031 } // namespace selftest
1036 gt_ggc_mx (modref_tree < int >*const &tt)
1040 ggc_test_and_set_mark (tt->bases);
1041 gt_ggc_mx (tt->bases);
1046 gt_ggc_mx (modref_tree < tree_node * >*const &tt)
1050 ggc_test_and_set_mark (tt->bases);
1051 gt_ggc_mx (tt->bases);
1055 void gt_pch_nx (modref_tree<int>* const&) {}
1056 void gt_pch_nx (modref_tree<tree_node*>* const&) {}
1057 void gt_pch_nx (modref_tree<int>* const&, gt_pointer_operator, void *) {}
1058 void gt_pch_nx (modref_tree<tree_node*>* const&, gt_pointer_operator, void *) {}
1060 void gt_ggc_mx (modref_base_node<int>* &b)
1062 ggc_test_and_set_mark (b);
1065 ggc_test_and_set_mark (b->refs);
1066 gt_ggc_mx (b->refs);
1070 void gt_ggc_mx (modref_base_node<tree_node*>* &b)
1072 ggc_test_and_set_mark (b);
1075 ggc_test_and_set_mark (b->refs);
1076 gt_ggc_mx (b->refs);
1079 gt_ggc_mx (b->base);
1082 void gt_pch_nx (modref_base_node<int>*) {}
1083 void gt_pch_nx (modref_base_node<tree_node*>*) {}
1084 void gt_pch_nx (modref_base_node<int>*, gt_pointer_operator, void *) {}
1085 void gt_pch_nx (modref_base_node<tree_node*>*, gt_pointer_operator, void *) {}
1087 void gt_ggc_mx (modref_ref_node<int>* &r)
1089 ggc_test_and_set_mark (r);
1092 ggc_test_and_set_mark (r->accesses);
1093 gt_ggc_mx (r->accesses);
1097 void gt_ggc_mx (modref_ref_node<tree_node*>* &r)
1099 ggc_test_and_set_mark (r);
1102 ggc_test_and_set_mark (r->accesses);
1103 gt_ggc_mx (r->accesses);
1109 void gt_pch_nx (modref_ref_node<int>* ) {}
1110 void gt_pch_nx (modref_ref_node<tree_node*>*) {}
1111 void gt_pch_nx (modref_ref_node<int>*, gt_pointer_operator, void *) {}
1112 void gt_pch_nx (modref_ref_node<tree_node*>*, gt_pointer_operator, void *) {}
1114 void gt_ggc_mx (modref_access_node &)