}
allocations_.clear();
+  TryReuseSpillRangesForGroups();
+
TRACE("End allocating function %s with the Greedy Allocator\n",
data()->debug_name());
}
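+// Try to merge the spill ranges of the members of each (phi) live range
+// group into a single spill range. Doing this before the generic spill range
+// merging pass runs later in the pipeline avoids that pass introducing
+// conflicts between the spill ranges of group members.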
+void GreedyAllocator::TryReuseSpillRangesForGroups() {
+  for (TopLevelLiveRange* top : data()->live_ranges()) {
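+    // Skip ranges we cannot process, as well as non-phi or ungrouped ranges.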
+    if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
+      continue;
+    }
+
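+    // Adopt the first member's spill range and try to merge the other
+    // members' spill ranges into it.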
+    SpillRange* spill_range = nullptr;
+    for (LiveRange* member : top->group()->ranges()) {
+      if (!member->TopLevel()->HasSpillRange()) continue;
+      SpillRange* member_range = member->TopLevel()->GetSpillRange();
+      if (spill_range == nullptr) {
+        spill_range = member_range;
+      } else {
+        // This may not always succeed, because we group non-conflicting ranges
+        // that may have been splintered, and the splinters may cause conflicts
+        // in the spill ranges.
+        // TODO(mtrofin): should the splinters own their own spill ranges?
+        spill_range->TryMerge(member_range);
+      }
+    }
+  }
+}
+
+
float GreedyAllocator::GetMaximumConflictingWeight(
unsigned reg_id, const LiveRange* range, float competing_weight) const {
float ret = LiveRange::kInvalidWeight;
// - the portion after the call.
LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
+  // Spill ranges are also merged later in the allocation pipeline, but we
+  // want to make sure the members of a group get merged first. Waiting until
+  // later may hinder mergeability, since the pipeline merger (being naive)
+  // may create conflicts between the spill ranges of group members.
+  void TryReuseSpillRangesForGroups();
+
// Necessary heuristic: spill when all else failed.
void SpillRangeAsLastResort(LiveRange* range);