const_iterator begin() const { return instructions_.begin(); }
const_iterator end() const { return instructions_.end(); }
const InstructionDeque& instructions() const { return instructions_; }
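+ // Returns the index of the last instruction, or -1 if the sequence is empty.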
+ int LastInstructionIndex() const {
+ return static_cast<int>(instructions().size()) - 1;
+ }
Instruction* InstructionAt(int index) const {
DCHECK(index >= 0);
DCHECK(index < static_cast<int>(instructions_.size()));
return instructions_[index];
}
for (auto block : code()->instruction_blocks()) {
if (block->PredecessorCount() <= 1) continue;
+ bool has_only_deferred = true;
+ for (RpoNumber pred_id : block->predecessors()) {
+ if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
+ has_only_deferred = false;
+ break;
+ }
+ }
+ // OptimizeMerge would pull common moves down into this block. If those
+ // moves occur in deferred blocks, and the closest common successor is not
+ // deferred, we would lose the optimization of just spilling/filling in
+ // deferred blocks, so skip such merges.
+ if (has_only_deferred) continue;
OptimizeMerge(block);
}
for (auto gap : to_finalize_) {
Run<PreprocessLiveRangesPhase>();
}
- if (FLAG_turbo_greedy_regalloc) {
- Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
- Run<AllocateDoubleRegistersPhase<GreedyAllocator>>();
- } else {
- Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
- Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
- }
+ // TODO(mtrofin): re-enable greedy once we have bots for range preprocessing.
+ Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
+ Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
if (FLAG_turbo_frame_elision) {
Run<LocateSpillSlotsPhase>();
} while (false)
+namespace {
+
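+// Splits |range| at |pos| and returns the new child range, which covers
+// [pos, range->End()).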
+LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
+ LifetimePosition pos) {
+ DCHECK(range->Start() < pos && pos < range->End());
+ DCHECK(pos.IsStart() || pos.IsGapPosition() ||
+ (data->code()
+ ->GetInstructionBlock(pos.ToInstructionIndex())
+ ->last_instruction_index() != pos.ToInstructionIndex()));
+ LiveRange* result = data->NewChildRangeFor(range);
+ range->SplitAt(pos, result, data->allocation_zone());
+ TRACE("Split range %d(v%d) @%d => %d.\n", range->id(),
+ range->TopLevel()->id(), pos.ToInstructionIndex(), result->id());
+ return result;
+}
+
+
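+// Returns the gap position before |instruction_index| if it lies strictly
+// inside |range|, and an invalid position otherwise.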
+LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
+ int instruction_index) {
+ LifetimePosition ret =
+ LifetimePosition::GapFromInstructionIndex(instruction_index);
+ if (range->Start() >= ret || ret >= range->End()) {
+ return LifetimePosition::Invalid();
+ }
+ return ret;
+}
+
+
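+// Splits |range| just after |block|: at the block's last instruction, or, if
+// one of the successors is an exception handler, at the first instruction
+// past the block. Returns the new child, or |range| itself if no valid split
+// position inside the range exists.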
+LiveRange* SplitRangeAfterBlock(LiveRange* range, RegisterAllocationData* data,
+ const InstructionBlock* block) {
+ const InstructionSequence* code = data->code();
+ int last_index = block->last_instruction_index();
+ int outside_index = static_cast<int>(code->instructions().size());
+ bool has_handler = false;
+ for (auto successor_id : block->successors()) {
+ const InstructionBlock* successor = code->InstructionBlockAt(successor_id);
+ if (successor->IsHandler()) {
+ has_handler = true;
+ }
+ outside_index = Min(outside_index, successor->first_instruction_index());
+ }
+ int split_at = has_handler ? outside_index : last_index;
+ LifetimePosition after_block =
+ GetSplitPositionForInstruction(range, split_at);
+
+ if (after_block.IsValid()) {
+ return Split(range, data, after_block);
+ }
+
+ return range;
+}
+
+
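+// Returns the first instruction index covered by |interval|. An interval
+// starting at an instruction's END position only covers the next instruction.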
+int GetFirstInstructionIndex(const UseInterval* interval) {
+ int ret = interval->start().ToInstructionIndex();
+ if (!interval->start().IsGapPosition() && !interval->start().IsStart()) {
+ ++ret;
+ }
+ return ret;
+}
+
+
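+// Returns true if any instruction in the index range [start, end] is a call,
+// and therefore clobbers registers.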
+bool DoesSubsequenceClobber(const InstructionSequence* code, int start,
+ int end) {
+ for (int i = start; i <= end; ++i) {
+ if (code->InstructionAt(i)->IsCall()) return true;
+ }
+ return false;
+}
+
+
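+// Splits |range| around deferred blocks containing calls, so that the
+// portions living in such blocks become separate child ranges that can be
+// spilled and filled locally inside those blocks.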
+void SplitRangeAtDeferredBlocksWithCalls(LiveRange* range,
+ RegisterAllocationData* data) {
+ DCHECK(!range->IsFixed());
+ DCHECK(!range->spilled());
+ if (range->TopLevel()->HasSpillOperand()) {
+ TRACE(
+ "Skipping deferred block analysis for live range %d because it has a "
+ "spill operand.\n",
+ range->TopLevel()->id());
+ return;
+ }
+
+ const InstructionSequence* code = data->code();
+ LiveRange* current_subrange = range;
+
+ UseInterval* interval = current_subrange->first_interval();
+
+ while (interval != nullptr) {
+ int first_index = GetFirstInstructionIndex(interval);
+ int last_index = interval->end().ToInstructionIndex();
+
+ if (last_index > code->LastInstructionIndex()) {
+ last_index = code->LastInstructionIndex();
+ }
+
+ interval = interval->next();
+
+ for (int index = first_index; index <= last_index;) {
+ const InstructionBlock* block = code->GetInstructionBlock(index);
+ int last_block_index = static_cast<int>(block->last_instruction_index());
+ int last_covered_index = Min(last_index, last_block_index);
+ int working_index = index;
+ index = block->last_instruction_index() + 1;
+
+ if (!block->IsDeferred() ||
+ !DoesSubsequenceClobber(code, working_index, last_covered_index)) {
+ continue;
+ }
+
+ TRACE("Deferred block B%d clobbers range %d(v%d).\n",
+ block->rpo_number().ToInt(), current_subrange->id(),
+ current_subrange->TopLevel()->id());
+ LifetimePosition block_start =
+ GetSplitPositionForInstruction(current_subrange, working_index);
+ LiveRange* block_and_after = nullptr;
+ if (block_start.IsValid()) {
+ block_and_after = Split(current_subrange, data, block_start);
+ } else {
+ block_and_after = current_subrange;
+ }
+ LiveRange* next = SplitRangeAfterBlock(block_and_after, data, block);
+ if (next != current_subrange) interval = next->first_interval();
+ current_subrange = next;
+ break;
+ }
+ }
+}
+} // namespace
+
+
void PreprocessLiveRanges::PreprocessRanges() {
SplitRangesAroundDeferredBlocks();
}
-void PreprocessLiveRanges::SplitRangesAroundDeferredBlocks() {}
-
+void PreprocessLiveRanges::SplitRangesAroundDeferredBlocks() {
+ size_t live_range_count = data()->live_ranges().size();
+ for (size_t i = 0; i < live_range_count; i++) {
+ LiveRange* range = data()->live_ranges()[i];
+ if (range != nullptr && !range->IsEmpty() && !range->spilled() &&
+ !range->IsFixed() && !range->IsChild()) {
+ SplitRangeAtDeferredBlocksWithCalls(range, data());
+ }
+ }
+}
} // namespace compiler
} // namespace internal
last_processed_use_(nullptr),
current_hint_position_(nullptr),
size_(kInvalidSize),
- weight_(kInvalidWeight) {
+ weight_(kInvalidWeight),
+ spilled_in_deferred_block_(false) {
DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
AssignedRegisterField::encode(kUnassignedRegister) |
MachineTypeField::encode(machine_type);
}
+bool LiveRange::TryCommitSpillInDeferredBlock(
+ InstructionSequence* code, const InstructionOperand& spill_operand) {
+ DCHECK(!IsChild());
+
+ if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() ||
+ spill_operand.IsConstant() || spill_operand.IsImmediate()) {
+ return false;
+ }
+
+ int count = 0;
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+ int first_instr = child->Start().ToInstructionIndex();
+
+ // If the range starts at instruction end, the first instruction index is
+ // the next one.
+ if (!child->Start().IsGapPosition() && !child->Start().IsStart()) {
+ ++first_instr;
+ }
+
+ // We only look at where the range starts. It doesn't matter where it ends:
+ // if it ends past this block, then either there is a phi there already,
+ // or ResolveControlFlow will adapt the last instruction gap of this block
+ // as if there were a phi. In either case, data flow will be correct.
+ const InstructionBlock* block = code->GetInstructionBlock(first_instr);
+
+ // A slot use in a child means the value must be on the stack before that
+ // use; this is only acceptable here if the child itself lies in a deferred
+ // block, where the spill is inserted below.
+ bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr;
+ if (!block->IsDeferred()) {
+ if (child->spilled() || has_slot_use) {
+ TRACE(
+ "Live Range %d must be spilled at definition: found a "
+ "slot-requiring non-deferred child range %d.\n",
+ TopLevel()->id(), child->id());
+ return false;
+ }
+ } else {
+ if (child->spilled() || has_slot_use) ++count;
+ }
+ }
+ if (count == 0) return false;
+
+ spill_start_index_ = -1;
+ spilled_in_deferred_block_ = true;
+
+ TRACE("Live Range %d will be spilled only in deferred blocks.\n", id());
+ // If we have ranges that aren't spilled but require the operand on the stack,
+ // make sure we insert the spill.
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+ if (!child->spilled() &&
+ child->NextSlotPosition(child->Start()) != nullptr) {
+ auto instr = code->InstructionAt(child->Start().ToInstructionIndex());
+ // Insert spill at the end to let live range connections happen at START.
+ auto move =
+ instr->GetOrCreateParallelMove(Instruction::END, code->zone());
+ InstructionOperand assigned = child->GetAssignedOperand();
+ if (TopLevel()->has_slot_use()) {
+ bool found = false;
+ for (auto move_op : *move) {
+ if (move_op->IsEliminated()) continue;
+ if (move_op->source().Equals(assigned) &&
+ move_op->destination().Equals(spill_operand)) {
+ found = true;
+ break;
+ }
+ }
+ if (found) continue;
+ }
+
+ move->AddMove(assigned, spill_operand);
+ }
+ }
+
+ return true;
+}
+
+
void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
const InstructionOperand& op,
bool might_be_duplicated) {
DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
DCHECK(!IsChild());
auto zone = sequence->zone();
+
for (auto to_spill = spills_at_definition_; to_spill != nullptr;
to_spill = to_spill->next) {
auto instr = sequence->InstructionAt(to_spill->gap_index);
}
+UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
+ for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
+ pos = pos->next()) {
+ if (pos->type() != UsePositionType::kRequiresSlot) continue;
+ return pos;
+ }
+ return nullptr;
+}
+
+
bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
// We cannot spill a live range that has a use requiring a register
// at the current or the immediate next position.
int out_vreg = ConstantOperand::cast(output)->virtual_register();
live->Remove(out_vreg);
}
- Define(curr_position, output);
+ if (block->IsHandler() && index == block_start) {
+ // The register defined here is blocked from gap start - it is the
+ // exception value.
+ // TODO(mtrofin): should we explore an explicit opcode for
+ // the first instruction in the handler?
+ Define(LifetimePosition::GapFromInstructionIndex(index), output);
+ } else {
+ Define(curr_position, output);
+ }
}
if (instr->ClobbersRegisters()) {
data()->GetPhiMapValueFor(range->id())->CommitAssignment(assigned);
}
if (!range->IsChild() && !spill_operand.IsInvalid()) {
- range->CommitSpillsAtDefinition(
- data()->code(), spill_operand,
- range->has_slot_use() || range->spilled());
+ // If this top level range has a child spilled in a deferred block, we use
+ // the range and control flow connection mechanism instead of spilling at
+ // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
+ // phases. Normally, when we spill at definition, we do not insert a
+ // connecting move when a successor child range is spilled - because the
+ // spilled range picks up its value from the slot which was assigned at
+ // definition. For ranges that are determined to spill only in deferred
+ // blocks, we let ConnectLiveRanges and ResolveControlFlow insert such
+ // moves between ranges. Because of how the ranges are split around
+ // deferred blocks, this amounts to spilling and filling inside such
+ // blocks.
+ if (!range->TryCommitSpillInDeferredBlock(data()->code(),
+ spill_operand)) {
+ // Spill at definition if the range isn't spilled only in deferred
+ // blocks.
+ range->CommitSpillsAtDefinition(
+ data()->code(), spill_operand,
+ range->has_slot_use() || range->spilled());
+ }
}
}
}
// Check if the live range is spilled and the safe point is after
// the spill position.
- if (!spill_operand.IsInvalid() &&
- safe_point >= range->spill_start_index()) {
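+ // For ranges spilled only in deferred blocks there is no single spill at
+ // definition; use the start of the current child as the spill position.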
+ int spill_index = range->IsSpilledOnlyInDeferredBlocks()
+ ? cur->Start().ToInstructionIndex()
+ : range->spill_start_index();
+
+ if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
- range->id(), range->spill_start_index(), safe_point);
+ range->id(), spill_index, safe_point);
map->RecordReference(AllocatedOperand::cast(spill_operand));
}
const auto* pred_block = code()->InstructionBlockAt(pred);
array->Find(block, pred_block, &result);
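+ // Do not skip ranges spilled only in deferred blocks: the connecting moves
+ // inserted here are what spill and fill them inside those blocks.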
if (result.cur_cover_ == result.pred_cover_ ||
- result.cur_cover_->spilled())
+ (!result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
+ result.cur_cover_->spilled()))
continue;
auto pred_op = result.pred_cover_->GetAssignedOperand();
auto cur_op = result.cur_cover_->GetAssignedOperand();
DelayedInsertionMap delayed_insertion_map(local_zone);
for (auto first_range : data()->live_ranges()) {
if (first_range == nullptr || first_range->IsChild()) continue;
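+ // Ranges spilled only in deferred blocks still need connecting moves, so
+ // that their children are spilled to and filled from the slot there.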
+ bool connect_spilled = first_range->IsSpilledOnlyInDeferredBlocks();
for (auto second_range = first_range->next(); second_range != nullptr;
first_range = second_range, second_range = second_range->next()) {
auto pos = second_range->Start();
// Add gap move if the two live ranges touch and there is no block
// boundary.
- if (second_range->spilled()) continue;
+ if (!connect_spilled && second_range->spilled()) continue;
if (first_range->End() != pos) continue;
if (data()->IsBlockBoundary(pos) &&
!CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
// range and which follows both start and last processed use position
UsePosition* NextRegisterPosition(LifetimePosition start) const;
+ // Returns the first use position requiring a stack slot, or nullptr.
+ UsePosition* NextSlotPosition(LifetimePosition start) const;
+
// Returns use position for which register is beneficial in this live
// range and which follows both start and last processed use position
UsePosition* NextUsePositionRegisterIsBeneficial(
void CommitSpillsAtDefinition(InstructionSequence* sequence,
const InstructionOperand& operand,
bool might_be_duplicated);
+ // This must be applied to top-level ranges only.
+ // If every spilled child of this range lies in a deferred block, and every
+ // non-spilled child with a use position requiring a slot is also contained
+ // in a deferred block, mark the range as IsSpilledOnlyInDeferredBlocks, so
+ // that we avoid spilling at definition and instead let the
+ // LiveRangeConnector perform the spills within the deferred blocks. In that
+ // case, spills are inserted here for the non-spilled children that have slot
+ // use positions.
+ bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
+ const InstructionOperand& spill_operand);
void SetSpillStartIndex(int start) {
spill_start_index_ = Min(start, spill_start_index_);
float weight() const { return weight_; }
void set_weight(float weight) { weight_ = weight; }
+ bool IsSpilledOnlyInDeferredBlocks() const {
+ return spilled_in_deferred_block_;
+ }
+
static const int kInvalidSize = -1;
static const float kInvalidWeight;
static const float kMaxWeight;
// greedy: a metric for resolving conflicts between ranges with an assigned
// register and ranges that intersect them and need a register.
float weight_;
+
+ // TODO(mtrofin): generalize spilling after definition, currently specialized
+ // just for spill in a single deferred block.
+ bool spilled_in_deferred_block_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
};
+// Insert moves of the form
+//
+// Operand(child_(k+1)) = Operand(child_k)
+//
+// where child_k and child_(k+1) are consecutive children of a range (so
+// child_k->next() == child_(k+1)), and Operand(...) refers to the
+// assigned operand, be it a register or a slot.
class LiveRangeConnector final : public ZoneObject {
public:
explicit LiveRangeConnector(RegisterAllocationData* data);
- // Phase 8: reconnect split ranges with moves.
+ // Phase 8: reconnect split ranges with moves, when the control flow
+ // between the ranges is trivial (no branches).
void ConnectRanges(Zone* local_zone);
- // Phase 9: insert moves to connect ranges across basic blocks.
+ // Phase 9: insert moves to connect ranges across basic blocks, when the
+ // control flow between them cannot be trivially resolved, such as joining
+ // branches.
void ResolveControlFlow(Zone* local_zone);
private:
Zone* code_zone() const { return code()->zone(); }
bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
+
void ResolveControlFlow(const InstructionBlock* block,
const InstructionOperand& cur_op,
const InstructionBlock* pred,
}
-void InstructionSequenceTest::StartBlock() {
+void InstructionSequenceTest::StartBlock(bool deferred) {
block_returns_ = false;
- NewBlock();
+ NewBlock(deferred);
}
}
-InstructionBlock* InstructionSequenceTest::NewBlock() {
+InstructionBlock* InstructionSequenceTest::NewBlock(bool deferred) {
CHECK(current_block_ == nullptr);
Rpo rpo = Rpo::FromInt(static_cast<int>(instruction_blocks_.size()));
Rpo loop_header = Rpo::Invalid();
}
// Construct instruction block.
auto instruction_block = new (zone())
- InstructionBlock(zone(), rpo, loop_header, loop_end, false, false);
+ InstructionBlock(zone(), rpo, loop_header, loop_end, deferred, false);
instruction_blocks_.push_back(instruction_block);
current_block_ = instruction_block;
sequence()->StartBlock(rpo);
void StartLoop(int loop_blocks);
void EndLoop();
- void StartBlock();
+ void StartBlock(bool deferred = false);
Instruction* EndBlock(BlockCompletion completion = FallThrough());
TestOperand Imm(int32_t imm = 0);
InstructionOperand* ConvertInputs(size_t input_size, TestOperand* inputs);
InstructionOperand ConvertInputOp(TestOperand op);
InstructionOperand ConvertOutputOp(VReg vreg, TestOperand op);
- InstructionBlock* NewBlock();
+ InstructionBlock* NewBlock(bool deferred = false);
void WireBlock(size_t block_offset, int jump_offset);
Instruction* Emit(InstructionCode code, size_t outputs_size = 0,
namespace internal {
namespace compiler {
+
+namespace {
+
+// We can't just use the size of the moves collection, because eliminated and
+// redundant moves need to be discounted.
+int GetMoveCount(const ParallelMove& moves) {
+ int move_count = 0;
+ for (auto move : moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ ++move_count;
+ }
+ return move_count;
+}
+
+
+bool AreOperandsOfSameType(
+ const AllocatedOperand& op,
+ const InstructionSequenceTest::TestOperand& test_op) {
+ bool test_op_is_reg =
+ (test_op.type_ ==
+ InstructionSequenceTest::TestOperandType::kFixedRegister ||
+ test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister);
+
+ return (op.IsRegister() && test_op_is_reg) ||
+ (op.IsStackSlot() && !test_op_is_reg);
+}
+
+
+bool AllocatedOperandMatches(
+ const AllocatedOperand& op,
+ const InstructionSequenceTest::TestOperand& test_op) {
+ return AreOperandsOfSameType(op, test_op) &&
+ (op.index() == test_op.value_ ||
+ test_op.value_ == InstructionSequenceTest::kNoValue);
+}
+
+
+int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
+ const InstructionSequence* sequence) {
+ const ParallelMove* moves =
+ sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+ if (moves == nullptr) return 0;
+ return GetMoveCount(*moves);
+}
+
+
+bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
+ const InstructionSequence* sequence,
+ const InstructionSequenceTest::TestOperand& src,
+ const InstructionSequenceTest::TestOperand& dest) {
+ const ParallelMove* moves =
+ sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+ EXPECT_NE(nullptr, moves);
+
+ bool found_match = false;
+ for (auto move : *moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
+ AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
+ dest)) {
+ found_match = true;
+ break;
+ }
+ }
+ return found_match;
+}
+} // namespace
+
+
class RegisterAllocatorTest : public InstructionSequenceTest {
public:
void Allocate() {
}
+TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
+ StartBlock();
+ auto x = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(x), 1, 2));
+
+ StartBlock();
+ EmitCall(Slot(-1));
+ auto occupy = EmitOI(Reg(0));
+ EndBlock(Jump(2));
+
+ StartBlock();
+ EndBlock(FallThrough());
+
+ StartBlock();
+ Use(occupy);
+ Return(Reg(x));
+ EndBlock();
+ Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
+ StartBlock();
+ auto x = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(x), 1, 2));
+
+ StartBlock();
+ EndBlock(Jump(2));
+
+ StartBlock();
+ EmitCall(Slot(-1));
+ auto occupy = EmitOI(Reg(0));
+ EndBlock(FallThrough());
+
+ StartBlock();
+ Use(occupy);
+ Return(Reg(x));
+ EndBlock();
+ Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
+ StartBlock(); // B0
+ auto var = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(var), 1, 2));
+
+ StartBlock(); // B1
+ EndBlock(Jump(2));
+
+ StartBlock(true); // B2
+ EmitCall(Slot(-1), Slot(var));
+ EndBlock();
+
+ StartBlock(); // B3
+ EmitNop();
+ EndBlock();
+
+ StartBlock(); // B4
+ Return(Reg(var, 0));
+ EndBlock();
+
+ Allocate();
+
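+ // With FLAG_turbo_preprocess_ranges, |var| is spilled at the call inside
+ // the deferred block rather than at its definition; otherwise the spill
+ // stays at the definition.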
+ const int var_def_index = 1;
+ const int call_index = 3;
+ int expect_no_moves =
+ FLAG_turbo_preprocess_ranges ? var_def_index : call_index;
+ int expect_spill_move =
+ FLAG_turbo_preprocess_ranges ? call_index : var_def_index;
+
+ // We should have no parallel moves at the "expect_no_moves" position.
+ EXPECT_EQ(
+ 0, GetParallelMoveCount(expect_no_moves, Instruction::START, sequence()));
+
+ // The spill should be performed at the position expect_spill_move.
+ EXPECT_TRUE(IsParallelMovePresent(expect_spill_move, Instruction::START,
+ sequence(), Reg(0), Slot(0)));
+}
+
+
+TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
+ if (!FLAG_turbo_preprocess_ranges) return;
+
+ StartBlock(); // B0
+ auto var1 = EmitOI(Reg(0));
+ auto var2 = EmitOI(Reg(1));
+ auto var3 = EmitOI(Reg(2));
+ EndBlock(Branch(Reg(var1, 0), 1, 2));
+
+ StartBlock(true); // B1
+ EmitCall(Slot(-2), Slot(var1));
+ EndBlock(Jump(2));
+
+ StartBlock(true); // B2
+ EmitCall(Slot(-1), Slot(var2));
+ EndBlock();
+
+ StartBlock(); // B3
+ EmitNop();
+ EndBlock();
+
+ StartBlock(); // B4
+ Return(Reg(var3, 2));
+ EndBlock();
+
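+ // Instruction indices of interest in the generated sequence, used below to
+ // check where spill and fill moves were inserted.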
+ const int def_of_v2 = 3;
+ const int call_in_b1 = 4;
+ const int call_in_b2 = 6;
+ const int end_of_b1 = 5;
+ const int end_of_b2 = 7;
+ const int start_of_b3 = 8;
+
+ Allocate();
+ // TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
+ // so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2.
+ // Expand the test once greedy is back online with this facility.
+ const int var3_reg = 2;
+ const int var3_slot = 2;
+
+ EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
+ Reg(var3_reg), Slot()));
+ EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(),
+ Reg(var3_reg), Slot(var3_slot)));
+ EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(),
+ Slot(var3_slot), Reg()));
+
+ EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(),
+ Reg(var3_reg), Slot(var3_slot)));
+ EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(),
+ Slot(var3_slot), Reg()));
+
+ EXPECT_EQ(0,
+ GetParallelMoveCount(start_of_b3, Instruction::START, sequence()));
+}
+
+
namespace {
enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };