Run<PopulatePointerMapsPhase>();
Run<ConnectRangesPhase>();
Run<ResolveControlFlowPhase>();
- Run<OptimizeMovesPhase>();
+ // Gap-move optimization is now gated on the new --turbo-move-optimization
+ // flag (default true), so it can be switched off for debugging.
+ if (FLAG_turbo_move_optimization) {
+ Run<OptimizeMovesPhase>();
+ }
if (FLAG_trace_turbo_graph) {
OFStream os(stdout);
// TODO(dcarney): this is just for experimentation, remove when default.
DEFINE_BOOL(turbo_delay_ssa_decon, false,
"delay ssa deconstruction in TurboFan register allocator")
+// TODO(dcarney): this is just for debugging, remove eventually.
+DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
DEFINE_BOOL(turbo_jt, true, "enable jump threading")
DEFINE_INT(typed_array_max_size_in_heap, 64,
int InstructionSequenceTest::EmitNop() { return Emit(NewIndex(), kArchNop); }
-int InstructionSequenceTest::EmitI(TestOperand input_op_0) {
- InstructionOperand* inputs[1]{ConvertInputOp(input_op_0)};
- return Emit(NewIndex(), kArchNop, 0, nullptr, 1, inputs);
+// Returns the number of leading entries in |inputs| before the first
+// kInvalid one (presumably the default-constructed TestOperand state —
+// see the defaulted parameters in the header). Lets the fixed-arity
+// overloads below trim unused trailing operands.
+static size_t CountInputs(size_t size,
+ InstructionSequenceTest::TestOperand* inputs) {
+ size_t i = 0;
+ for (; i < size; ++i) {
+ if (inputs[i].type_ == InstructionSequenceTest::kInvalid) break;
+ }
+ return i;
+}
+
+
+// Core overload: emits a kArchNop with |input_size| converted inputs.
+int InstructionSequenceTest::EmitI(size_t input_size, TestOperand* inputs) {
+ InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
+ return Emit(NewIndex(), kArchNop, 0, nullptr, input_size, mapped_inputs);
+}
+
+
+// Convenience overload accepting up to four operands; operands left at
+// their declaration defaults are trimmed via CountInputs.
+int InstructionSequenceTest::EmitI(TestOperand input_op_0,
+ TestOperand input_op_1,
+ TestOperand input_op_2,
+ TestOperand input_op_3) {
+ TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
+ return EmitI(CountInputs(arraysize(inputs), inputs), inputs);
 }
InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
- TestOperand output_op, TestOperand input_op_0) {
+ TestOperand output_op, size_t input_size, TestOperand* inputs) {
VReg output_vreg = NewReg();
InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
- InstructionOperand* inputs[1]{ConvertInputOp(input_op_0)};
- Emit(output_vreg.value_, kArchNop, 1, outputs, 1, inputs);
+ // All inputs are converted in one pass by the shared ConvertInputs helper
+ // instead of the previous single-operand inline conversion.
+ InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
+ Emit(output_vreg.value_, kArchNop, 1, outputs, input_size, mapped_inputs);
return output_vreg;
}
-InstructionSequenceTest::VReg InstructionSequenceTest::EmitOII(
- TestOperand output_op, TestOperand input_op_0, TestOperand input_op_1) {
- VReg output_vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
- InstructionOperand* inputs[2]{ConvertInputOp(input_op_0),
- ConvertInputOp(input_op_1)};
- Emit(output_vreg.value_, kArchNop, 1, outputs, 2, inputs);
- return output_vreg;
+// Replaces the old two-input EmitOII: this EmitOI overload accepts up to
+// four operands and trims unused trailing ones via CountInputs.
+InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
+ TestOperand output_op, TestOperand input_op_0, TestOperand input_op_1,
+ TestOperand input_op_2, TestOperand input_op_3) {
+ TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
+ return EmitOI(output_op, CountInputs(arraysize(inputs), inputs), inputs);
}
VReg output_vreg = NewReg();
InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
CHECK(UnallocatedOperand::cast(outputs[0])->HasFixedPolicy());
- InstructionOperand** mapped_inputs =
- zone()->NewArray<InstructionOperand*>(static_cast<int>(input_size));
- for (size_t i = 0; i < input_size; ++i) {
- mapped_inputs[i] = ConvertInputOp(inputs[i]);
- }
+ InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
Emit(output_vreg.value_, kArchCallCodeObject, 1, outputs, input_size,
mapped_inputs, 0, nullptr, true);
return output_vreg;
TestOperand output_op, TestOperand input_op_0, TestOperand input_op_1,
TestOperand input_op_2, TestOperand input_op_3) {
TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
- size_t size = 0;
- for (; size < arraysize(inputs); ++size) {
- if (inputs[size].type_ == kInvalid) break;
- }
- return EmitCall(output_op, size, inputs);
+ return EmitCall(output_op, CountInputs(arraysize(inputs), inputs), inputs);
}
}
+// Maps |input_size| TestOperands to a zone-allocated array of
+// InstructionOperand pointers via ConvertInputOp (factored out of
+// EmitCall for reuse by the EmitI/EmitOI overloads).
+InstructionOperand** InstructionSequenceTest::ConvertInputs(
+ size_t input_size, TestOperand* inputs) {
+ InstructionOperand** mapped_inputs =
+ zone()->NewArray<InstructionOperand*>(static_cast<int>(input_size));
+ for (size_t i = 0; i < input_size; ++i) {
+ mapped_inputs[i] = ConvertInputOp(inputs[i]);
+ }
+ return mapped_inputs;
+}
+
+
InstructionOperand* InstructionSequenceTest::ConvertInputOp(TestOperand op) {
if (op.type_ == kImmediate) {
CHECK_EQ(op.vreg_.value_, kNoValue);
case kNone:
return Unallocated(op, UnallocatedOperand::NONE,
UnallocatedOperand::USED_AT_START);
+ // NOTE(review): unlike the kNone/kRegister cases, the kUnique variants
+ // omit USED_AT_START — presumably so the operand stays live across the
+ // whole instruction and cannot share a register with the output; confirm.
+ case kUnique:
+ return Unallocated(op, UnallocatedOperand::NONE);
+ case kUniqueRegister:
+ return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER);
case kRegister:
return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START);
kFixedSlot,
kImmediate,
kNone,
- kConstant
+ kConstant,
+ kUnique,
+ kUniqueRegister
};
struct TestOperand {
static TestOperand Use() { return Use(VReg()); }
+ // Factories for the new kUnique/kUniqueRegister operand types: uses that
+ // are converted without USED_AT_START (see ConvertInputOp in the .cc).
+ static TestOperand Unique(VReg vreg) { return TestOperand(kUnique, vreg); }
+
+ static TestOperand UniqueReg(VReg vreg) {
+ return TestOperand(kUniqueRegister, vreg);
+ }
+
enum BlockCompletionType { kBlockEnd, kFallThrough, kBranch, kJump };
struct BlockCompletion {
VReg DefineConstant(int32_t imm = 0);
int EmitNop();
- int EmitI(TestOperand input_op_0);
- VReg EmitOI(TestOperand output_op, TestOperand input_op_0);
- VReg EmitOII(TestOperand output_op, TestOperand input_op_0,
- TestOperand input_op_1);
+ int EmitI(size_t input_size, TestOperand* inputs);
+ int EmitI(TestOperand input_op_0 = TestOperand(),
+ TestOperand input_op_1 = TestOperand(),
+ TestOperand input_op_2 = TestOperand(),
+ TestOperand input_op_3 = TestOperand());
+ VReg EmitOI(TestOperand output_op, size_t input_size, TestOperand* inputs);
+ VReg EmitOI(TestOperand output_op, TestOperand input_op_0 = TestOperand(),
+ TestOperand input_op_1 = TestOperand(),
+ TestOperand input_op_2 = TestOperand(),
+ TestOperand input_op_3 = TestOperand());
VReg EmitCall(TestOperand output_op, size_t input_size, TestOperand* inputs);
VReg EmitCall(TestOperand output_op, TestOperand input_op_0 = TestOperand(),
TestOperand input_op_1 = TestOperand(),
InstructionOperand* Unallocated(TestOperand op,
UnallocatedOperand::BasicPolicy policy,
int index);
+ InstructionOperand** ConvertInputs(size_t input_size, TestOperand* inputs);
InstructionOperand* ConvertInputOp(TestOperand op);
InstructionOperand* ConvertOutputOp(VReg vreg, TestOperand op);
InstructionBlock* NewBlock();
StartBlock();
auto a_reg = Parameter();
auto b_reg = Parameter();
- auto c_reg = EmitOII(Reg(1), Reg(a_reg, 1), Reg(b_reg, 0));
+ auto c_reg = EmitOI(Reg(1), Reg(a_reg, 1), Reg(b_reg, 0));
Return(c_reg);
EndBlock(Last());
StartBlock();
auto phi = Phi(i_reg);
- auto ipp = EmitOII(Same(), Reg(phi), Use(DefineConstant()));
+ auto ipp = EmitOI(Same(), Reg(phi), Use(DefineConstant()));
Extend(phi, ipp);
EndBlock(Jump(0));
// Perform some computations.
// something like phi[i] += const
for (size_t i = 0; i < arraysize(parameters); ++i) {
- auto result = EmitOII(Same(), Reg(phis[i]), Use(constant));
+ auto result = EmitOI(Same(), Reg(phis[i]), Use(constant));
Extend(phis[i], result);
}
Allocate();
}
+
+// NOTE(review): exercises the new kUniqueRegister operand — with all
+// kNumRegs registers occupied, the fixed Reg(0) output presumably forces
+// values[0] to be split before its use in the following instruction;
+// confirm against the allocator's split points.
+TEST_F(RegisterAllocatorTest, SplitBeforeInstruction) {
+ const int kNumRegs = 6;
+ SetNumRegs(kNumRegs, kNumRegs);
+
+ StartBlock();
+
+ // Stack parameters/spilled values.
+ auto p_0 = Define(Slot(-1));
+ auto p_1 = Define(Slot(-2));
+
+ // Fill registers.
+ VReg values[kNumRegs];
+ for (size_t i = 0; i < arraysize(values); ++i) {
+ values[i] = Define(Reg(static_cast<int>(i)));
+ }
+
+ // values[0] will be split in the second half of this instruction.
+ // Models Intel mod instructions.
+ EmitOI(Reg(0), Reg(p_0, 1), UniqueReg(p_1));
+ EmitI(Reg(values[0], 0));
+ EndBlock(Last());
+
+ Allocate();
+}
+
+
+// Nested diamond CFG: two inner diamonds each produce a phi, and both
+// inner phis merge into an outer phi. All definitions use unconstrained
+// registers. (Branch/Jump arguments appear to be relative block offsets —
+// see InstructionSequenceTest.)
+TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
+ // Outer diamond.
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 5));
+
+ // Diamond 1
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ auto ll = Define(Reg());
+ EndBlock(Jump(2));
+
+ StartBlock();
+ auto lr = Define(Reg());
+ EndBlock();
+
+ StartBlock();
+ auto l_phi = Phi(ll, lr);
+ EndBlock(Jump(5));
+
+ // Diamond 2
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ auto rl = Define(Reg());
+ EndBlock(Jump(2));
+
+ StartBlock();
+ auto rr = Define(Reg());
+ EndBlock();
+
+ StartBlock();
+ auto r_phi = Phi(rl, rr);
+ EndBlock();
+
+ // Outer diamond merge.
+ StartBlock();
+ auto phi = Phi(l_phi, r_phi);
+ Return(Reg(phi));
+ EndBlock();
+
+ Allocate();
+}
+
+
+// Same nested-diamond shape as NestedDiamondPhiMerge, but each leaf value
+// is pinned to a different register (0-3), so the phi merges see inputs in
+// differing locations.
+TEST_F(RegisterAllocatorTest, NestedDiamondPhiMergeDifferent) {
+ // Outer diamond.
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 5));
+
+ // Diamond 1
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ auto ll = Define(Reg(0));
+ EndBlock(Jump(2));
+
+ StartBlock();
+ auto lr = Define(Reg(1));
+ EndBlock();
+
+ StartBlock();
+ auto l_phi = Phi(ll, lr);
+ EndBlock(Jump(5));
+
+ // Diamond 2
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ auto rl = Define(Reg(2));
+ EndBlock(Jump(2));
+
+ StartBlock();
+ auto rr = Define(Reg(3));
+ EndBlock();
+
+ StartBlock();
+ auto r_phi = Phi(rl, rr);
+ EndBlock();
+
+ // Outer diamond merge.
+ StartBlock();
+ auto phi = Phi(l_phi, r_phi);
+ Return(Reg(phi));
+ EndBlock();
+
+ Allocate();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8