"src/compiler/machine-operator.h",
"src/compiler/machine-type.cc",
"src/compiler/machine-type.h",
+ "src/compiler/move-optimizer.cc",
+ "src/compiler/move-optimizer.h",
"src/compiler/node-aux-data-inl.h",
"src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc",
bool IsRedundant() const;
+ ParallelMove** parallel_moves() { return parallel_moves_; }
+
static GapInstruction* New(Zone* zone) {
void* buffer = zone->New(sizeof(GapInstruction));
return new (buffer) GapInstruction(kGapInstruction);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/move-optimizer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Constructs the optimizer. |local_zone| backs the optimizer's scratch
+// vectors; |code| is the instruction sequence being optimized. New operands
+// created during optimization are allocated in |code|'s own zone (see
+// code_zone()), not in |local_zone|, so they outlive the optimizer.
+MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
+ : local_zone_(local_zone),
+ code_(code),
+ temp_vector_0_(local_zone),
+ temp_vector_1_(local_zone) {}
+
+
+// Walks every instruction block and (1) compresses all parallel-move slots of
+// each gap into slot 0, (2) merges runs of consecutive gaps (separated only
+// by nops/source positions) into the earliest gap of the run, then (3) calls
+// FinalizeMoves() on the surviving gap when the run ends.
+void MoveOptimizer::Run() {
+ // First smash all consecutive moves into the left most move slot.
+ for (auto* block : code()->instruction_blocks()) {
+ GapInstruction* prev_gap = nullptr;
+ for (int index = block->code_start(); index < block->code_end(); ++index) {
+ auto instr = code()->instructions()[index];
+ if (!instr->IsGapMoves()) {
+ // Nops and source positions do not break a run of adjacent gaps.
+ if (instr->IsSourcePosition() || instr->IsNop()) continue;
+ // A real instruction ends the run: finalize the accumulated gap.
+ FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
+ prev_gap = nullptr;
+ continue;
+ }
+ auto gap = GapInstruction::cast(instr);
+ // Find first non-empty slot.
+ int i = GapInstruction::FIRST_INNER_POSITION;
+ for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
+ auto move = gap->parallel_moves()[i];
+ if (move == nullptr) continue;
+ auto move_ops = move->move_operands();
+ auto op = move_ops->begin();
+ for (; op != move_ops->end(); ++op) {
+ if (!op->IsRedundant()) break;
+ }
+ if (op == move_ops->end()) {
+ move_ops->Rewind(0); // Clear this redundant move.
+ } else {
+ break; // Found index of first non-redundant move.
+ }
+ }
+ // Nothing to do here.
+ if (i == GapInstruction::LAST_INNER_POSITION + 1) {
+ if (prev_gap != nullptr) {
+ // Slide prev_gap down so we always know where to look for it.
+ // This gap's slot 0 is empty/cleared, so the swap just transfers
+ // prev_gap's accumulated moves into this gap.
+ std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+ prev_gap = gap;
+ }
+ continue;
+ }
+ // Move the first non-empty gap to position 0.
+ std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
+ auto left = gap->parallel_moves()[0];
+ // Compress everything into position 0.
+ for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
+ auto move = gap->parallel_moves()[i];
+ if (move == nullptr) continue;
+ CompressMoves(&temp_vector_0_, left, move);
+ }
+ if (prev_gap != nullptr) {
+ // Smash left into prev_gap, killing left.
+ auto pred_moves = prev_gap->parallel_moves()[0];
+ CompressMoves(&temp_vector_0_, pred_moves, left);
+ // Keep the merged moves in the latest gap of the run.
+ std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+ }
+ prev_gap = gap;
+ }
+ // Finalize a run that extends to the end of the block.
+ FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
+ }
+}
+
+
+// Prepares |move| (from the right-hand gap) to be appended after the moves in
+// |left|. Scans |left| for (a) a move whose destination equals |move|'s
+// source — in that case |move|'s source is forwarded to that move's source —
+// and (b) a move whose destination equals |move|'s destination, which becomes
+// dead once |move| is appended. Returns the move to eliminate ((b)), or
+// nullptr if none. |zone| is used to allocate the substituted source operand;
+// a fresh copy is made rather than sharing the pointer, presumably because
+// operands may later be mutated in place (see ConvertTo in FinalizeMoves) —
+// TODO(review): confirm.
+static MoveOperands* PrepareInsertAfter(ParallelMove* left, MoveOperands* move,
+ Zone* zone) {
+ auto move_ops = left->move_operands();
+ MoveOperands* replacement = nullptr;
+ MoveOperands* to_eliminate = nullptr;
+ for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) {
+ if (curr->IsEliminated()) continue;
+ if (curr->destination()->Equals(move->source())) {
+ DCHECK_EQ(nullptr, replacement);
+ replacement = curr;
+ // Both roles found: no need to scan further.
+ if (to_eliminate != nullptr) break;
+ } else if (curr->destination()->Equals(move->destination())) {
+ DCHECK_EQ(nullptr, to_eliminate);
+ to_eliminate = curr;
+ if (replacement != nullptr) break;
+ }
+ }
+ // A single left move cannot play both roles (would imply src == dst).
+ DCHECK(!(replacement == to_eliminate && replacement != nullptr));
+ if (replacement != nullptr) {
+ auto new_source = new (zone) InstructionOperand(
+ replacement->source()->kind(), replacement->source()->index());
+ move->set_source(new_source);
+ }
+ return to_eliminate;
+}
+
+
+// Merges the moves of |right| into |left|, leaving |right| empty. |eliminated|
+// is scratch storage (must be empty on entry; left empty on exit). Moves in
+// |left| that are overwritten by |right| are eliminated; sources in |right|
+// are forwarded through |left| (see PrepareInsertAfter).
+void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
+ ParallelMove* right) {
+ DCHECK(eliminated->empty());
+ auto move_ops = right->move_operands();
+ // Modify the right moves in place and collect moves that will be killed by
+ // merging the two gaps.
+ for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+ if (op->IsRedundant()) continue;
+ MoveOperands* to_eliminate = PrepareInsertAfter(left, op, code_zone());
+ if (to_eliminate != nullptr) {
+ eliminated->push_back(to_eliminate);
+ }
+ }
+ // Eliminate dead moves. Must happen before insertion of new moves as the
+ // contents of eliminated are pointers into a list.
+ for (auto to_eliminate : *eliminated) {
+ to_eliminate->Eliminate();
+ }
+ eliminated->clear();
+ // Add all possibly modified moves from right side.
+ for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+ if (op->IsRedundant()) continue;
+ left->move_operands()->Add(*op, code_zone());
+ }
+ // Nuke right.
+ move_ops->Rewind(0);
+}
+
+
+// Post-processes the single surviving gap of a run. Repeated loads of the
+// same constant or stack slot are rewritten so the value is loaded once (kept
+// in slot 0) and the remaining copies become register-to-destination moves
+// placed in slot 1. |loads| and |new_moves| are scratch vectors (empty on
+// entry and exit). No-op when |gap| is nullptr.
+void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
+ GapInstruction* gap) {
+ DCHECK(loads->empty());
+ DCHECK(new_moves->empty());
+ if (gap == nullptr) return;
+ // Split multiple loads of the same constant or stack slot off into the second
+ // slot and keep remaining moves in the first slot.
+ auto move_ops = gap->parallel_moves()[0]->move_operands();
+ for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
+ if (move->IsRedundant()) {
+ move->Eliminate();
+ continue;
+ }
+ // Only constant and (double) stack-slot loads are candidates for sharing.
+ if (!(move->source()->IsConstant() || move->source()->IsStackSlot() ||
+ move->source()->IsDoubleStackSlot()))
+ continue;
+ // Search for existing move to this slot.
+ MoveOperands* found = nullptr;
+ for (auto load : *loads) {
+ if (load->source()->Equals(move->source())) {
+ found = load;
+ break;
+ }
+ }
+ // Not found so insert.
+ if (found == nullptr) {
+ loads->push_back(move);
+ // Replace source with copy for later use.
+ auto dest = move->destination();
+ move->set_destination(new (code_zone())
+ InstructionOperand(dest->kind(), dest->index()));
+ continue;
+ }
+ if ((found->destination()->IsStackSlot() ||
+ found->destination()->IsDoubleStackSlot()) &&
+ !(move->destination()->IsStackSlot() ||
+ move->destination()->IsDoubleStackSlot())) {
+ // Found a better source for this load. Smash it in place to affect other
+ // loads that have already been split.
+ InstructionOperand::Kind found_kind = found->destination()->kind();
+ int found_index = found->destination()->index();
+ auto next_dest =
+ new (code_zone()) InstructionOperand(found_kind, found_index);
+ auto dest = move->destination();
+ // ConvertTo mutates the shared operand, so previously split loads that
+ // reference it pick up the register destination too.
+ found->destination()->ConvertTo(dest->kind(), dest->index());
+ move->set_destination(next_dest);
+ }
+ // move from load destination.
+ move->set_source(found->destination());
+ new_moves->push_back(move);
+ }
+ loads->clear();
+ if (new_moves->empty()) return;
+ // Insert all new moves into slot 1.
+ auto slot_1 = gap->GetOrCreateParallelMove(
+ static_cast<GapInstruction::InnerPosition>(1), code_zone());
+ DCHECK(slot_1->move_operands()->is_empty());
+ // Preallocate placeholder moves, then swap the real moves into them; this
+ // leaves inert (nullptr, nullptr) moves behind in slot 0's list.
+ slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
+ static_cast<int>(new_moves->size()),
+ code_zone());
+ auto it = slot_1->move_operands()->begin();
+ for (auto new_move : *new_moves) {
+ std::swap(*new_move, *it);
+ ++it;
+ }
+ DCHECK_EQ(it, slot_1->move_operands()->end());
+ new_moves->clear();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MOVE_OPTIMIZER_
+#define V8_COMPILER_MOVE_OPTIMIZER_
+
+#include "src/compiler/instruction.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Optimizes the parallel moves attached to gap instructions: compresses the
+// per-gap move slots, merges adjacent gaps, and splits repeated constant/
+// stack-slot loads. Run after register allocation (see OptimizeMovesPhase).
+class MoveOptimizer FINAL {
+ public:
+ MoveOptimizer(Zone* local_zone, InstructionSequence* code);
+ void Run();
+
+ private:
+ typedef ZoneVector<MoveOperands*> MoveOpVector;
+
+ InstructionSequence* code() const { return code_; }
+ Zone* local_zone() const { return local_zone_; }
+ // Zone of the instruction sequence; newly created operands live here.
+ Zone* code_zone() const { return code()->zone(); }
+
+ // Merges |right|'s moves into |left|; |eliminated| is scratch storage.
+ void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
+ ParallelMove* right);
+ // Splits shared loads into the gap's second slot; |loads|/|new_moves| are
+ // scratch storage.
+ void FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
+ GapInstruction* gap);
+
+ Zone* const local_zone_;
+ InstructionSequence* const code_;
+ // Reusable scratch vectors so Run() does not allocate per gap.
+ MoveOpVector temp_vector_0_;
+ MoveOpVector temp_vector_1_;
+
+ DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MOVE_OPTIMIZER_
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/move-optimizer.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/register-allocator-verifier.h"
};
+// Pipeline phase wrapper that runs the MoveOptimizer over the instruction
+// sequence, using the phase's temporary zone for scratch state.
+struct OptimizeMovesPhase {
+ static const char* phase_name() { return "optimize moves"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ MoveOptimizer move_optimizer(temp_zone, data->sequence());
+ move_optimizer.Run();
+ }
+};
+
+
struct JumpThreadingPhase {
static const char* phase_name() { return "jump threading"; }
Run<PopulatePointerMapsPhase>();
Run<ConnectRangesPhase>();
Run<ResolveControlFlowPhase>();
+ Run<OptimizeMovesPhase>();
if (FLAG_trace_turbo) {
OFStream os(stdout);
range->set_assigned_register(reg, code_zone());
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_REGISTER_ALLOCATOR_H_
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/pipeline.h"
+#include "test/unittests/compiler/instruction-sequence-unittest.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Static storage for synthetic register names ("gp_N"/"fp_N") shared by all
+// test-constructed RegisterConfigurations. 10 bytes per name is the budget.
+static const char*
+ general_register_names_[RegisterConfiguration::kMaxGeneralRegisters];
+static const char*
+ double_register_names_[RegisterConfiguration::kMaxDoubleRegisters];
+static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
+ RegisterConfiguration::kMaxDoubleRegisters)];
+
+
+// Fills the name tables above. Idempotent in effect (rewrites the same
+// strings each call); called from every test constructor.
+static void InitializeRegisterNames() {
+ char* loc = register_names_;
+ for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
+ general_register_names_[i] = loc;
+ // SNPrintF returns the char count excluding the terminator; stepping by
+ // that amount and then storing 0 overwrites SNPrintF's own terminator.
+ loc += base::OS::SNPrintF(loc, 100, "gp_%d", i);
+ *loc++ = 0;
+ }
+ for (int i = 0; i < RegisterConfiguration::kMaxDoubleRegisters; ++i) {
+ double_register_names_[i] = loc;
+ // NOTE(review): the "+ 1" skips past SNPrintF's terminator before the
+ // extra 0 is stored, wasting one byte per name — inconsistent with the
+ // gp loop above, though both produce valid C strings.
+ loc += base::OS::SNPrintF(loc, 100, "fp_%d", i) + 1;
+ *loc++ = 0;
+ }
+}
+
+
+// Sets up an empty test harness with the default register counts and
+// (re)initializes the shared register-name tables.
+InstructionSequenceTest::InstructionSequenceTest()
+ : sequence_(nullptr),
+ num_general_registers_(kDefaultNRegs),
+ num_double_registers_(kDefaultNRegs),
+ instruction_blocks_(zone()),
+ current_instruction_index_(-1),
+ current_block_(nullptr),
+ block_returns_(false) {
+ InitializeRegisterNames();
+}
+
+
+// Overrides the register counts. Must be called before any configuration,
+// instruction, or block has been created (CHECKed below).
+void InstructionSequenceTest::SetNumRegs(int num_general_registers,
+ int num_double_registers) {
+ CHECK(config_.is_empty());
+ CHECK(instructions_.empty());
+ CHECK(instruction_blocks_.empty());
+ num_general_registers_ = num_general_registers;
+ num_double_registers_ = num_double_registers;
+}
+
+
+// Lazily builds the RegisterConfiguration from the current register counts.
+RegisterConfiguration* InstructionSequenceTest::config() {
+ if (config_.is_empty()) {
+ config_.Reset(new RegisterConfiguration(
+ num_general_registers_, num_double_registers_, num_double_registers_,
+ general_register_names_, double_register_names_));
+ }
+ return config_.get();
+}
+
+
+// Lazily creates the zone-allocated InstructionSequence under construction.
+InstructionSequence* InstructionSequenceTest::sequence() {
+ if (sequence_ == nullptr) {
+ sequence_ = new (zone()) InstructionSequence(zone(), &instruction_blocks_);
+ }
+ return sequence_;
+}
+
+
+// Opens a loop spanning the next |loop_blocks| blocks. Must be called between
+// blocks; nested loops require the outer header to exist already.
+void InstructionSequenceTest::StartLoop(int loop_blocks) {
+ CHECK(current_block_ == nullptr);
+ if (!loop_blocks_.empty()) {
+ CHECK(!loop_blocks_.back().loop_header_.IsValid());
+ }
+ LoopData loop_data = {Rpo::Invalid(), loop_blocks};
+ loop_blocks_.push_back(loop_data);
+}
+
+
+// Closes the innermost loop; all of its announced blocks must have been built.
+void InstructionSequenceTest::EndLoop() {
+ CHECK(current_block_ == nullptr);
+ CHECK(!loop_blocks_.empty());
+ CHECK_EQ(0, loop_blocks_.back().expected_blocks_);
+ loop_blocks_.pop_back();
+}
+
+
+// Begins a new instruction block and resets the return-seen flag.
+void InstructionSequenceTest::StartBlock() {
+ block_returns_ = false;
+ NewBlock();
+}
+
+
+// Ends the current block with the given control-flow completion, emitting the
+// corresponding terminator instruction. Returns that instruction's test index
+// (kMinInt when no instruction was emitted). Wiring of successor edges is
+// deferred to WireBlocks().
+int InstructionSequenceTest::EndBlock(BlockCompletion completion) {
+ int instruction_index = kMinInt;
+ if (block_returns_) {
+ // A return already terminates the block; downgrade to a plain block end.
+ CHECK(completion.type_ == kBlockEnd || completion.type_ == kFallThrough);
+ completion.type_ = kBlockEnd;
+ }
+ switch (completion.type_) {
+ case kBlockEnd:
+ break;
+ case kFallThrough:
+ instruction_index = EmitFallThrough();
+ break;
+ case kJump:
+ CHECK(!block_returns_);
+ instruction_index = EmitJump();
+ break;
+ case kBranch:
+ CHECK(!block_returns_);
+ instruction_index = EmitBranch(completion.op_);
+ break;
+ }
+ completions_.push_back(completion);
+ CHECK(current_block_ != nullptr);
+ sequence()->EndBlock(current_block_->rpo_number());
+ current_block_ = nullptr;
+ return instruction_index;
+}
+
+
+// Registers |imm| as a constant in the sequence and returns an immediate
+// test operand referring to it.
+InstructionSequenceTest::TestOperand InstructionSequenceTest::Imm(int32_t imm) {
+ int index = sequence()->AddImmediate(Constant(imm));
+ return TestOperand(kImmediate, index);
+}
+
+
+// Emits a nop defining a fresh virtual register with the given output
+// constraint and returns the new vreg.
+InstructionSequenceTest::VReg InstructionSequenceTest::Define(
+ TestOperand output_op) {
+ VReg vreg = NewReg();
+ InstructionOperand* outputs[1]{ConvertOutputOp(vreg, output_op)};
+ Emit(vreg.value_, kArchNop, 1, outputs);
+ return vreg;
+}
+
+
+// Emits a return of |input_op_0| and marks the current block as returning.
+int InstructionSequenceTest::Return(TestOperand input_op_0) {
+ block_returns_ = true;
+ InstructionOperand* inputs[1]{ConvertInputOp(input_op_0)};
+ return Emit(NewIndex(), kArchRet, 0, nullptr, 1, inputs);
+}
+
+
+// Adds a phi with up to four incoming vregs to the current block; trailing
+// default (kNoValue) arguments are ignored.
+PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
+ VReg incoming_vreg_1,
+ VReg incoming_vreg_2,
+ VReg incoming_vreg_3) {
+ auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, 10);
+ VReg inputs[] = {incoming_vreg_0, incoming_vreg_1, incoming_vreg_2,
+ incoming_vreg_3};
+ for (size_t i = 0; i < arraysize(inputs); ++i) {
+ if (inputs[i].value_ == kNoValue) break;
+ Extend(phi, inputs[i]);
+ }
+ current_block_->AddPhi(phi);
+ return phi;
+}
+
+
+// Appends one more incoming operand to |phi|.
+void InstructionSequenceTest::Extend(PhiInstruction* phi, VReg vreg) {
+ phi->Extend(zone(), vreg.value_);
+}
+
+
+// Defines a fresh vreg whose value is the constant |imm| (via a nop whose
+// output is a ConstantOperand).
+InstructionSequenceTest::VReg InstructionSequenceTest::DefineConstant(
+ int32_t imm) {
+ VReg vreg = NewReg();
+ sequence()->AddConstant(vreg.value_, Constant(imm));
+ InstructionOperand* outputs[1]{ConstantOperand::Create(vreg.value_, zone())};
+ Emit(vreg.value_, kArchNop, 1, outputs);
+ return vreg;
+}
+
+
+// Emits a bare nop instruction and returns its test index.
+int InstructionSequenceTest::EmitNop() { return Emit(NewIndex(), kArchNop); }
+
+
+// Emits a nop consuming a single input operand.
+int InstructionSequenceTest::EmitI(TestOperand input_op_0) {
+ InstructionOperand* inputs[1]{ConvertInputOp(input_op_0)};
+ return Emit(NewIndex(), kArchNop, 0, nullptr, 1, inputs);
+}
+
+
+// Emits a one-output/one-input nop; returns the defined vreg.
+InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
+ TestOperand output_op, TestOperand input_op_0) {
+ VReg output_vreg = NewReg();
+ InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
+ InstructionOperand* inputs[1]{ConvertInputOp(input_op_0)};
+ Emit(output_vreg.value_, kArchNop, 1, outputs, 1, inputs);
+ return output_vreg;
+}
+
+
+// Emits a one-output/two-input nop; returns the defined vreg.
+InstructionSequenceTest::VReg InstructionSequenceTest::EmitOII(
+ TestOperand output_op, TestOperand input_op_0, TestOperand input_op_1) {
+ VReg output_vreg = NewReg();
+ InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
+ InstructionOperand* inputs[2]{ConvertInputOp(input_op_0),
+ ConvertInputOp(input_op_1)};
+ Emit(output_vreg.value_, kArchNop, 1, outputs, 2, inputs);
+ return output_vreg;
+}
+
+
+// Emits a call with |input_size| inputs. The output must carry a fixed
+// policy (calls return in a fixed location); the instruction is marked as a
+// call so the allocator treats it as clobbering.
+InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
+ TestOperand output_op, size_t input_size, TestOperand* inputs) {
+ VReg output_vreg = NewReg();
+ InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
+ CHECK(UnallocatedOperand::cast(outputs[0])->HasFixedPolicy());
+ InstructionOperand** mapped_inputs =
+ zone()->NewArray<InstructionOperand*>(static_cast<int>(input_size));
+ for (size_t i = 0; i < input_size; ++i) {
+ mapped_inputs[i] = ConvertInputOp(inputs[i]);
+ }
+ Emit(output_vreg.value_, kArchCallCodeObject, 1, outputs, input_size,
+ mapped_inputs, 0, nullptr, true);
+ return output_vreg;
+}
+
+
+// Convenience overload: up to four inputs, terminated by the first kInvalid.
+InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
+ TestOperand output_op, TestOperand input_op_0, TestOperand input_op_1,
+ TestOperand input_op_2, TestOperand input_op_3) {
+ TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
+ size_t size = 0;
+ for (; size < arraysize(inputs); ++size) {
+ if (inputs[size].type_ == kInvalid) break;
+ }
+ return EmitCall(output_op, size, inputs);
+}
+
+
+// Looks up an emitted instruction by the test index returned at emission;
+// CHECK-fails if the index is unknown.
+const Instruction* InstructionSequenceTest::GetInstruction(
+ int instruction_index) {
+ auto it = instructions_.find(instruction_index);
+ CHECK(it != instructions_.end());
+ return it->second;
+}
+
+
+// Emits a two-way branch terminator on |input_op|; the three immediate inputs
+// are placeholders for branch target data.
+int InstructionSequenceTest::EmitBranch(TestOperand input_op) {
+ InstructionOperand* inputs[4]{ConvertInputOp(input_op), ConvertInputOp(Imm()),
+ ConvertInputOp(Imm()), ConvertInputOp(Imm())};
+ InstructionCode opcode = kArchJmp | FlagsModeField::encode(kFlags_branch) |
+ FlagsConditionField::encode(kEqual);
+ auto instruction =
+ NewInstruction(opcode, 0, nullptr, 4, inputs)->MarkAsControl();
+ return AddInstruction(NewIndex(), instruction);
+}
+
+
+// Emits a fall-through terminator (a nop marked as control).
+int InstructionSequenceTest::EmitFallThrough() {
+ auto instruction = NewInstruction(kArchNop, 0, nullptr)->MarkAsControl();
+ return AddInstruction(NewIndex(), instruction);
+}
+
+
+// Emits an unconditional jump terminator with a placeholder immediate target.
+int InstructionSequenceTest::EmitJump() {
+ InstructionOperand* inputs[1]{ConvertInputOp(Imm())};
+ auto instruction =
+ NewInstruction(kArchJmp, 0, nullptr, 1, inputs)->MarkAsControl();
+ return AddInstruction(NewIndex(), instruction);
+}
+
+
+// Allocates a new Instruction in the test zone; requires an open block.
+Instruction* InstructionSequenceTest::NewInstruction(
+ InstructionCode code, size_t outputs_size, InstructionOperand** outputs,
+ size_t inputs_size, InstructionOperand** inputs, size_t temps_size,
+ InstructionOperand** temps) {
+ CHECK_NE(nullptr, current_block_);
+ return Instruction::New(zone(), code, outputs_size, outputs, inputs_size,
+ inputs, temps_size, temps);
+}
+
+
+// The four Unallocated() overloads below build UnallocatedOperands for the
+// supported (policy[, lifetime/index]) combinations, tagged with the test
+// operand's virtual register.
+InstructionOperand* InstructionSequenceTest::Unallocated(
+ TestOperand op, UnallocatedOperand::ExtendedPolicy policy) {
+ auto unallocated = new (zone()) UnallocatedOperand(policy);
+ unallocated->set_virtual_register(op.vreg_.value_);
+ return unallocated;
+}
+
+
+InstructionOperand* InstructionSequenceTest::Unallocated(
+ TestOperand op, UnallocatedOperand::ExtendedPolicy policy,
+ UnallocatedOperand::Lifetime lifetime) {
+ auto unallocated = new (zone()) UnallocatedOperand(policy, lifetime);
+ unallocated->set_virtual_register(op.vreg_.value_);
+ return unallocated;
+}
+
+
+InstructionOperand* InstructionSequenceTest::Unallocated(
+ TestOperand op, UnallocatedOperand::ExtendedPolicy policy, int index) {
+ auto unallocated = new (zone()) UnallocatedOperand(policy, index);
+ unallocated->set_virtual_register(op.vreg_.value_);
+ return unallocated;
+}
+
+
+InstructionOperand* InstructionSequenceTest::Unallocated(
+ TestOperand op, UnallocatedOperand::BasicPolicy policy, int index) {
+ auto unallocated = new (zone()) UnallocatedOperand(policy, index);
+ unallocated->set_virtual_register(op.vreg_.value_);
+ return unallocated;
+}
+
+
+// Maps a TestOperand used as an input to a real InstructionOperand;
+// CHECK-fails on operand types that are not valid as inputs.
+InstructionOperand* InstructionSequenceTest::ConvertInputOp(TestOperand op) {
+ if (op.type_ == kImmediate) {
+ CHECK_EQ(op.vreg_.value_, kNoValue);
+ return ImmediateOperand::Create(op.value_, zone());
+ }
+ CHECK_NE(op.vreg_.value_, kNoValue);
+ switch (op.type_) {
+ case kNone:
+ return Unallocated(op, UnallocatedOperand::NONE,
+ UnallocatedOperand::USED_AT_START);
+ case kRegister:
+ return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START);
+ case kFixedRegister:
+ CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
+ return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
+ case kFixedSlot:
+ return Unallocated(op, UnallocatedOperand::FIXED_SLOT, op.value_);
+ default:
+ break;
+ }
+ CHECK(false);
+ return NULL;
+}
+
+
+// Maps a TestOperand used as an output to a real InstructionOperand tagged
+// with |vreg|; CHECK-fails on operand types not valid as outputs.
+InstructionOperand* InstructionSequenceTest::ConvertOutputOp(VReg vreg,
+ TestOperand op) {
+ CHECK_EQ(op.vreg_.value_, kNoValue);
+ op.vreg_ = vreg;
+ switch (op.type_) {
+ case kSameAsFirst:
+ return Unallocated(op, UnallocatedOperand::SAME_AS_FIRST_INPUT);
+ case kRegister:
+ return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER);
+ case kFixedSlot:
+ return Unallocated(op, UnallocatedOperand::FIXED_SLOT, op.value_);
+ case kFixedRegister:
+ CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
+ return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
+ default:
+ break;
+ }
+ CHECK(false);
+ return NULL;
+}
+
+
+// Creates the next InstructionBlock (id == rpo number == creation order),
+// computing loop header/end markers from the innermost open loop, and makes
+// it the current block.
+InstructionBlock* InstructionSequenceTest::NewBlock() {
+ CHECK(current_block_ == nullptr);
+ auto block_id = BasicBlock::Id::FromSize(instruction_blocks_.size());
+ Rpo rpo = Rpo::FromInt(block_id.ToInt());
+ Rpo loop_header = Rpo::Invalid();
+ Rpo loop_end = Rpo::Invalid();
+ if (!loop_blocks_.empty()) {
+ auto& loop_data = loop_blocks_.back();
+ // This is a loop header.
+ if (!loop_data.loop_header_.IsValid()) {
+ loop_end = Rpo::FromInt(block_id.ToInt() + loop_data.expected_blocks_);
+ loop_data.expected_blocks_--;
+ loop_data.loop_header_ = rpo;
+ } else {
+ // This is a loop body.
+ CHECK_NE(0, loop_data.expected_blocks_);
+ // TODO(dcarney): handle nested loops.
+ loop_data.expected_blocks_--;
+ loop_header = loop_data.loop_header_;
+ }
+ }
+ // Construct instruction block.
+ auto instruction_block = new (zone()) InstructionBlock(
+ zone(), block_id, rpo, rpo, loop_header, loop_end, false);
+ instruction_blocks_.push_back(instruction_block);
+ current_block_ = instruction_block;
+ sequence()->StartBlock(rpo);
+ return instruction_block;
+}
+
+
+// Resolves the successor/predecessor edges recorded in completions_. Called
+// once, after all blocks have been ended.
+void InstructionSequenceTest::WireBlocks() {
+ CHECK_EQ(nullptr, current_block());
+ CHECK(instruction_blocks_.size() == completions_.size());
+ size_t offset = 0;
+ for (const auto& completion : completions_) {
+ switch (completion.type_) {
+ case kBlockEnd:
+ break;
+ case kFallThrough: // Fallthrough.
+ case kJump:
+ WireBlock(offset, completion.offset_0_);
+ break;
+ case kBranch:
+ WireBlock(offset, completion.offset_0_);
+ WireBlock(offset, completion.offset_1_);
+ break;
+ }
+ ++offset;
+ }
+}
+
+
+// Links block |block_offset| to the block |jump_offset| positions ahead
+// (offsets are relative, in block-creation order).
+void InstructionSequenceTest::WireBlock(size_t block_offset, int jump_offset) {
+ size_t target_block_offset = block_offset + static_cast<size_t>(jump_offset);
+ CHECK(block_offset < instruction_blocks_.size());
+ CHECK(target_block_offset < instruction_blocks_.size());
+ auto block = instruction_blocks_[block_offset];
+ auto target = instruction_blocks_[target_block_offset];
+ block->successors().push_back(target->rpo_number());
+ target->predecessors().push_back(block->rpo_number());
+}
+
+
+// Builds an instruction from the given operands (optionally marked as a
+// call) and appends it to the sequence; returns |instruction_index|.
+int InstructionSequenceTest::Emit(int instruction_index, InstructionCode code,
+ size_t outputs_size,
+ InstructionOperand** outputs,
+ size_t inputs_size,
+ InstructionOperand** inputs,
+ size_t temps_size, InstructionOperand** temps,
+ bool is_call) {
+ auto instruction = NewInstruction(code, outputs_size, outputs, inputs_size,
+ inputs, temps_size, temps);
+ if (is_call) instruction->MarkAsCall();
+ return AddInstruction(instruction_index, instruction);
+}
+
+
+// Appends |instruction| to the sequence and echoes back its test index.
+// NOTE(review): the instruction is not recorded in instructions_ here, so
+// GetInstruction() cannot find it — presumably recording happens elsewhere
+// or is added by a later change; confirm against callers.
+int InstructionSequenceTest::AddInstruction(int instruction_index,
+ Instruction* instruction) {
+ sequence()->AddInstruction(instruction);
+ return instruction_index;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
+#define V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
+
+#include "src/compiler/instruction.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Test harness for building InstructionSequences by hand: blocks, loops,
+// phis, and instructions are described with lightweight TestOperand/VReg
+// values and converted to real operands on emission. Shared by the register
+// allocator and move optimizer unittests.
+class InstructionSequenceTest : public TestWithZone {
+ public:
+ static const int kDefaultNRegs = 4;
+ // Sentinel meaning "no value / unset" for indices and immediates.
+ static const int kNoValue = kMinInt;
+
+ typedef BasicBlock::RpoNumber Rpo;
+
+ // Wrapper for a virtual register number; default-constructed means unset.
+ struct VReg {
+ VReg() : value_(kNoValue) {}
+ VReg(PhiInstruction* phi) : value_(phi->virtual_register()) {} // NOLINT
+ explicit VReg(int value) : value_(value) {}
+ int value_;
+ };
+
+ enum TestOperandType {
+ kInvalid,
+ kSameAsFirst,
+ kRegister,
+ kFixedRegister,
+ kSlot,
+ kFixedSlot,
+ kImmediate,
+ kNone,
+ kConstant
+ };
+
+ // Symbolic operand: a type plus optional vreg and value (register/slot
+ // index or immediate, depending on type).
+ struct TestOperand {
+ TestOperand() : type_(kInvalid), vreg_(), value_(kNoValue) {}
+ TestOperand(TestOperandType type, int imm)
+ : type_(type), vreg_(), value_(imm) {}
+ TestOperand(TestOperandType type, VReg vreg, int value = kNoValue)
+ : type_(type), vreg_(vreg), value_(value) {}
+
+ TestOperandType type_;
+ VReg vreg_;
+ int value_;
+ };
+
+ // Factory helpers for the operand kinds used in tests.
+ static TestOperand Same() { return TestOperand(kSameAsFirst, VReg()); }
+
+ static TestOperand Reg(VReg vreg, int index = kNoValue) {
+ TestOperandType type = kRegister;
+ if (index != kNoValue) type = kFixedRegister;
+ return TestOperand(type, vreg, index);
+ }
+
+ static TestOperand Reg(int index = kNoValue) { return Reg(VReg(), index); }
+
+ static TestOperand Slot(VReg vreg, int index = kNoValue) {
+ TestOperandType type = kSlot;
+ if (index != kNoValue) type = kFixedSlot;
+ return TestOperand(type, vreg, index);
+ }
+
+ static TestOperand Slot(int index = kNoValue) { return Slot(VReg(), index); }
+
+ static TestOperand Const(int index) {
+ CHECK_NE(kNoValue, index);
+ return TestOperand(kConstant, VReg(), index);
+ }
+
+ static TestOperand Use(VReg vreg) { return TestOperand(kNone, vreg); }
+
+ static TestOperand Use() { return Use(VReg()); }
+
+ enum BlockCompletionType { kBlockEnd, kFallThrough, kBranch, kJump };
+
+ // How a block ends; offsets are relative block distances to the targets.
+ struct BlockCompletion {
+ BlockCompletionType type_;
+ TestOperand op_;
+ int offset_0_;
+ int offset_1_;
+ };
+
+ static BlockCompletion FallThrough() {
+ BlockCompletion completion = {kFallThrough, TestOperand(), 1, kNoValue};
+ return completion;
+ }
+
+ static BlockCompletion Jump(int offset) {
+ BlockCompletion completion = {kJump, TestOperand(), offset, kNoValue};
+ return completion;
+ }
+
+ static BlockCompletion Branch(TestOperand op, int left_offset,
+ int right_offset) {
+ BlockCompletion completion = {kBranch, op, left_offset, right_offset};
+ return completion;
+ }
+
+ static BlockCompletion Last() {
+ BlockCompletion completion = {kBlockEnd, TestOperand(), kNoValue, kNoValue};
+ return completion;
+ }
+
+ InstructionSequenceTest();
+
+ // Must be called before anything is built; see the .cc for CHECKs.
+ void SetNumRegs(int num_general_registers, int num_double_registers);
+ RegisterConfiguration* config();
+ InstructionSequence* sequence();
+
+ // Block/loop construction.
+ void StartLoop(int loop_blocks);
+ void EndLoop();
+ void StartBlock();
+ int EndBlock(BlockCompletion completion = FallThrough());
+
+ // Instruction emission helpers; VReg-returning variants define a value.
+ TestOperand Imm(int32_t imm = 0);
+ VReg Define(TestOperand output_op);
+ VReg Parameter(TestOperand output_op = Reg()) { return Define(output_op); }
+
+ int Return(TestOperand input_op_0);
+ int Return(VReg vreg) { return Return(Reg(vreg, 0)); }
+
+ PhiInstruction* Phi(VReg incoming_vreg_0 = VReg(),
+ VReg incoming_vreg_1 = VReg(),
+ VReg incoming_vreg_2 = VReg(),
+ VReg incoming_vreg_3 = VReg());
+ void Extend(PhiInstruction* phi, VReg vreg);
+
+ VReg DefineConstant(int32_t imm = 0);
+ int EmitNop();
+ int EmitI(TestOperand input_op_0);
+ VReg EmitOI(TestOperand output_op, TestOperand input_op_0);
+ VReg EmitOII(TestOperand output_op, TestOperand input_op_0,
+ TestOperand input_op_1);
+ VReg EmitCall(TestOperand output_op, size_t input_size, TestOperand* inputs);
+ VReg EmitCall(TestOperand output_op, TestOperand input_op_0 = TestOperand(),
+ TestOperand input_op_1 = TestOperand(),
+ TestOperand input_op_2 = TestOperand(),
+ TestOperand input_op_3 = TestOperand());
+
+ // Get defining instruction vreg or value returned at instruction creation
+ // time when there is no return value.
+ const Instruction* GetInstruction(int instruction_index);
+
+ InstructionBlock* current_block() const { return current_block_; }
+ int num_general_registers() const { return num_general_registers_; }
+ int num_double_registers() const { return num_double_registers_; }
+
+ // Called after all instructions have been inserted.
+ void WireBlocks();
+
+ private:
+ VReg NewReg() { return VReg(sequence()->NextVirtualRegister()); }
+ // Test indices count downward from -1 to stay distinct from real indices.
+ int NewIndex() { return current_instruction_index_--; }
+
+ static TestOperand Invalid() { return TestOperand(kInvalid, VReg()); }
+
+ int EmitBranch(TestOperand input_op);
+ int EmitFallThrough();
+ int EmitJump();
+ Instruction* NewInstruction(InstructionCode code, size_t outputs_size,
+ InstructionOperand** outputs,
+ size_t inputs_size = 0,
+ InstructionOperand* *inputs = nullptr,
+ size_t temps_size = 0,
+ InstructionOperand* *temps = nullptr);
+ InstructionOperand* Unallocated(TestOperand op,
+ UnallocatedOperand::ExtendedPolicy policy);
+ InstructionOperand* Unallocated(TestOperand op,
+ UnallocatedOperand::ExtendedPolicy policy,
+ UnallocatedOperand::Lifetime lifetime);
+ InstructionOperand* Unallocated(TestOperand op,
+ UnallocatedOperand::ExtendedPolicy policy,
+ int index);
+ InstructionOperand* Unallocated(TestOperand op,
+ UnallocatedOperand::BasicPolicy policy,
+ int index);
+ InstructionOperand* ConvertInputOp(TestOperand op);
+ InstructionOperand* ConvertOutputOp(VReg vreg, TestOperand op);
+ InstructionBlock* NewBlock();
+ void WireBlock(size_t block_offset, int jump_offset);
+
+ int Emit(int instruction_index, InstructionCode code, size_t outputs_size = 0,
+ InstructionOperand* *outputs = nullptr, size_t inputs_size = 0,
+ InstructionOperand* *inputs = nullptr, size_t temps_size = 0,
+ InstructionOperand* *temps = nullptr, bool is_call = false);
+
+ int AddInstruction(int instruction_index, Instruction* instruction);
+
+ // Bookkeeping for a loop under construction.
+ struct LoopData {
+ Rpo loop_header_;
+ int expected_blocks_;
+ };
+
+ typedef std::vector<LoopData> LoopBlocks;
+ typedef std::map<int, const Instruction*> Instructions;
+ typedef std::vector<BlockCompletion> Completions;
+
+ SmartPointer<RegisterConfiguration> config_;
+ InstructionSequence* sequence_;
+ int num_general_registers_;
+ int num_double_registers_;
+
+ // Block building state.
+ InstructionBlocks instruction_blocks_;
+ Instructions instructions_;
+ int current_instruction_index_;
+ Completions completions_;
+ LoopBlocks loop_blocks_;
+ InstructionBlock* current_block_;
+ bool block_returns_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/move-optimizer.h"
+#include "test/unittests/compiler/instruction-sequence-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Harness for MoveOptimizer tests: builds sequences via the base class, adds
+// raw parallel moves to gaps, runs the optimizer, and inspects the results.
+class MoveOptimizerTest : public InstructionSequenceTest {
+ public:
+ // Returns the most recently emitted gap. If the last instruction is not a
+ // gap, the one before it is assumed to be — requires at least two
+ // instructions in that case.
+ GapInstruction* LastGap() {
+ auto instruction = sequence()->instructions().back();
+ if (!instruction->IsGapMoves()) {
+ instruction = *(sequence()->instructions().rbegin() + 1);
+ }
+ return GapInstruction::cast(instruction);
+ }
+
+ // Adds a from->to move to |gap| at inner position |pos| (default START).
+ void AddMove(GapInstruction* gap, TestOperand from, TestOperand to,
+ GapInstruction::InnerPosition pos = GapInstruction::START) {
+ auto parallel_move = gap->GetOrCreateParallelMove(pos, zone());
+ parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to), zone());
+ }
+
+ // Counts the moves in |move| that survived optimization.
+ int NonRedundantSize(ParallelMove* move) {
+ int i = 0;
+ auto ops = move->move_operands();
+ for (auto op = ops->begin(); op != ops->end(); ++op) {
+ if (op->IsRedundant()) continue;
+ i++;
+ }
+ return i;
+ }
+
+ // True if |move| contains a non-redundant from->to move.
+ bool Contains(ParallelMove* move, TestOperand from_op, TestOperand to_op) {
+ auto from = ConvertMoveArg(from_op);
+ auto to = ConvertMoveArg(to_op);
+ auto ops = move->move_operands();
+ for (auto op = ops->begin(); op != ops->end(); ++op) {
+ if (op->IsRedundant()) continue;
+ if (op->source()->Equals(from) && op->destination()->Equals(to)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // TODO(dcarney): add a verifier.
+ // Wires the built blocks and runs the optimizer, with optional tracing.
+ void Optimize() {
+ WireBlocks();
+ if (FLAG_trace_turbo) {
+ OFStream os(stdout);
+ PrintableInstructionSequence printable = {config(), sequence()};
+ os << "----- Instruction sequence before move optimization -----\n"
+ << printable;
+ }
+ MoveOptimizer move_optimizer(zone(), sequence());
+ move_optimizer.Run();
+ if (FLAG_trace_turbo) {
+ OFStream os(stdout);
+ PrintableInstructionSequence printable = {config(), sequence()};
+ os << "----- Instruction sequence after move optimization -----\n"
+ << printable;
+ }
+ }
+
+ private:
+ // Maps a vreg-less TestOperand to a concrete allocated operand; only
+ // constants, fixed slots, and fixed registers are supported here.
+ InstructionOperand* ConvertMoveArg(TestOperand op) {
+ CHECK_EQ(kNoValue, op.vreg_.value_);
+ CHECK_NE(kNoValue, op.value_);
+ switch (op.type_) {
+ case kConstant:
+ return ConstantOperand::Create(op.value_, zone());
+ case kFixedSlot:
+ return StackSlotOperand::Create(op.value_, zone());
+ case kFixedRegister:
+ CHECK(0 <= op.value_ && op.value_ < num_general_registers());
+ return RegisterOperand::Create(op.value_, zone());
+ default:
+ break;
+ }
+ CHECK(false);
+ return nullptr;
+ }
+};
+
+
+// After r0->r1, the move r1->r0 in the next gap is forwarded to r0->r0
+// (redundant) and dropped; only the original r0->r1 survives, merged into
+// the last gap of the run.
+TEST_F(MoveOptimizerTest, RemovesRedundant) {
+ StartBlock();
+ AddMove(LastGap(), Reg(0), Reg(1));
+ EmitNop();
+ AddMove(LastGap(), Reg(1), Reg(0));
+ EmitNop();
+ EndBlock(Last());
+
+ Optimize();
+
+ auto gap = LastGap();
+ auto move = gap->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(move));
+ CHECK(Contains(move, Reg(0), Reg(1)));
+}
+
+
+// Four loads of Const(1) collapse to a single load into the register
+// destination (slot 0); the three slot destinations become register-to-slot
+// copies in slot 1, using the register load as their shared source.
+TEST_F(MoveOptimizerTest, SplitsConstants) {
+ StartBlock();
+ EndBlock(Last());
+
+ auto gap = LastGap();
+ AddMove(gap, Const(1), Slot(0));
+ AddMove(gap, Const(1), Slot(1));
+ AddMove(gap, Const(1), Reg(0));
+ AddMove(gap, Const(1), Slot(2));
+
+ Optimize();
+
+ auto move = gap->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(move));
+ CHECK(Contains(move, Const(1), Reg(0)));
+
+ move = gap->parallel_moves()[1];
+ CHECK_EQ(3, NonRedundantSize(move));
+ CHECK(Contains(move, Reg(0), Slot(0)));
+ CHECK(Contains(move, Reg(0), Slot(1)));
+ CHECK(Contains(move, Reg(0), Slot(2)));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/utils/random-number-generator.h"
-#include "src/compiler/instruction.h"
#include "src/compiler/pipeline.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gmock/include/gmock/gmock.h"
+#include "test/unittests/compiler/instruction-sequence-unittest.h"
namespace v8 {
namespace internal {
namespace compiler {
-typedef BasicBlock::RpoNumber Rpo;
-
-static const char*
- general_register_names_[RegisterConfiguration::kMaxGeneralRegisters];
-static const char*
- double_register_names_[RegisterConfiguration::kMaxDoubleRegisters];
-static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
- RegisterConfiguration::kMaxDoubleRegisters)];
-
-static void InitializeRegisterNames() {
- char* loc = register_names_;
- for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
- general_register_names_[i] = loc;
- loc += base::OS::SNPrintF(loc, 100, "gp_%d", i);
- *loc++ = 0;
- }
- for (int i = 0; i < RegisterConfiguration::kMaxDoubleRegisters; ++i) {
- double_register_names_[i] = loc;
- loc += base::OS::SNPrintF(loc, 100, "fp_%d", i) + 1;
- *loc++ = 0;
- }
-}
-
-
-class RegisterAllocatorTest : public TestWithZone {
+class RegisterAllocatorTest : public InstructionSequenceTest {
public:
- static const int kDefaultNRegs = 4;
- static const int kNoValue = kMinInt;
-
- struct VReg {
- VReg() : value_(kNoValue) {}
- VReg(PhiInstruction* phi) : value_(phi->virtual_register()) {} // NOLINT
- explicit VReg(int value) : value_(value) {}
- int value_;
- };
-
- enum TestOperandType {
- kInvalid,
- kSameAsFirst,
- kRegister,
- kFixedRegister,
- kSlot,
- kFixedSlot,
- kImmediate,
- kNone
- };
-
- struct TestOperand {
- TestOperand() : type_(kInvalid), vreg_(), value_(kNoValue) {}
- TestOperand(TestOperandType type, int imm)
- : type_(type), vreg_(), value_(imm) {}
- TestOperand(TestOperandType type, VReg vreg, int value = kNoValue)
- : type_(type), vreg_(vreg), value_(value) {}
-
- TestOperandType type_;
- VReg vreg_;
- int value_;
- };
-
- static TestOperand Same() { return TestOperand(kSameAsFirst, VReg()); }
-
- static TestOperand Reg(VReg vreg, int index = kNoValue) {
- TestOperandType type = kRegister;
- if (index != kNoValue) type = kFixedRegister;
- return TestOperand(type, vreg, index);
- }
-
- static TestOperand Reg(int index = kNoValue) { return Reg(VReg(), index); }
-
- static TestOperand Slot(VReg vreg, int index = kNoValue) {
- TestOperandType type = kSlot;
- if (index != kNoValue) type = kFixedSlot;
- return TestOperand(type, vreg, index);
- }
-
- static TestOperand Slot(int index = kNoValue) { return Slot(VReg(), index); }
-
- static TestOperand Use(VReg vreg) { return TestOperand(kNone, vreg); }
-
- static TestOperand Use() { return Use(VReg()); }
-
- enum BlockCompletionType { kBlockEnd, kFallThrough, kBranch, kJump };
-
- struct BlockCompletion {
- BlockCompletionType type_;
- TestOperand op_;
- int offset_0_;
- int offset_1_;
- };
-
- static BlockCompletion FallThrough() {
- BlockCompletion completion = {kFallThrough, TestOperand(), 1, kNoValue};
- return completion;
- }
-
- static BlockCompletion Jump(int offset) {
- BlockCompletion completion = {kJump, TestOperand(), offset, kNoValue};
- return completion;
- }
-
- static BlockCompletion Branch(TestOperand op, int left_offset,
- int right_offset) {
- BlockCompletion completion = {kBranch, op, left_offset, right_offset};
- return completion;
- }
-
- static BlockCompletion Last() {
- BlockCompletion completion = {kBlockEnd, TestOperand(), kNoValue, kNoValue};
- return completion;
- }
-
- RegisterAllocatorTest()
- : frame_(nullptr),
- sequence_(nullptr),
- num_general_registers_(kDefaultNRegs),
- num_double_registers_(kDefaultNRegs),
- instruction_blocks_(zone()),
- current_instruction_index_(-1),
- current_block_(nullptr),
- block_returns_(false) {
- InitializeRegisterNames();
- }
-
- void SetNumRegs(int num_general_registers, int num_double_registers) {
- CHECK(config_.is_empty());
- CHECK(instructions_.empty());
- CHECK(instruction_blocks_.empty());
- num_general_registers_ = num_general_registers;
- num_double_registers_ = num_double_registers;
- }
-
- RegisterConfiguration* config() {
- if (config_.is_empty()) {
- config_.Reset(new RegisterConfiguration(
- num_general_registers_, num_double_registers_, num_double_registers_,
- general_register_names_, double_register_names_));
- }
- return config_.get();
- }
-
- Frame* frame() {
- if (frame_ == nullptr) {
- frame_ = new (zone()) Frame();
- }
- return frame_;
- }
-
- InstructionSequence* sequence() {
- if (sequence_ == nullptr) {
- sequence_ =
- new (zone()) InstructionSequence(zone(), &instruction_blocks_);
- }
- return sequence_;
- }
-
- void StartLoop(int loop_blocks) {
- CHECK(current_block_ == nullptr);
- if (!loop_blocks_.empty()) {
- CHECK(!loop_blocks_.back().loop_header_.IsValid());
- }
- LoopData loop_data = {Rpo::Invalid(), loop_blocks};
- loop_blocks_.push_back(loop_data);
- }
-
- void EndLoop() {
- CHECK(current_block_ == nullptr);
- CHECK(!loop_blocks_.empty());
- CHECK_EQ(0, loop_blocks_.back().expected_blocks_);
- loop_blocks_.pop_back();
- }
-
- void StartBlock() {
- block_returns_ = false;
- NewBlock();
- }
-
- int EndBlock(BlockCompletion completion = FallThrough()) {
- int instruction_index = kMinInt;
- if (block_returns_) {
- CHECK(completion.type_ == kBlockEnd || completion.type_ == kFallThrough);
- completion.type_ = kBlockEnd;
- }
- switch (completion.type_) {
- case kBlockEnd:
- break;
- case kFallThrough:
- instruction_index = EmitFallThrough();
- break;
- case kJump:
- CHECK(!block_returns_);
- instruction_index = EmitJump();
- break;
- case kBranch:
- CHECK(!block_returns_);
- instruction_index = EmitBranch(completion.op_);
- break;
- }
- completions_.push_back(completion);
- CHECK(current_block_ != nullptr);
- sequence()->EndBlock(current_block_->rpo_number());
- current_block_ = nullptr;
- return instruction_index;
- }
-
void Allocate() {
- CHECK_EQ(nullptr, current_block_);
WireBlocks();
Pipeline::AllocateRegistersForTesting(config(), sequence(), true);
}
-
- TestOperand Imm(int32_t imm = 0) {
- int index = sequence()->AddImmediate(Constant(imm));
- return TestOperand(kImmediate, index);
- }
-
- VReg Parameter(TestOperand output_op = Reg()) {
- VReg vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(vreg, output_op)};
- Emit(vreg.value_, kArchNop, 1, outputs);
- return vreg;
- }
-
- int Return(TestOperand input_op_0) {
- block_returns_ = true;
- InstructionOperand* inputs[1]{ConvertInputOp(input_op_0)};
- return Emit(NewIndex(), kArchRet, 0, nullptr, 1, inputs);
- }
-
- int Return(VReg vreg) { return Return(Reg(vreg, 0)); }
-
- PhiInstruction* Phi(VReg incoming_vreg) {
- PhiInstruction* phi =
- new (zone()) PhiInstruction(zone(), NewReg().value_, 10);
- phi->Extend(zone(), incoming_vreg.value_);
- current_block_->AddPhi(phi);
- return phi;
- }
-
- PhiInstruction* Phi(VReg incoming_vreg_0, VReg incoming_vreg_1) {
- auto phi = Phi(incoming_vreg_0);
- Extend(phi, incoming_vreg_1);
- return phi;
- }
-
- void Extend(PhiInstruction* phi, VReg vreg) {
- phi->Extend(zone(), vreg.value_);
- }
-
- VReg DefineConstant(int32_t imm = 0) {
- VReg vreg = NewReg();
- sequence()->AddConstant(vreg.value_, Constant(imm));
- InstructionOperand* outputs[1]{
- ConstantOperand::Create(vreg.value_, zone())};
- Emit(vreg.value_, kArchNop, 1, outputs);
- return vreg;
- }
-
- VReg EmitOII(TestOperand output_op, TestOperand input_op_0,
- TestOperand input_op_1) {
- VReg output_vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
- InstructionOperand* inputs[2]{ConvertInputOp(input_op_0),
- ConvertInputOp(input_op_1)};
- Emit(output_vreg.value_, kArchNop, 1, outputs, 2, inputs);
- return output_vreg;
- }
-
- VReg EmitCall(TestOperand output_op, size_t input_size, TestOperand* inputs) {
- VReg output_vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
- InstructionOperand** mapped_inputs =
- zone()->NewArray<InstructionOperand*>(static_cast<int>(input_size));
- for (size_t i = 0; i < input_size; ++i) {
- mapped_inputs[i] = ConvertInputOp(inputs[i]);
- }
- Emit(output_vreg.value_, kArchCallCodeObject, 1, outputs, input_size,
- mapped_inputs);
- return output_vreg;
- }
-
- // Get defining instruction vreg or value returned at instruction creation
- // time when there is no return value.
- const Instruction* GetInstruction(int instruction_index) {
- auto it = instructions_.find(instruction_index);
- CHECK(it != instructions_.end());
- return it->second;
- }
-
- private:
- VReg NewReg() { return VReg(sequence()->NextVirtualRegister()); }
- int NewIndex() { return current_instruction_index_--; }
-
- static TestOperand Invalid() { return TestOperand(kInvalid, VReg()); }
-
- int EmitBranch(TestOperand input_op) {
- InstructionOperand* inputs[4]{ConvertInputOp(input_op),
- ConvertInputOp(Imm()), ConvertInputOp(Imm()),
- ConvertInputOp(Imm())};
- InstructionCode opcode = kArchJmp | FlagsModeField::encode(kFlags_branch) |
- FlagsConditionField::encode(kEqual);
- auto instruction =
- NewInstruction(opcode, 0, nullptr, 4, inputs)->MarkAsControl();
- return AddInstruction(NewIndex(), instruction);
- }
-
- int EmitFallThrough() {
- auto instruction = NewInstruction(kArchNop, 0, nullptr)->MarkAsControl();
- return AddInstruction(NewIndex(), instruction);
- }
-
- int EmitJump() {
- InstructionOperand* inputs[1]{ConvertInputOp(Imm())};
- auto instruction =
- NewInstruction(kArchJmp, 0, nullptr, 1, inputs)->MarkAsControl();
- return AddInstruction(NewIndex(), instruction);
- }
-
- Instruction* NewInstruction(InstructionCode code, size_t outputs_size,
- InstructionOperand** outputs,
- size_t inputs_size = 0,
- InstructionOperand* *inputs = nullptr,
- size_t temps_size = 0,
- InstructionOperand* *temps = nullptr) {
- CHECK_NE(nullptr, current_block_);
- return Instruction::New(zone(), code, outputs_size, outputs, inputs_size,
- inputs, temps_size, temps);
- }
-
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::ExtendedPolicy policy) {
- auto unallocated = new (zone()) UnallocatedOperand(policy);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
- }
-
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::ExtendedPolicy policy,
- UnallocatedOperand::Lifetime lifetime) {
- auto unallocated = new (zone()) UnallocatedOperand(policy, lifetime);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
- }
-
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::ExtendedPolicy policy,
- int index) {
- auto unallocated = new (zone()) UnallocatedOperand(policy, index);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
- }
-
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::BasicPolicy policy,
- int index) {
- auto unallocated = new (zone()) UnallocatedOperand(policy, index);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
- }
-
- InstructionOperand* ConvertInputOp(TestOperand op) {
- if (op.type_ == kImmediate) {
- CHECK_EQ(op.vreg_.value_, kNoValue);
- return ImmediateOperand::Create(op.value_, zone());
- }
- CHECK_NE(op.vreg_.value_, kNoValue);
- switch (op.type_) {
- case kNone:
- return Unallocated(op, UnallocatedOperand::NONE,
- UnallocatedOperand::USED_AT_START);
- case kRegister:
- return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER,
- UnallocatedOperand::USED_AT_START);
- case kFixedRegister:
- CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
- return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
- default:
- break;
- }
- CHECK(false);
- return NULL;
- }
-
- InstructionOperand* ConvertOutputOp(VReg vreg, TestOperand op) {
- CHECK_EQ(op.vreg_.value_, kNoValue);
- op.vreg_ = vreg;
- switch (op.type_) {
- case kSameAsFirst:
- return Unallocated(op, UnallocatedOperand::SAME_AS_FIRST_INPUT);
- case kRegister:
- return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER);
- case kFixedSlot:
- return Unallocated(op, UnallocatedOperand::FIXED_SLOT, op.value_);
- case kFixedRegister:
- CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
- return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
- default:
- break;
- }
- CHECK(false);
- return NULL;
- }
-
- InstructionBlock* NewBlock() {
- CHECK(current_block_ == nullptr);
- auto block_id = BasicBlock::Id::FromSize(instruction_blocks_.size());
- Rpo rpo = Rpo::FromInt(block_id.ToInt());
- Rpo loop_header = Rpo::Invalid();
- Rpo loop_end = Rpo::Invalid();
- if (!loop_blocks_.empty()) {
- auto& loop_data = loop_blocks_.back();
- // This is a loop header.
- if (!loop_data.loop_header_.IsValid()) {
- loop_end = Rpo::FromInt(block_id.ToInt() + loop_data.expected_blocks_);
- loop_data.expected_blocks_--;
- loop_data.loop_header_ = rpo;
- } else {
- // This is a loop body.
- CHECK_NE(0, loop_data.expected_blocks_);
- // TODO(dcarney): handle nested loops.
- loop_data.expected_blocks_--;
- loop_header = loop_data.loop_header_;
- }
- }
- // Construct instruction block.
- auto instruction_block = new (zone()) InstructionBlock(
- zone(), block_id, rpo, rpo, loop_header, loop_end, false);
- instruction_blocks_.push_back(instruction_block);
- current_block_ = instruction_block;
- sequence()->StartBlock(rpo);
- return instruction_block;
- }
-
- void WireBlocks() {
- CHECK(instruction_blocks_.size() == completions_.size());
- size_t offset = 0;
- for (const auto& completion : completions_) {
- switch (completion.type_) {
- case kBlockEnd:
- break;
- case kFallThrough: // Fallthrough.
- case kJump:
- WireBlock(offset, completion.offset_0_);
- break;
- case kBranch:
- WireBlock(offset, completion.offset_0_);
- WireBlock(offset, completion.offset_1_);
- break;
- }
- ++offset;
- }
- }
-
- void WireBlock(size_t block_offset, int jump_offset) {
- size_t target_block_offset =
- block_offset + static_cast<size_t>(jump_offset);
- CHECK(block_offset < instruction_blocks_.size());
- CHECK(target_block_offset < instruction_blocks_.size());
- auto block = instruction_blocks_[block_offset];
- auto target = instruction_blocks_[target_block_offset];
- block->successors().push_back(target->rpo_number());
- target->predecessors().push_back(block->rpo_number());
- }
-
- int Emit(int instruction_index, InstructionCode code, size_t outputs_size,
- InstructionOperand** outputs, size_t inputs_size = 0,
- InstructionOperand* *inputs = nullptr, size_t temps_size = 0,
- InstructionOperand* *temps = nullptr) {
- auto instruction = NewInstruction(code, outputs_size, outputs, inputs_size,
- inputs, temps_size, temps);
- return AddInstruction(instruction_index, instruction);
- }
-
- int AddInstruction(int instruction_index, Instruction* instruction) {
- sequence()->AddInstruction(instruction);
- return instruction_index;
- }
-
- struct LoopData {
- Rpo loop_header_;
- int expected_blocks_;
- };
-
- typedef std::vector<LoopData> LoopBlocks;
- typedef std::map<int, const Instruction*> Instructions;
- typedef std::vector<BlockCompletion> Completions;
-
- SmartPointer<RegisterConfiguration> config_;
- Frame* frame_;
- InstructionSequence* sequence_;
- int num_general_registers_;
- int num_double_registers_;
-
- // Block building state.
- InstructionBlocks instruction_blocks_;
- Instructions instructions_;
- int current_instruction_index_;
- Completions completions_;
- LoopBlocks loop_blocks_;
- InstructionBlock* current_block_;
- bool block_returns_;
};
for (int i = 0; i < kPhis; ++i) {
merged[i] = Use(Phi(t_vals[i], f_vals[i]));
}
- Return(EmitCall(Reg(), kPhis, merged));
+ Return(EmitCall(Slot(-1), kPhis, merged));
EndBlock();
Allocate();
for (int i = 0; i < kPhis; ++i) {
merged[i] = Use(Phi(vals[i], vals[i]));
}
- Return(EmitCall(Reg(), kPhis, merged));
+ Return(EmitCall(Reg(0), kPhis, merged));
EndBlock();
Allocate();
Allocate();
}
+
+// Diamond control flow in which both branches define a value in the same
+// fixed register, merged by a phi in the join block.  A call between the
+// phi and its register use forces the allocator to keep the phi's value
+// alive across the call.  The test only checks that allocation completes
+// (no output assertions).
+TEST_F(RegisterAllocatorTest, SpillPhi) {
+  StartBlock();
+  EndBlock(Branch(Imm(), 1, 2));
+
+  StartBlock();
+  auto left = Define(Reg(0));
+  EndBlock(Jump(2));
+
+  StartBlock();
+  auto right = Define(Reg(0));
+  EndBlock();
+
+  StartBlock();
+  auto phi = Phi(left, right);
+  EmitCall(Slot(-1));
+  Return(Reg(phi));
+  EndBlock();
+
+  Allocate();
+}
+
+
+// Defines one constant per general register and passes each constant to a
+// call twice: once pinned to fixed register i and once pinned to fixed
+// stack slot i.  This exercises the gap-move machinery for constants under
+// full register pressure.  The test only checks that allocation completes.
+TEST_F(RegisterAllocatorTest, MoveLotsOfConstants) {
+  StartBlock();
+  VReg constants[kDefaultNRegs];
+  for (size_t i = 0; i < arraysize(constants); ++i) {
+    constants[i] = DefineConstant();
+  }
+  // First half of the call operands: constants in fixed registers.
+  TestOperand call_ops[kDefaultNRegs * 2];
+  for (int i = 0; i < kDefaultNRegs; ++i) {
+    call_ops[i] = Reg(constants[i], i);
+  }
+  // Second half: the same constants in fixed stack slots.
+  for (int i = 0; i < kDefaultNRegs; ++i) {
+    call_ops[i + kDefaultNRegs] = Slot(constants[i], i);
+  }
+  EmitCall(Slot(-1), arraysize(call_ops), call_ops);
+  EndBlock(Last());
+
+  Allocate();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
'compiler/graph-unittest.h',
'compiler/instruction-selector-unittest.cc',
'compiler/instruction-selector-unittest.h',
+ 'compiler/instruction-sequence-unittest.cc',
+ 'compiler/instruction-sequence-unittest.h',
'compiler/js-builtin-reducer-unittest.cc',
'compiler/js-operator-unittest.cc',
'compiler/js-typed-lowering-unittest.cc',
'compiler/machine-operator-reducer-unittest.cc',
+ 'compiler/move-optimizer-unittest.cc',
'compiler/node-matchers-unittest.cc',
'compiler/node-test-utils.cc',
'compiler/node-test-utils.h',
'../../src/compiler/machine-operator.h',
'../../src/compiler/machine-type.cc',
'../../src/compiler/machine-type.h',
+ '../../src/compiler/move-optimizer.cc',
+ '../../src/compiler/move-optimizer.h',
'../../src/compiler/node-aux-data-inl.h',
'../../src/compiler/node-aux-data.h',
'../../src/compiler/node-cache.cc',