AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchSwitch:
+ AssembleArchSwitch(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArchNop:
// don't emit code for nops.
DCHECK_EQ(LeaveCC, i.OutputSBit());
}
+void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
+ ArmOperandConverter i(this, instr);
+ int const kNumLabels = static_cast<int>(instr->InputCount() - 1);
+ __ BlockConstPoolFor(kNumLabels + 2);
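+  // Reading {pc} in ARM mode yields the address of the current instruction
+  // plus 8, so the load below indexes the table of 4-byte entries that starts
+  // immediately after the following nop.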
+ __ ldr(pc, MemOperand(pc, i.InputRegister(0), LSL, 2));
+ __ nop();
+ for (int index = 0; index < kNumLabels; ++index) {
+ __ dd(GetLabel(i.InputRpo(index + 1)));
+ }
+}
+
+
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 32-bit ARM we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
// On 32-bit ARM we do not insert nops for inlined Smi code.
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchSwitch:
+ AssembleArchSwitch(instr);
+ break;
case kArchNop:
// don't emit code for nops.
break;
}
+void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
+ Arm64OperandConverter i(this, instr);
+ UseScratchRegisterScope scope(masm());
+ Register reg = i.InputRegister(0);
+ Register tmp = scope.AcquireX();
+ Label table;
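+  // The table bound below is a sequence of 4-byte unconditional branches, one
+  // per case, so the case index is scaled by 4 (LSL 2) to address the branch
+  // to take.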
+ __ Adr(tmp, &table);
+ __ Add(tmp, tmp, Operand(reg, LSL, 2));
+ __ Br(tmp);
+ __ Bind(&table);
+ for (size_t index = 1; index < instr->InputCount(); ++index) {
+ __ B(GetLabel(i.InputRpo(index)));
+ }
+}
+
+
// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 64-bit ARM we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
static inline void FinishCode(MacroAssembler* masm) {
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
masm->CheckConstPool(true, false);
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
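+  // Prevent fall-through into the jump table data emitted after the code.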
+ masm->ud2();
#endif
}
namespace internal {
namespace compiler {
+class CodeGenerator::JumpTable FINAL : public ZoneObject {
+ public:
+ JumpTable(JumpTable* next, Label** targets, size_t target_count)
+ : next_(next), targets_(targets), target_count_(target_count) {}
+
+ Label* label() { return &label_; }
+ JumpTable* next() const { return next_; }
+ Label** targets() const { return targets_; }
+ size_t target_count() const { return target_count_; }
+
+ private:
+ Label label_;
+ JumpTable* const next_;
+ Label** const targets_;
+ size_t const target_count_;
+};
+
+
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info)
: frame_(frame),
deoptimization_literals_(code->zone()),
translations_(code->zone()),
last_lazy_deopt_pc_(0),
+ jump_tables_(nullptr),
ools_(nullptr),
osr_pc_offset_(-1) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
masm()->bind(ool->entry());
ool->Generate();
- masm()->jmp(ool->exit());
+ if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
}
}
FinishCode(masm());
+ // Emit the jump tables.
+ if (jump_tables_) {
+ masm()->Align(kPointerSize);
+ for (JumpTable* table = jump_tables_; table; table = table->next()) {
+ masm()->bind(table->label());
+ AssembleJumpTable(table->targets(), table->target_count());
+ }
+ }
+
safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
// TODO(titzer): what are the right code flags here?
}
+Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
+ jump_tables_ = new (zone()) JumpTable(jump_tables_, targets, target_count);
+ return jump_tables_->label();
+}
+
+
void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
void AssembleArchInstruction(Instruction* instr);
void AssembleArchJump(BasicBlock::RpoNumber target);
+ void AssembleArchSwitch(Instruction* instr);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
void AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) FINAL;
+  // ===========================================================================
+  // Jump table construction methods.
+
+ class JumpTable;
+  // Adds a jump table that is emitted after the actual code. Returns a label
+  // pointing to the beginning of the table. {targets} is assumed to be static
+  // or zone-allocated.
+ Label* AddJumpTable(Label** targets, size_t target_count);
+ // Emits a jump table.
+ void AssembleJumpTable(Label** targets, size_t target_count);
+
// ===========================================================================
// Deoptimization table construction
void AddSafepointAndDeopt(Instruction* instr);
ZoneDeque<Handle<Object> > deoptimization_literals_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
+ JumpTable* jump_tables_;
OutOfLineCode* ools_;
int osr_pc_offset_;
};
}
+size_t CaseIndexOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kCase, op->opcode());
+ return OpParameter<size_t>(op);
+}
+
+
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
return lhs.type() == rhs.type() && lhs.hint() == rhs.hint();
}
}
+const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
+ DCHECK_GE(control_output_count, 2u); // Disallow trivial switches.
+ return new (zone()) Operator( // --
+ IrOpcode::kSwitch, Operator::kFoldable, // opcode
+ "Switch", // name
+ 1, 0, 1, 0, 0, control_output_count); // counts
+}
+
+
+const Operator* CommonOperatorBuilder::Case(size_t index) {
+ return new (zone()) Operator1<size_t>( // --
+ IrOpcode::kCase, Operator::kFoldable, // opcode
+ "Case", // name
+ 0, 0, 1, 0, 0, 1, // counts
+ index); // parameter
+}
+
+
const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
// Outputs are formal parameters, plus context, receiver, and JSFunction.
const int value_output_count = num_formal_parameters + 3;
BranchHint BranchHintOf(const Operator* const);
+size_t CaseIndexOf(const Operator* const);
+
+
class SelectParameters FINAL {
public:
explicit SelectParameters(MachineType type,
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
+ const Operator* Switch(size_t control_output_count);
+ const Operator* Case(size_t index);
const Operator* Throw();
const Operator* Return();
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchSwitch:
+ AssembleArchSwitch(instr);
+ break;
case kArchNop:
// don't emit code for nops.
break;
}
+void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
+ IA32OperandConverter i(this, instr);
+ size_t const label_count = instr->InputCount() - 1;
+ Label** labels = zone()->NewArray<Label*>(label_count);
+ for (size_t index = 0; index < label_count; ++index) {
+ labels[index] = GetLabel(i.InputRpo(index + 1));
+ }
+ Label* const table = AddJumpTable(labels, label_count);
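+  // The table of 4-byte absolute addresses is emitted out of line after the
+  // code (see AssembleJumpTable); index it with the input register scaled by
+  // the entry size.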
+ __ jmp(Operand::JumpTable(i.InputRegister(0), times_4, table));
+}
+
+
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ dd(targets[index]);
+ }
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
V(ArchCallCodeObject) \
V(ArchCallJSFunction) \
V(ArchJmp) \
+ V(ArchSwitch) \
V(ArchNop) \
V(ArchRet) \
V(ArchStackPointer) \
}
-static inline void CheckNoPhis(const BasicBlock* block) {
+namespace {
+
+V8_INLINE void CheckNoPhis(const BasicBlock* block) {
#ifdef DEBUG
// Branch targets should not have phis.
for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
#endif
}
+} // namespace
+
void InstructionSelector::VisitControl(BasicBlock* block) {
Node* input = block->control_input();
if (condition->opcode() == IrOpcode::kAlways) return VisitGoto(tbranch);
return VisitBranch(input, tbranch, fbranch);
}
+ case BasicBlock::kSwitch: {
+ DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
+ BasicBlock** const branches = &block->successors().front();
+ size_t const branch_count = block->SuccessorCount();
+ DCHECK_LE(2u, branch_count);
+ // SSA deconstruction requires targets of branches not to have phis.
+ // Edge split form guarantees this property, but is more strict.
+ for (size_t index = 0; index < branch_count; ++index) {
+ CheckNoPhis(branches[index]);
+ }
+ return VisitSwitch(input, branches, branch_count);
+ }
case BasicBlock::kReturn: {
// If the result itself is a return, return its input.
Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
return VisitThrow(input->InputAt(0));
case BasicBlock::kNone: {
// TODO(titzer): exit block doesn't have control.
- DCHECK(input == NULL);
+ DCHECK_NULL(input);
break;
}
default:
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
+ case IrOpcode::kSwitch:
+ case IrOpcode::kCase:
case IrOpcode::kEffectPhi:
case IrOpcode::kEffectSet:
case IrOpcode::kMerge:
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
+ case IrOpcode::kSwitch:
+ case IrOpcode::kCase:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
// No code needed for these graph artifacts.
}
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock** branches,
+ size_t branch_count) {
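+  // Emit a single kArchSwitch instruction whose first input is the switch
+  // value and whose remaining inputs are the case block labels in index order.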
+ OperandGenerator g(this);
+ Node* const value = node->InputAt(0);
+ size_t const input_count = branch_count + 1;
+ InstructionOperand* const inputs =
+ zone()->NewArray<InstructionOperand>(static_cast<int>(input_count));
+ inputs[0] = g.UseRegister(value);
+ for (size_t index = 0; index < branch_count; ++index) {
+ inputs[index + 1] = g.Label(branches[index]);
+ }
+ Emit(kArchSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitReturn(Node* value) {
OperandGenerator g(this);
if (value != NULL) {
void VisitCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
+ void VisitSwitch(Node* node, BasicBlock** branches, size_t branch_count);
void VisitReturn(Node* value);
void VisitThrow(Node* value);
void VisitDeoptimize(Node* deopt);
V(Branch) \
V(IfTrue) \
V(IfFalse) \
+ V(Switch) \
+ V(Case) \
V(Merge) \
V(Return) \
V(OsrNormalEntry) \
namespace internal {
namespace compiler {
+namespace {
template <typename N>
-static inline N CheckRange(size_t val) {
- CHECK(val <= std::numeric_limits<N>::max());
+V8_INLINE N CheckRange(size_t val) {
+ CHECK_LE(val, std::numeric_limits<N>::max());
return static_cast<N>(val);
}
+} // namespace
+
Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
int EffectOutputCount() const { return effect_out_; }
int ControlOutputCount() const { return control_out_; }
- static inline size_t ZeroIfPure(Properties properties) {
+ static size_t ZeroIfPure(Properties properties) {
return (properties & kPure) == kPure ? 0 : 1;
}
}
+void RawMachineAssembler::Switch(Node* index, Label** succ_labels,
+ size_t succ_count) {
+ DCHECK_NE(schedule()->end(), current_block_);
+ Node* sw = NewNode(common()->Switch(succ_count), index);
+ BasicBlock** succ_blocks =
+ zone()->NewArray<BasicBlock*>(static_cast<int>(succ_count));
+ for (size_t index = 0; index < succ_count; ++index) {
+ succ_blocks[index] = Use(succ_labels[index]);
+ }
+ schedule()->AddSwitch(CurrentBlock(), sw, succ_blocks, succ_count);
+ current_block_ = nullptr;
+}
+
+
void RawMachineAssembler::Return(Node* value) {
schedule()->AddReturn(CurrentBlock(), value);
current_block_ = NULL;
Label* Exit();
void Goto(Label* label);
void Branch(Node* condition, Label* true_val, Label* false_val);
+ void Switch(Node* index, Label** succ_labels, size_t succ_count);
// Call through CallFunctionStub with lazy deopt and frame-state.
Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
Node* frame_state, CallFunctionFlags flags);
return os << "goto";
case BasicBlock::kBranch:
return os << "branch";
+ case BasicBlock::kSwitch:
+ return os << "switch";
case BasicBlock::kReturn:
return os << "return";
case BasicBlock::kThrow:
}
+void Schedule::AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
+ size_t succ_count) {
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
+ block->set_control(BasicBlock::kSwitch);
+ for (size_t index = 0; index < succ_count; ++index) {
+ AddSuccessor(block, succ_blocks[index]);
+ }
+ SetControlInput(block, sw);
+}
+
+
void Schedule::AddReturn(BasicBlock* block, Node* input) {
DCHECK(block->control() == BasicBlock::kNone);
block->set_control(BasicBlock::kReturn);
MoveSuccessors(block, end);
AddSuccessor(block, tblock);
AddSuccessor(block, fblock);
- if (block->control_input() != NULL) {
+ if (block->control_input() != nullptr) {
SetControlInput(end, block->control_input());
}
SetControlInput(block, branch);
}
+void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
+ BasicBlock** succ_blocks, size_t succ_count) {
+ DCHECK_NE(BasicBlock::kNone, block->control());
+ DCHECK_EQ(BasicBlock::kNone, end->control());
+ end->set_control(block->control());
+ block->set_control(BasicBlock::kSwitch);
+ MoveSuccessors(block, end);
+ for (size_t index = 0; index < succ_count; ++index) {
+ AddSuccessor(block, succ_blocks[index]);
+ }
+ if (block->control_input() != nullptr) {
+ SetControlInput(end, block->control_input());
+ }
+ SetControlInput(block, sw);
+}
+
+
void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
block->AddSuccessor(succ);
succ->AddPredecessor(block);
kNone, // Control not initialized yet.
kGoto, // Goto a single successor block.
kBranch, // Branch if true to first successor, otherwise second.
+ kSwitch, // Table dispatch to one of the successor blocks.
kReturn, // Return a value from this method.
kThrow // Throw an exception.
};
void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock);
+ // BasicBlock building: add a switch at the end of {block}.
+ void AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
+ size_t succ_count);
+
// BasicBlock building: add a return at the end of {block}.
void AddReturn(BasicBlock* block, Node* input);
void InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
BasicBlock* tblock, BasicBlock* fblock);
+ // BasicBlock mutation: insert a switch into the end of {block}.
+ void InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
+ BasicBlock** succ_blocks, size_t succ_count);
+
// Exposed publicly for testing only.
void AddSuccessorForTesting(BasicBlock* block, BasicBlock* succ) {
return AddSuccessor(block, succ);
class CFGBuilder : public ZoneObject {
public:
CFGBuilder(Zone* zone, Scheduler* scheduler)
- : scheduler_(scheduler),
+ : zone_(zone),
+ scheduler_(scheduler),
schedule_(scheduler->schedule_),
queued_(scheduler->graph_, 2),
queue_(zone),
BuildBlockForNode(node);
break;
case IrOpcode::kBranch:
- BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
+ case IrOpcode::kSwitch:
+ BuildBlocksForSuccessors(node);
break;
default:
break;
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectBranch(node);
break;
+ case IrOpcode::kSwitch:
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectSwitch(node);
+ break;
case IrOpcode::kReturn:
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectReturn(node);
return block;
}
- void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
- IrOpcode::Value b) {
- Node* successors[2];
- CollectSuccessorProjections(node, successors, a, b);
- BuildBlockForNode(successors[0]);
- BuildBlockForNode(successors[1]);
+ void BuildBlocksForSuccessors(Node* node) {
+ size_t const successor_count = node->op()->ControlOutputCount();
+ Node** successors =
+ zone_->NewArray<Node*>(static_cast<int>(successor_count));
+ CollectSuccessorProjections(node, successors, successor_count);
+ for (size_t index = 0; index < successor_count; ++index) {
+ BuildBlockForNode(successors[index]);
+ }
}
// Collect the branch-related projections from a node, such as IfTrue,
- // IfFalse.
- // TODO(titzer): consider moving this to node.h
- void CollectSuccessorProjections(Node* node, Node** buffer,
- IrOpcode::Value true_opcode,
- IrOpcode::Value false_opcode) {
- buffer[0] = NULL;
- buffer[1] = NULL;
- for (Node* use : node->uses()) {
- if (use->opcode() == true_opcode) {
- DCHECK(!buffer[0]);
- buffer[0] = use;
- }
- if (use->opcode() == false_opcode) {
- DCHECK(!buffer[1]);
- buffer[1] = use;
+ // IfFalse, and Case.
+ void CollectSuccessorProjections(Node* node, Node** successors,
+ size_t successor_count) {
+#ifdef DEBUG
+ DCHECK_EQ(static_cast<int>(successor_count), node->UseCount());
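+  // Zero-initialize the buffer so the DCHECKs below catch duplicate and
+  // missing projections.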
+ std::memset(successors, 0, sizeof(*successors) * successor_count);
+#endif
+ for (Node* const use : node->uses()) {
+ size_t index;
+ switch (use->opcode()) {
+ default:
+ UNREACHABLE();
+ // Fall through.
+ case IrOpcode::kIfTrue:
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ index = 0;
+ break;
+ case IrOpcode::kIfFalse:
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ index = 1;
+ break;
+ case IrOpcode::kCase:
+ DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
+ index = CaseIndexOf(use->op());
+ break;
}
+ DCHECK_LT(index, successor_count);
+ DCHECK(successors[index] == nullptr);
+ successors[index] = use;
+ }
+#ifdef DEBUG
+ for (size_t index = 0; index < successor_count; ++index) {
+ DCHECK_NOT_NULL(successors[index]);
}
- DCHECK(buffer[0]);
- DCHECK(buffer[1]);
+#endif
}
- void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
- IrOpcode::Value true_opcode,
- IrOpcode::Value false_opcode) {
- Node* successors[2];
- CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
- buffer[0] = schedule_->block(successors[0]);
- buffer[1] = schedule_->block(successors[1]);
+ void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
+ size_t successor_count) {
+ Node** successors = reinterpret_cast<Node**>(successor_blocks);
+ CollectSuccessorProjections(node, successors, successor_count);
+ for (size_t index = 0; index < successor_count; ++index) {
+ successor_blocks[index] = schedule_->block(successors[index]);
+ }
}
void ConnectBranch(Node* branch) {
BasicBlock* successor_blocks[2];
- CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
- IrOpcode::kIfFalse);
+ CollectSuccessorBlocks(branch, successor_blocks,
+ arraysize(successor_blocks));
// Consider branch hints.
switch (BranchHintOf(branch->op())) {
} else {
Node* branch_block_node = NodeProperties::GetControlInput(branch);
BasicBlock* branch_block = schedule_->block(branch_block_node);
- DCHECK(branch_block != NULL);
+ DCHECK_NOT_NULL(branch_block);
TraceConnect(branch, branch_block, successor_blocks[0]);
TraceConnect(branch, branch_block, successor_blocks[1]);
}
}
+ void ConnectSwitch(Node* sw) {
+ size_t const successor_count = sw->op()->ControlOutputCount();
+ BasicBlock** successor_blocks =
+ zone_->NewArray<BasicBlock*>(static_cast<int>(successor_count));
+ CollectSuccessorBlocks(sw, successor_blocks, successor_count);
+
+ if (sw == component_entry_) {
+ for (size_t index = 0; index < successor_count; ++index) {
+ TraceConnect(sw, component_start_, successor_blocks[index]);
+ }
+ schedule_->InsertSwitch(component_start_, component_end_, sw,
+ successor_blocks, successor_count);
+ } else {
+ Node* sw_block_node = NodeProperties::GetControlInput(sw);
+ BasicBlock* sw_block = schedule_->block(sw_block_node);
+ DCHECK_NOT_NULL(sw_block);
+
+ for (size_t index = 0; index < successor_count; ++index) {
+ TraceConnect(sw, sw_block, successor_blocks[index]);
+ }
+ schedule_->AddSwitch(sw_block, sw, successor_blocks, successor_count);
+ }
+ }
+
void ConnectMerge(Node* merge) {
// Don't connect the special merge at the end to its predecessors.
if (IsFinalMerge(merge)) return;
BasicBlock* block = schedule_->block(merge);
- DCHECK(block != NULL);
+ DCHECK_NOT_NULL(block);
// For all of the merge's control inputs, add a goto at the end to the
// merge's basic block.
for (Node* const input : merge->inputs()) {
}
void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
- DCHECK(block);
+ DCHECK_NOT_NULL(block);
if (succ == NULL) {
Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
block->id().ToInt());
DCHECK(control_.empty());
}
+ Zone* zone_;
Scheduler* scheduler_;
Schedule* schedule_;
NodeMarker<bool> queued_; // Mark indicating whether node is queued.
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kSwitch: {
+ // Switch uses are Case.
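+      // Every Case use must carry a distinct index within range.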
+ std::vector<bool> uses;
+ uses.resize(node->UseCount());
+ for (auto use : node->uses()) {
+ CHECK_EQ(IrOpcode::kCase, use->opcode());
+ size_t const index = CaseIndexOf(use->op());
+ CHECK_LT(index, uses.size());
+ CHECK(!uses[index]);
+ uses[index] = true;
+ }
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
+ }
+ case IrOpcode::kCase:
+ CHECK_EQ(IrOpcode::kSwitch,
+ NodeProperties::GetControlInput(node)->opcode());
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
case IrOpcode::kLoop:
case IrOpcode::kMerge:
CHECK_EQ(control_count, input_count);
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchSwitch:
+ AssembleArchSwitch(instr);
+ break;
case kArchNop:
// don't emit code for nops.
break;
}
+void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
+ X64OperandConverter i(this, instr);
+ size_t const label_count = instr->InputCount() - 1;
+ Label** labels = zone()->NewArray<Label*>(static_cast<int>(label_count));
+ for (size_t index = 0; index < label_count; ++index) {
+ labels[index] = GetLabel(i.InputRpo(static_cast<int>(index + 1)));
+ }
+ Label* const table = AddJumpTable(labels, label_count);
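+  // Materialize the table base address and index it with the case value; each
+  // entry is an 8-byte absolute address (see AssembleJumpTable below), hence
+  // the times_8 scaling.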
+ __ leaq(kScratchRegister, Operand(table));
+ __ jmp(Operand(kScratchRegister, i.InputRegister(0), times_8, 0));
+}
+
+
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ dq(targets[index]);
+ }
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
"%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
reinterpret_cast<intptr_t>(ptr),
ptr - begin);
- pc += 4;
+ pc += sizeof(ptr);
} else {
decode_buffer[0] = '\0';
pc += d.InstructionDecode(decode_buffer, pc);
}
+TEST(RunSwitch1) {
+ RawMachineAssemblerTester<int32_t> m;
+
+ int constant = 11223344;
+
+ MLabel block0, block1, end;
+ MLabel* cases[] = {&block0, &block1};
+ m.Switch(m.IntPtrConstant(0), cases, arraysize(cases));
+ m.Bind(&block0);
+ m.Goto(&end);
+ m.Bind(&block1);
+ m.Goto(&end);
+ m.Bind(&end);
+ m.Return(m.Int32Constant(constant));
+
+ CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSwitch2) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+
+ const size_t kNumCases = 255;
+ int32_t values[kNumCases];
+ m.main_isolate()->random_number_generator()->NextBytes(values,
+ sizeof(values));
+ MLabel end;
+ MLabel* cases[kNumCases];
+ Node* results[kNumCases];
+ for (size_t i = 0; i < kNumCases; ++i) {
+ cases[i] = new (m.main_zone()->New(sizeof(MLabel))) MLabel;
+ }
+ m.Switch(m.ConvertInt32ToIntPtr(m.Parameter(0)), cases, arraysize(cases));
+ for (size_t i = 0; i < kNumCases; ++i) {
+ m.Bind(cases[i]);
+ results[i] = m.Int32Constant(values[i]);
+ m.Goto(&end);
+ }
+ m.Bind(&end);
+ const int num_results = static_cast<int>(arraysize(results));
+ Node* phi =
+ m.NewNode(m.common()->Phi(kMachInt32, num_results), num_results, results);
+ m.Return(phi);
+
+ for (size_t i = 0; i < kNumCases; ++i) {
+ CHECK_EQ(values[i], m.Call(static_cast<int>(i)));
+ }
+}
+
+
TEST(RunLoadInt32) {
RawMachineAssemblerTester<int32_t> m;
const int kArguments[] = {1, 5, 6, 42, 100, 10000, 65000};
+const size_t kCases[] = {2, 3, 4, 100, 255};
+
+
const float kFloatValues[] = {-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::min(),
-1.0f,
}
+TEST_F(CommonOperatorTest, Switch) {
+ TRACED_FOREACH(size_t, cases, kCases) {
+ const Operator* const op = common()->Switch(cases);
+ EXPECT_EQ(IrOpcode::kSwitch, op->opcode());
+ EXPECT_EQ(Operator::kFoldable, op->properties());
+ EXPECT_EQ(1, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(static_cast<int>(cases), op->ControlOutputCount());
+ }
+}
+
+
+TEST_F(CommonOperatorTest, Case) {
+ TRACED_FORRANGE(size_t, index, 0, 1024) {
+ const Operator* const op = common()->Case(index);
+ EXPECT_EQ(IrOpcode::kCase, op->opcode());
+ EXPECT_EQ(Operator::kFoldable, op->properties());
+ EXPECT_EQ(index, CaseIndexOf(op));
+ EXPECT_EQ(0, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ControlOutputCount());
+ }
+}
+
+
TEST_F(CommonOperatorTest, Select) {
static const MachineType kTypes[] = {
kMachInt8, kMachUint8, kMachInt16, kMachUint16,
CHECK(!schedule->block(f)->deferred());
}
+
+TARGET_TEST_F(SchedulerTest, Switch) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* sw = graph()->NewNode(common()->Switch(2), p0, start);
+ Node* c0 = graph()->NewNode(common()->Case(0), sw);
+ Node* v0 = graph()->NewNode(common()->Int32Constant(11));
+ Node* c1 = graph()->NewNode(common()->Case(1), sw);
+ Node* v1 = graph()->NewNode(common()->Int32Constant(22));
+ Node* m = graph()->NewNode(common()->Merge(2), c0, c1);
+ Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 2), v0, v1, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
+ Node* end = graph()->NewNode(common()->End(), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(13, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, FloatingSwitch) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* sw = graph()->NewNode(common()->Switch(2), p0, start);
+ Node* c0 = graph()->NewNode(common()->Case(0), sw);
+ Node* v0 = graph()->NewNode(common()->Int32Constant(11));
+ Node* c1 = graph()->NewNode(common()->Case(1), sw);
+ Node* v1 = graph()->NewNode(common()->Int32Constant(22));
+ Node* m = graph()->NewNode(common()->Merge(2), c0, c1);
+ Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 2), v0, v1, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(13, graph());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8