"src/compiler/control-builders.cc",
"src/compiler/control-builders.h",
"src/compiler/control-equivalence.h",
+ "src/compiler/control-flow-optimizer.cc",
+ "src/compiler/control-flow-optimizer.h",
"src/compiler/control-reducer.cc",
"src/compiler/control-reducer.h",
"src/compiler/diamond.h",
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchSwitch:
- AssembleArchSwitch(instr);
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchNop:
}
-void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
- ArmOperandConverter i(this, instr);
- int const kNumLabels = static_cast<int>(instr->InputCount() - 1);
- __ BlockConstPoolFor(kNumLabels + 2);
- __ ldr(pc, MemOperand(pc, i.InputRegister(0), LSL, 2));
- __ nop();
- for (int index = 0; index < kNumLabels; ++index) {
- __ dd(GetLabel(i.InputRpo(index + 1)));
- }
-}
-
-
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
}
+// Emits a lookup switch as a linear chain of compare-and-branch pairs.
+// Instruction operand layout: input 0 is the switch value register, input 1
+// is the default target, and inputs 2..N come in (case value, target) pairs.
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+  ArmOperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  for (size_t index = 2; index < instr->InputCount(); index += 2) {
+    __ cmp(input, Operand(i.InputInt32(index + 0)));
+    __ b(eq, GetLabel(i.InputRpo(index + 1)));
+  }
+  // No case value matched: jump to the default target.
+  AssembleArchJump(i.InputRpo(1));
+}
+
+
+// Emits a table switch: a bounds check followed by a PC-relative load into
+// pc through a table of code addresses (one 32-bit entry per case).
+// Instruction operand layout: input 0 is the (already zero-based) index,
+// input 1 is the default target, inputs 2..N are the per-index targets.
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+  ArmOperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  size_t const case_count = instr->InputCount() - 2;
+  __ cmp(input, Operand(case_count));
+  // Keep the constant pool from being emitted in the middle of the
+  // ldr + branch + case_count-entry table, which must stay contiguous.
+  __ BlockConstPoolFor(case_count + 2);
+  // If in range ("lo" = unsigned lower), load the target directly into pc.
+  // On ARM, reading pc yields the current instruction's address + 8, which
+  // is exactly where the table below begins (after the ldr and the b).
+  __ ldr(pc, MemOperand(pc, input, LSL, 2), lo);
+  __ b(GetLabel(i.InputRpo(1)));
+  for (size_t index = 0; index < case_count; ++index) {
+    __ dd(GetLabel(i.InputRpo(index + 2)));
+  }
+}
+
+
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
}
+// Lowers a Switch node to either an ArchTableSwitch (jump table) or an
+// ArchLookupSwitch (linear compare chain), chosen by a weighted
+// space/time cost heuristic over the value range and case count.
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+                                      BasicBlock** case_branches,
+                                      int32_t* case_values, size_t case_count,
+                                      int32_t min_value, int32_t max_value) {
+  ArmOperandGenerator g(this);
+  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+  InstructionOperand default_operand = g.Label(default_branch);
+
+  // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+  // is 2^31-1, so don't assume that it's non-zero below.
+  size_t value_range =
+      1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+  // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+  // instruction.
+  size_t table_space_cost = 4 + value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * case_count;
+  size_t lookup_time_cost = case_count;
+  // The kMinInt guard also rules out the value_range == 0 overflow case
+  // and keeps the rebasing subtraction below from overflowing.
+  if (case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      min_value > std::numeric_limits<int32_t>::min()) {
+    InstructionOperand index_operand = value_operand;
+    if (min_value) {
+      // Rebase the switch value so the table is indexed from zero.
+      index_operand = g.TempRegister();
+      Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
+           index_operand, value_operand, g.TempImmediate(min_value));
+    }
+    size_t input_count = 2 + value_range;
+    auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+    inputs[0] = index_operand;
+    // Table slots with no explicit case fall through to the default block.
+    std::fill(&inputs[1], &inputs[input_count], default_operand);
+    for (size_t index = 0; index < case_count; ++index) {
+      size_t value = case_values[index] - min_value;
+      BasicBlock* branch = case_branches[index];
+      DCHECK_LE(0u, value);
+      DCHECK_LT(value + 2, input_count);
+      inputs[value + 2] = g.Label(branch);
+    }
+    Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+        ->MarkAsControl();
+    return;
+  }
+
+  // Generate a sequence of conditional jumps.
+  // Operand layout: value, default label, then (value, label) pairs.
+  size_t input_count = 2 + case_count * 2;
+  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+  inputs[0] = value_operand;
+  inputs[1] = default_operand;
+  for (size_t index = 0; index < case_count; ++index) {
+    int32_t value = case_values[index];
+    BasicBlock* branch = case_branches[index];
+    inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+    inputs[index * 2 + 2 + 1] = g.Label(branch);
+  }
+  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+      ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
- case kArchSwitch:
- AssembleArchSwitch(instr);
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
break;
case kArchNop:
// don't emit code for nops.
}
-void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
- Arm64OperandConverter i(this, instr);
- UseScratchRegisterScope scope(masm());
- Register reg = i.InputRegister(0);
- Register tmp = scope.AcquireX();
- Label table;
- __ Adr(tmp, &table);
- __ Add(tmp, tmp, Operand(reg, LSL, 2));
- __ Br(tmp);
- __ Bind(&table);
- for (size_t index = 1; index < instr->InputCount(); ++index) {
- __ B(GetLabel(i.InputRpo(index)));
- }
-}
-
-
// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
}
+// Emits a lookup switch as a linear chain of compare-and-branch pairs.
+// Instruction operand layout: input 0 is the switch value (as W register),
+// input 1 is the default target, inputs 2..N are (case value, target) pairs.
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+  Arm64OperandConverter i(this, instr);
+  Register input = i.InputRegister32(0);
+  for (size_t index = 2; index < instr->InputCount(); index += 2) {
+    __ Cmp(input, i.InputInt32(index + 0));
+    __ B(eq, GetLabel(i.InputRpo(index + 1)));
+  }
+  // No case value matched: jump to the default target.
+  AssembleArchJump(i.InputRpo(1));
+}
+
+
+// Emits a table switch: a bounds check followed by an indirect branch into
+// a table of unconditional B instructions (4 bytes each, hence the << 2).
+// Instruction operand layout: input 0 is the (already zero-based) index,
+// input 1 is the default target, inputs 2..N are the per-index targets.
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+  Arm64OperandConverter i(this, instr);
+  UseScratchRegisterScope scope(masm());
+  Register input = i.InputRegister32(0);
+  Register temp = scope.AcquireX();
+  size_t const case_count = instr->InputCount() - 2;
+  Label table;
+  // "hs" = unsigned higher-or-same: out-of-range indices go to the default.
+  __ Cmp(input, case_count);
+  __ B(hs, GetLabel(i.InputRpo(1)));
+  __ Adr(temp, &table);
+  // Zero-extend the 32-bit index and scale by the 4-byte entry size.
+  __ Add(temp, temp, Operand(input, UXTW, 2));
+  __ Br(temp);
+  // Block pool emission so nothing is interleaved with the table, which
+  // must remain exactly one instruction per entry.
+  __ StartBlockPools();
+  __ Bind(&table);
+  for (size_t index = 0; index < case_count; ++index) {
+    __ B(GetLabel(i.InputRpo(index + 2)));
+  }
+  __ EndBlockPools();
+}
+
+
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
}
+// Lowers a Switch node to either an ArchTableSwitch (jump table) or an
+// ArchLookupSwitch (linear compare chain), chosen by a weighted
+// space/time cost heuristic over the value range and case count.
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+                                      BasicBlock** case_branches,
+                                      int32_t* case_values, size_t case_count,
+                                      int32_t min_value, int32_t max_value) {
+  Arm64OperandGenerator g(this);
+  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+  InstructionOperand default_operand = g.Label(default_branch);
+
+  // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+  // is 2^31-1, so don't assume that it's non-zero below.
+  size_t value_range =
+      1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+  // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+  // instruction.
+  size_t table_space_cost = 4 + value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * case_count;
+  size_t lookup_time_cost = case_count;
+  // The kMinInt guard also rules out the value_range == 0 overflow case
+  // and keeps the rebasing subtraction below from overflowing.
+  if (case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      min_value > std::numeric_limits<int32_t>::min()) {
+    InstructionOperand index_operand = value_operand;
+    if (min_value) {
+      // Rebase the switch value so the table is indexed from zero.
+      index_operand = g.TempRegister();
+      Emit(kArm64Sub32, index_operand, value_operand,
+           g.TempImmediate(min_value));
+    }
+    size_t input_count = 2 + value_range;
+    auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+    inputs[0] = index_operand;
+    // Table slots with no explicit case fall through to the default block.
+    std::fill(&inputs[1], &inputs[input_count], default_operand);
+    for (size_t index = 0; index < case_count; ++index) {
+      size_t value = case_values[index] - min_value;
+      BasicBlock* branch = case_branches[index];
+      DCHECK_LE(0u, value);
+      DCHECK_LT(value + 2, input_count);
+      inputs[value + 2] = g.Label(branch);
+    }
+    Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+        ->MarkAsControl();
+    return;
+  }
+
+  // Generate a sequence of conditional jumps.
+  // Operand layout: value, default label, then (value, label) pairs.
+  size_t input_count = 2 + case_count * 2;
+  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+  inputs[0] = value_operand;
+  inputs[1] = default_operand;
+  for (size_t index = 0; index < case_count; ++index) {
+    int32_t value = case_values[index];
+    BasicBlock* branch = case_branches[index];
+    inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+    inputs[index * 2 + 2 + 1] = g.Label(branch);
+  }
+  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+      ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
Node* const user = node;
FlagsContinuation cont(kEqual, node);
void AssembleArchInstruction(Instruction* instr);
void AssembleArchJump(BasicBlock::RpoNumber target);
- void AssembleArchSwitch(Instruction* instr);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+ void AssembleArchLookupSwitch(Instruction* instr);
+ void AssembleArchTableSwitch(Instruction* instr);
void AssembleDeoptimizerCall(int deoptimization_id);
}
-size_t CaseIndexOf(const Operator* const op) {
- DCHECK_EQ(IrOpcode::kCase, op->opcode());
- return OpParameter<size_t>(op);
-}
-
-
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
return lhs.type() == rhs.type() && lhs.hint() == rhs.hint();
}
V(End, Operator::kKontrol, 0, 0, 1, 0, 0, 0) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(Throw, Operator::kFoldable, 1, 1, 1, 0, 0, 1) \
V(Return, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
- DCHECK_GE(control_output_count, 2u); // Disallow trivial switches.
+ DCHECK_GE(control_output_count, 3u); // Disallow trivial switches.
return new (zone()) Operator( // --
IrOpcode::kSwitch, Operator::kKontrol, // opcode
"Switch", // name
}
-const Operator* CommonOperatorBuilder::Case(size_t index) {
- return new (zone()) Operator1<size_t>( // --
- IrOpcode::kCase, Operator::kKontrol, // opcode
- "Case", // name
- 0, 0, 1, 0, 0, 1, // counts
- index); // parameter
+// Creates an IfValue projection operator: the control output of a Switch
+// that is taken when the switched value equals {index}.
+const Operator* CommonOperatorBuilder::IfValue(int32_t index) {
+  return new (zone()) Operator1<int32_t>(      // --
+      IrOpcode::kIfValue, Operator::kKontrol,  // opcode
+      "IfValue",                               // name
+      0, 0, 1, 0, 0, 1,                        // counts
+      index);                                  // parameter
+}
BranchHint BranchHintOf(const Operator* const);
-size_t CaseIndexOf(const Operator* const);
-
-
class SelectParameters FINAL {
public:
explicit SelectParameters(MachineType type,
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* Switch(size_t control_output_count);
- const Operator* Case(size_t index);
+ const Operator* IfValue(int32_t value);
+ const Operator* IfDefault();
const Operator* Throw();
const Operator* Return();
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/control-flow-optimizer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// {queued_} is a 2-state NodeMarker (false = not yet enqueued), so each
+// node enters the worklist at most once per marking epoch.
+ControlFlowOptimizer::ControlFlowOptimizer(JSGraph* jsgraph, Zone* zone)
+    : jsgraph_(jsgraph),
+      queue_(zone),
+      queued_(jsgraph->graph(), 2),
+      zone_(zone) {}
+
+
+// Worklist-driven traversal of the control flow, starting at the graph's
+// start node. Branch nodes get pattern-matched (VisitBranch); all other
+// control nodes simply propagate the walk to their uses (VisitNode).
+void ControlFlowOptimizer::Optimize() {
+  Enqueue(graph()->start());
+  while (!queue_.empty()) {
+    Node* node = queue_.front();
+    queue_.pop();
+    // A queued node may have been killed by a rewrite in the meantime.
+    if (node->IsDead()) continue;
+    switch (node->opcode()) {
+      case IrOpcode::kBranch:
+        VisitBranch(node);
+        break;
+      default:
+        VisitNode(node);
+        break;
+    }
+  }
+}
+
+
+// Adds {node} to the worklist unless it is dead or was already enqueued.
+void ControlFlowOptimizer::Enqueue(Node* node) {
+  DCHECK_NOT_NULL(node);
+  if (node->IsDead() || queued_.Get(node)) return;
+  queued_.Set(node, true);
+  queue_.push(node);
+}
+
+
+// Default visit: keep walking by enqueueing all control uses of {node}.
+void ControlFlowOptimizer::VisitNode(Node* node) {
+  for (Node* use : node->uses()) {
+    if (NodeProperties::IsControl(use)) Enqueue(use);
+  }
+}
+
+
+// Pattern-matches a cascade of branches of the shape
+//   if (index == K1) ... else if (index == K2) ... else ...
+// where every condition is a Word32Equal on the same {index} against a
+// distinct int32 constant, and rewrites it into a single Switch node with
+// one IfValue projection per constant and an IfDefault projection for the
+// final else-branch. A chain of length one is left untouched.
+void ControlFlowOptimizer::VisitBranch(Node* node) {
+  DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+
+  Node* branch = node;
+  Node* cond = NodeProperties::GetValueInput(branch, 0);
+  if (cond->opcode() != IrOpcode::kWord32Equal) return VisitNode(node);
+  Int32BinopMatcher m(cond);
+  Node* index = m.left().node();
+  if (!m.right().HasValue()) return VisitNode(node);
+  int32_t value = m.right().Value();
+  // Constants matched so far; seeing a repeated constant ends the chain.
+  ZoneSet<int32_t> values(zone());
+  values.insert(value);
+
+  Node* if_false;
+  Node* if_true;
+  while (true) {
+    // TODO(turbofan): use NodeProperties::CollectSuccessorProjections() here
+    // once available.
+    auto it = branch->uses().begin();
+    DCHECK(it != branch->uses().end());
+    if_true = *it++;
+    DCHECK(it != branch->uses().end());
+    if_false = *it++;
+    DCHECK(it == branch->uses().end());
+    if (if_true->opcode() != IrOpcode::kIfTrue) std::swap(if_true, if_false);
+    DCHECK_EQ(IrOpcode::kIfTrue, if_true->opcode());
+    DCHECK_EQ(IrOpcode::kIfFalse, if_false->opcode());
+
+    // The chain continues only if the IfFalse projection's sole use is
+    // another Branch comparing {index} against a fresh constant.
+    it = if_false->uses().begin();
+    if (it == if_false->uses().end()) break;
+    Node* branch1 = *it++;
+    if (branch1->opcode() != IrOpcode::kBranch) break;
+    if (it != if_false->uses().end()) break;
+    Node* cond1 = branch1->InputAt(0);
+    if (cond1->opcode() != IrOpcode::kWord32Equal) break;
+    Int32BinopMatcher m1(cond1);
+    if (m1.left().node() != index) break;
+    if (!m1.right().HasValue()) break;
+    int32_t value1 = m1.right().Value();
+    if (values.find(value1) != values.end()) break;
+    DCHECK_NE(value, value1);
+
+    if (branch != node) {
+      // {branch} is an interior link: kill it and hang its IfTrue
+      // projection (about to become an IfValue) off the future Switch.
+      branch->RemoveAllInputs();
+      if_true->ReplaceInput(0, node);
+    }
+    if_true->set_op(common()->IfValue(value));
+    if_false->RemoveAllInputs();
+    Enqueue(if_true);
+
+    // Advance to the next link of the chain.
+    branch = branch1;
+    value = value1;
+    values.insert(value);
+  }
+
+  DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+  DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+  DCHECK_EQ(IrOpcode::kIfTrue, if_true->opcode());
+  DCHECK_EQ(IrOpcode::kIfFalse, if_false->opcode());
+  if (branch == node) {
+    // Chain of length one: keep the original Branch as-is.
+    DCHECK_EQ(1u, values.size());
+    Enqueue(if_true);
+    Enqueue(if_false);
+  } else {
+    // Turn the chain head {node} into the Switch (+1 control output for
+    // the default) and retarget the last link's projections.
+    DCHECK_LT(1u, values.size());
+    node->set_op(common()->Switch(values.size() + 1));
+    node->ReplaceInput(0, index);
+    if_true->set_op(common()->IfValue(value));
+    if_true->ReplaceInput(0, node);
+    Enqueue(if_true);
+    if_false->set_op(common()->IfDefault());
+    if_false->ReplaceInput(0, node);
+    Enqueue(if_false);
+    branch->RemoveAllInputs();
+  }
+}
+
+
+// Shorthand accessors into the underlying JSGraph.
+CommonOperatorBuilder* ControlFlowOptimizer::common() const {
+  return jsgraph()->common();
+}
+
+
+Graph* ControlFlowOptimizer::graph() const { return jsgraph()->graph(); }
+
+
+// NOTE: machine() is not used anywhere in this file at present.
+MachineOperatorBuilder* ControlFlowOptimizer::machine() const {
+  return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
+#define V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
+
+#include "src/compiler/node-marker.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+class Node;
+
+
+// Performs control-flow-level optimizations on the graph. Currently this
+// means collapsing chains of Branch nodes that compare the same value
+// against distinct int32 constants into a single Switch node (see
+// VisitBranch in the implementation file).
+class ControlFlowOptimizer FINAL {
+ public:
+  ControlFlowOptimizer(JSGraph* jsgraph, Zone* zone);
+
+  // Runs a worklist traversal over the control flow, rewriting as it goes.
+  void Optimize();
+
+ private:
+  void Enqueue(Node* node);
+  void VisitNode(Node* node);
+  void VisitBranch(Node* node);
+
+  CommonOperatorBuilder* common() const;
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  MachineOperatorBuilder* machine() const;
+  Zone* zone() const { return zone_; }
+
+  JSGraph* const jsgraph_;
+  // Worklist of control nodes still to visit.
+  ZoneQueue<Node*> queue_;
+  // Marks nodes that were already enqueued, so each is visited once.
+  NodeMarker<bool> queued_;
+  Zone* const zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
- case kArchSwitch:
- AssembleArchSwitch(instr);
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
break;
case kArchNop:
// don't emit code for nops.
}
-void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
- IA32OperandConverter i(this, instr);
- size_t const label_count = instr->InputCount() - 1;
- Label** labels = zone()->NewArray<Label*>(label_count);
- for (size_t index = 0; index < label_count; ++index) {
- labels[index] = GetLabel(i.InputRpo(index + 1));
- }
- Label* const table = AddJumpTable(labels, label_count);
- __ jmp(Operand::JumpTable(i.InputRegister(0), times_4, table));
-}
-
-
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
}
+// Emits a lookup switch as a linear chain of compare-and-branch pairs.
+// Instruction operand layout: input 0 is the switch value register, input 1
+// is the default target, inputs 2..N are (case value, target) pairs.
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+  IA32OperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  for (size_t index = 2; index < instr->InputCount(); index += 2) {
+    __ cmp(input, Immediate(i.InputInt32(index + 0)));
+    __ j(equal, GetLabel(i.InputRpo(index + 1)));
+  }
+  // No case value matched: jump to the default target.
+  AssembleArchJump(i.InputRpo(1));
+}
+
+
+// Emits a table switch: a bounds check followed by an indirect jmp through
+// an embedded jump table of code addresses (4 bytes per entry).
+// Instruction operand layout: input 0 is the (already zero-based) index,
+// input 1 is the default target, inputs 2..N are the per-index targets.
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+  IA32OperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  size_t const case_count = instr->InputCount() - 2;
+  Label** cases = zone()->NewArray<Label*>(case_count);
+  for (size_t index = 0; index < case_count; ++index) {
+    cases[index] = GetLabel(i.InputRpo(index + 2));
+  }
+  Label* const table = AddJumpTable(cases, case_count);
+  // Unsigned comparison, so negative (underflowed) indices also go to the
+  // default block.
+  __ cmp(input, Immediate(case_count));
+  __ j(above_equal, GetLabel(i.InputRpo(1)));
+  __ jmp(Operand::JumpTable(input, times_4, table));
+}
+
+
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
}
+// Lowers a Switch node to either an ArchTableSwitch (jump table) or an
+// ArchLookupSwitch (linear compare chain), chosen by a weighted
+// space/time cost heuristic over the value range and case count.
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+                                      BasicBlock** case_branches,
+                                      int32_t* case_values, size_t case_count,
+                                      int32_t min_value, int32_t max_value) {
+  IA32OperandGenerator g(this);
+  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+  InstructionOperand default_operand = g.Label(default_branch);
+
+  // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+  // is 2^31-1, so don't assume that it's non-zero below.
+  size_t value_range =
+      1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+  // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+  // instruction.
+  size_t table_space_cost = 4 + value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * case_count;
+  size_t lookup_time_cost = case_count;
+  // NOTE(review): this requires more than 4 cases before considering a
+  // table switch, whereas the ARM/ARM64 selectors use > 0 — presumably
+  // per-architecture tuning; confirm intentional. The kMinInt guard rules
+  // out the value_range == 0 overflow case and keeps -min_value valid.
+  if (case_count > 4 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      min_value > std::numeric_limits<int32_t>::min()) {
+    InstructionOperand index_operand = value_operand;
+    if (min_value) {
+      // Rebase via lea (value + (-min_value)) so the table starts at zero.
+      index_operand = g.TempRegister();
+      Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
+           value_operand, g.TempImmediate(-min_value));
+    }
+    size_t input_count = 2 + value_range;
+    auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+    inputs[0] = index_operand;
+    // Table slots with no explicit case fall through to the default block.
+    std::fill(&inputs[1], &inputs[input_count], default_operand);
+    for (size_t index = 0; index < case_count; ++index) {
+      size_t value = case_values[index] - min_value;
+      BasicBlock* branch = case_branches[index];
+      DCHECK_LE(0u, value);
+      DCHECK_LT(value + 2, input_count);
+      inputs[value + 2] = g.Label(branch);
+    }
+    Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+        ->MarkAsControl();
+    return;
+  }
+
+  // Generate a sequence of conditional jumps.
+  // Operand layout: value, default label, then (value, label) pairs.
+  size_t input_count = 2 + case_count * 2;
+  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+  inputs[0] = value_operand;
+  inputs[1] = default_operand;
+  for (size_t index = 0; index < case_count; ++index) {
+    int32_t value = case_values[index];
+    BasicBlock* branch = case_branches[index];
+    inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+    inputs[index * 2 + 2 + 1] = g.Label(branch);
+  }
+  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+      ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
V(ArchCallCodeObject) \
V(ArchCallJSFunction) \
V(ArchJmp) \
- V(ArchSwitch) \
+ V(ArchLookupSwitch) \
+ V(ArchTableSwitch) \
V(ArchNop) \
V(ArchRet) \
V(ArchStackPointer) \
#include "src/compiler/instruction-selector.h"
+#include <limits>
+
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
}
case BasicBlock::kSwitch: {
DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
- BasicBlock** const branches = &block->successors().front();
- size_t const branch_count = block->SuccessorCount();
- DCHECK_LE(2u, branch_count);
+ // Last successor must be Default.
+ BasicBlock* default_branch = block->successors().back();
+ DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
// SSA deconstruction requires targets of branches not to have phis.
// Edge split form guarantees this property, but is more strict.
- for (size_t index = 0; index < branch_count; ++index) {
- CheckNoPhis(branches[index]);
+ CheckNoPhis(default_branch);
+ // All other successors must be cases.
+ size_t case_count = block->SuccessorCount() - 1;
+ DCHECK_LE(1u, case_count);
+ BasicBlock** case_branches = &block->successors().front();
+ // Determine case values and their min/max.
+ int32_t* case_values = zone()->NewArray<int32_t>(case_count);
+ int32_t min_value = std::numeric_limits<int32_t>::max();
+ int32_t max_value = std::numeric_limits<int32_t>::min();
+ for (size_t index = 0; index < case_count; ++index) {
+ BasicBlock* branch = case_branches[index];
+ int32_t value = OpParameter<int32_t>(branch->front()->op());
+ case_values[index] = value;
+ if (min_value > value) min_value = value;
+ if (max_value < value) max_value = value;
+ // SSA deconstruction requires targets of branches not to have phis.
+ // Edge split form guarantees this property, but is more strict.
+ CheckNoPhis(branch);
}
- return VisitSwitch(input, branches, branch_count);
+ DCHECK_LE(min_value, max_value);
+ return VisitSwitch(input, default_branch, case_branches, case_values,
+ case_count, min_value, max_value);
}
case BasicBlock::kReturn: {
// If the result itself is a return, return its input.
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
case IrOpcode::kSwitch:
- case IrOpcode::kCase:
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
case IrOpcode::kEffectPhi:
case IrOpcode::kEffectSet:
case IrOpcode::kMerge:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
case IrOpcode::kSwitch:
- case IrOpcode::kCase:
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
// No code needed for these graph artifacts.
}
-void InstructionSelector::VisitSwitch(Node* node, BasicBlock** branches,
- size_t branch_count) {
- OperandGenerator g(this);
- Node* const value = node->InputAt(0);
- size_t const input_count = branch_count + 1;
- InstructionOperand* const inputs =
- zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = g.UseRegister(value);
- for (size_t index = 0; index < branch_count; ++index) {
- inputs[index + 1] = g.Label(branches[index]);
- }
- Emit(kArchSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
-}
-
-
void InstructionSelector::VisitReturn(Node* value) {
OperandGenerator g(this);
if (value != NULL) {
void VisitCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
- void VisitSwitch(Node* node, BasicBlock** branches, size_t branch_count);
+ void VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches, int32_t* case_values,
+ size_t case_count, int32_t min_value, int32_t max_value);
void VisitReturn(Node* value);
void VisitThrow(Node* value);
void VisitDeoptimize(Node* deopt);
V(IfTrue) \
V(IfFalse) \
V(Switch) \
- V(Case) \
+ V(IfValue) \
+ V(IfDefault) \
V(Merge) \
V(Return) \
V(OsrNormalEntry) \
return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
- opcode == IrOpcode::kIfFalse;
+ opcode == IrOpcode::kIfFalse || opcode == IrOpcode::kIfValue ||
+ opcode == IrOpcode::kIfDefault;
}
} // namespace compiler
} // namespace
+// static
+STATIC_CONST_MEMBER_DEFINITION const size_t Operator::kMaxControlOutputCount;
+
+
Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
size_t value_out, size_t effect_out, size_t control_out)
control_in_(CheckRange<uint16_t>(control_in)),
value_out_(CheckRange<uint16_t>(value_out)),
effect_out_(CheckRange<uint8_t>(effect_out)),
- control_out_(CheckRange<uint8_t>(control_out)) {}
+ control_out_(CheckRange<uint16_t>(control_out)) {}
std::ostream& operator<<(std::ostream& os, const Operator& op) {
Properties properties() const { return properties_; }
+ // TODO(bmeurer): Use bit fields below?
+ static const size_t kMaxControlOutputCount = (1u << 16) - 1;
+
// TODO(titzer): convert return values here to size_t.
int ValueInputCount() const { return value_in_; }
int EffectInputCount() const { return effect_in_; }
uint16_t control_in_;
uint16_t value_out_;
uint8_t effect_out_;
- uint8_t control_out_;
+ uint16_t control_out_;
DISALLOW_COPY_AND_ASSIGN(Operator);
};
#include "src/compiler/change-lowering.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/control-reducer.h"
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-visualizer.h"
};
+struct ControlFlowOptimizationPhase {
+ static const char* phase_name() { return "control flow optimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ ControlFlowOptimizer optimizer(data->jsgraph(), temp_zone);
+ optimizer.Optimize();
+ }
+};
+
+
struct ChangeLoweringPhase {
static const char* phase_name() { return "change lowering"; }
Run<SimplifiedLoweringPhase>();
RunPrintAndVerify("Lowered simplified");
+ // Optimize control flow.
+ if (FLAG_turbo_switch) {
+ Run<ControlFlowOptimizationPhase>();
+ RunPrintAndVerify("Control flow optimized");
+ }
+
// Lower changes that have been inserted before.
Run<ChangeLoweringPhase>();
// // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
}
+// Terminates the current block with a Switch on {index}: one successor
+// block per case, each headed by an IfValue projection of the Switch, plus
+// a trailing default successor headed by IfDefault.
-void RawMachineAssembler::Switch(Node* index, Label** succ_labels,
-                                 size_t succ_count) {
+void RawMachineAssembler::Switch(Node* index, Label* default_label,
+                                 int32_t* case_values, Label** case_labels,
+                                 size_t case_count) {
  DCHECK_NE(schedule()->end(), current_block_);
-  Node* sw = NewNode(common()->Switch(succ_count), index);
+  // +1 control output for the default successor.
+  size_t succ_count = case_count + 1;
+  Node* switch_node = NewNode(common()->Switch(succ_count), index);
  BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
-  for (size_t index = 0; index < succ_count; ++index) {
-    succ_blocks[index] = Use(succ_labels[index]);
+  // One successor block per case, each starting with an IfValue projection.
+  for (size_t index = 0; index < case_count; ++index) {
+    int32_t case_value = case_values[index];
+    BasicBlock* case_block = Use(case_labels[index]);
+    Node* case_node =
+        graph()->NewNode(common()->IfValue(case_value), switch_node);
+    schedule()->AddNode(case_block, case_node);
+    succ_blocks[index] = case_block;
  }
-  schedule()->AddSwitch(CurrentBlock(), sw, succ_blocks, succ_count);
+  // The last successor is the default block, headed by IfDefault.
+  BasicBlock* default_block = Use(default_label);
+  Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
+  schedule()->AddNode(default_block, default_node);
+  succ_blocks[case_count] = default_block;
+  schedule()->AddSwitch(CurrentBlock(), switch_node, succ_blocks, succ_count);
  current_block_ = nullptr;
}
Label* Exit();
void Goto(Label* label);
void Branch(Node* condition, Label* true_val, Label* false_val);
- void Switch(Node* index, Label** succ_labels, size_t succ_count);
+ void Switch(Node* index, Label* default_label, int32_t* case_values,
+ Label** case_labels, size_t case_count);
// Call through CallFunctionStub with lazy deopt and frame-state.
Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
Node* frame_state, CallFunctionFlags flags);
Node* NodeAt(size_t index) { return nodes_[index]; }
size_t NodeCount() const { return nodes_.size(); }
+ value_type& front() { return nodes_.front(); }
+ value_type const& front() const { return nodes_.front(); }
+
typedef NodeVector::iterator iterator;
iterator begin() { return nodes_.begin(); }
iterator end() { return nodes_.end(); }
}
// Collect the branch-related projections from a node, such as IfTrue,
- // IfFalse, and Case.
+ // IfFalse, Case and Default.
void CollectSuccessorProjections(Node* node, Node** successors,
size_t successor_count) {
#ifdef DEBUG
DCHECK_EQ(static_cast<int>(successor_count), node->UseCount());
std::memset(successors, 0, sizeof(*successors) * successor_count);
#endif
+ size_t if_value_index = 0;
for (Node* const use : node->uses()) {
size_t index;
switch (use->opcode()) {
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
index = 1;
break;
- case IrOpcode::kCase:
+ case IrOpcode::kIfValue:
DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
- index = CaseIndexOf(use->op());
+ index = if_value_index++;
+ break;
+ case IrOpcode::kIfDefault:
+ DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
+ index = successor_count - 1;
break;
}
+ DCHECK_LT(if_value_index, successor_count);
DCHECK_LT(index, successor_count);
- DCHECK(successors[index] == nullptr);
+ DCHECK_NULL(successors[index]);
successors[index] = use;
}
#ifdef DEBUG
case IrOpcode::kHeapConstant:
return VisitLeaf(node, kRepTagged);
- case IrOpcode::kEnd:
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kReturn:
- case IrOpcode::kMerge:
- case IrOpcode::kThrow:
- return VisitInputs(node); // default visit for all node inputs.
-
case IrOpcode::kBranch:
ProcessInput(node, 0, kRepBit);
Enqueue(NodeProperties::GetControlInput(node, 0));
break;
+ case IrOpcode::kSwitch:
+ ProcessInput(node, 0, kRepWord32);
+ Enqueue(NodeProperties::GetControlInput(node, 0));
+ break;
case IrOpcode::kSelect:
return VisitSelect(node, use, lowering);
case IrOpcode::kPhi:
if (use->opcode() == IrOpcode::kIfTrue) ++count_true;
if (use->opcode() == IrOpcode::kIfFalse) ++count_false;
}
- CHECK(count_true == 1 && count_false == 1);
+ CHECK_EQ(1, count_true);
+ CHECK_EQ(1, count_false);
// Type is empty.
CheckNotTyped(node);
break;
CheckNotTyped(node);
break;
case IrOpcode::kSwitch: {
- // Switch uses are Case.
- std::vector<bool> uses;
- uses.resize(node->UseCount());
+ // Switch uses are Case and Default.
+ int count_case = 0, count_default = 0;
for (auto use : node->uses()) {
- CHECK_EQ(IrOpcode::kCase, use->opcode());
- size_t const index = CaseIndexOf(use->op());
- CHECK_LT(index, uses.size());
- CHECK(!uses[index]);
- uses[index] = true;
+ switch (use->opcode()) {
+ case IrOpcode::kIfValue: {
+ for (auto user : node->uses()) {
+ if (user != use && user->opcode() == IrOpcode::kIfValue) {
+ CHECK_NE(OpParameter<int32_t>(use->op()),
+ OpParameter<int32_t>(user->op()));
+ }
+ }
+ ++count_case;
+ break;
+ }
+ case IrOpcode::kIfDefault: {
+ ++count_default;
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
}
+ CHECK_LE(1, count_case);
+ CHECK_EQ(1, count_default);
+ CHECK_EQ(node->op()->ControlOutputCount(), count_case + count_default);
// Type is empty.
CheckNotTyped(node);
break;
}
- case IrOpcode::kCase:
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
CHECK_EQ(IrOpcode::kSwitch,
NodeProperties::GetControlInput(node)->opcode());
// Type is empty.
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
- case kArchSwitch:
- AssembleArchSwitch(instr);
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
break;
case kArchNop:
// don't emit code for nops.
}
-void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
- X64OperandConverter i(this, instr);
- size_t const label_count = instr->InputCount() - 1;
- Label** labels = zone()->NewArray<Label*>(label_count);
- for (size_t index = 0; index < label_count; ++index) {
- labels[index] = GetLabel(i.InputRpo(static_cast<int>(index + 1)));
- }
- Label* const table = AddJumpTable(labels, label_count);
- __ leaq(kScratchRegister, Operand(table));
- __ jmp(Operand(kScratchRegister, i.InputRegister(0), times_8, 0));
-}
-
-
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
}
+// Emits a linear compare-and-branch chain for a sparse switch.
+// Instruction inputs: [0] = value register, [1] = default block,
+// then (case value, case block) pairs starting at input 2.
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+  X64OperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  for (size_t index = 2; index < instr->InputCount(); index += 2) {
+    __ cmpl(input, Immediate(i.InputInt32(static_cast<int>(index + 0))));
+    __ j(equal, GetLabel(i.InputRpo(static_cast<int>(index + 1))));
+  }
+  // No case matched: jump to the default block.
+  AssembleArchJump(i.InputRpo(1));
+}
+
+
+// Emits a jump-table dispatch for a dense switch.
+// Instruction inputs: [0] = zero-based index register (already zero-extended
+// to 64 bits by the instruction selector), [1] = default block, then one
+// case block per table slot starting at input 2.
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+  X64OperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+  Label** cases = zone()->NewArray<Label*>(case_count);
+  for (int32_t index = 0; index < case_count; ++index) {
+    cases[index] = GetLabel(i.InputRpo(index + 2));
+  }
+  Label* const table = AddJumpTable(cases, case_count);
+  // Unsigned compare: an out-of-range (including "negative") index is
+  // >= case_count and falls through to the default block.
+  __ cmpl(input, Immediate(case_count));
+  __ j(above_equal, GetLabel(i.InputRpo(1)));
+  __ leaq(kScratchRegister, Operand(table));
+  __ jmp(Operand(kScratchRegister, input, times_8, 0));
+}
+
+
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
}
+// Lowers a Switch node to either kArchTableSwitch (dense jump table) or
+// kArchLookupSwitch (compare/branch chain), based on a simple space/time
+// cost model over the case-value range and case count.
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+                                      BasicBlock** case_branches,
+                                      int32_t* case_values, size_t case_count,
+                                      int32_t min_value, int32_t max_value) {
+  X64OperandGenerator g(this);
+  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+  InstructionOperand default_operand = g.Label(default_branch);
+
+  // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+  // is 2^31-1, so don't assume that it's non-zero below.
+  size_t value_range =
+      1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+  // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+  // instruction.
+  size_t table_space_cost = 4 + value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * case_count;
+  size_t lookup_time_cost = case_count;
+  // The min_value > INT32_MIN guard keeps the {-min_value} negation below
+  // from overflowing.
+  if (case_count > 4 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      min_value > std::numeric_limits<int32_t>::min()) {
+    InstructionOperand index_operand = g.TempRegister();
+    if (min_value) {
+      // The leal automatically zero extends, so result is a valid 64-bit index.
+      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
+           value_operand, g.TempImmediate(-min_value));
+    } else {
+      // Zero extend, because we use it as 64-bit index into the jump table.
+      Emit(kX64Movl, index_operand, value_operand);
+    }
+    // Table-switch inputs: [0] index, [1] default, then one label per slot
+    // in [min_value, max_value]; unused slots point at the default block.
+    size_t input_count = 2 + value_range;
+    auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+    inputs[0] = index_operand;
+    std::fill(&inputs[1], &inputs[input_count], default_operand);
+    for (size_t index = 0; index < case_count; ++index) {
+      // NOTE(review): {case_values[index] - min_value} is evaluated in signed
+      // int before widening; presumably the cost check above keeps the range
+      // small enough to avoid overflow — worth confirming. Also, DCHECK_LE(0u,
+      // value) is trivially true for an unsigned value.
+      size_t value = case_values[index] - min_value;
+      BasicBlock* branch = case_branches[index];
+      DCHECK_LE(0u, value);
+      DCHECK_LT(value + 2, input_count);
+      inputs[value + 2] = g.Label(branch);
+    }
+    Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+        ->MarkAsControl();
+    return;
+  }
+
+  // Generate a sequence of conditional jumps.
+  // Lookup-switch inputs: [0] value, [1] default, then (value, label) pairs.
+  size_t input_count = 2 + case_count * 2;
+  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+  inputs[0] = value_operand;
+  inputs[1] = default_operand;
+  for (size_t index = 0; index < case_count; ++index) {
+    int32_t value = case_values[index];
+    BasicBlock* branch = case_branches[index];
+    inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+    inputs[index * 2 + 2 + 1] = g.Label(branch);
+  }
+  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+      ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
Node* user = node;
FlagsContinuation cont(kEqual, node);
DEFINE_BOOL(turbo_exceptions, false, "enable exception handling in TurboFan")
DEFINE_BOOL(turbo_stress_loop_peeling, false,
"stress loop peeling optimization")
+DEFINE_BOOL(turbo_switch, true, "optimize switches in TurboFan")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
int constant = 11223344;
- MLabel block0, block1, end;
- MLabel* cases[] = {&block0, &block1};
- m.Switch(m.IntPtrConstant(0), cases, arraysize(cases));
+ MLabel block0, block1, def, end;
+ MLabel* case_labels[] = {&block0, &block1};
+ int32_t case_values[] = {0, 1};
+ m.Switch(m.Int32Constant(0), &def, case_values, case_labels,
+ arraysize(case_labels));
m.Bind(&block0);
m.Goto(&end);
m.Bind(&block1);
m.Goto(&end);
+ m.Bind(&def);
+ m.Goto(&end);
m.Bind(&end);
m.Return(m.Int32Constant(constant));
TEST(RunSwitch2) {
  RawMachineAssemblerTester<int32_t> m(kMachInt32);
-  const size_t kNumCases = 255;
-  int32_t values[kNumCases];
+  // Two extreme, maximally-spread case values: forces the lookup-switch
+  // (compare/branch) lowering rather than a jump table.
+  MLabel blocka, blockb, blockc;
+  MLabel* case_labels[] = {&blocka, &blockb};
+  int32_t case_values[] = {std::numeric_limits<int32_t>::min(),
+                           std::numeric_limits<int32_t>::max()};
+  m.Switch(m.Parameter(0), &blockc, case_values, case_labels,
+           arraysize(case_labels));
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(-1));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&blockc);
+  m.Return(m.Int32Constant(0));
+
+  CHECK_EQ(1, m.Call(std::numeric_limits<int32_t>::max()));
+  CHECK_EQ(-1, m.Call(std::numeric_limits<int32_t>::min()));
+  // Everything else hits the default block.
+  for (int i = -100; i < 100; i += 25) {
+    CHECK_EQ(0, m.Call(i));
+  }
+}
+
+
+TEST(RunSwitch3) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+
+  // Adjacent case values at INT32_MIN: exercises the table-switch base-offset
+  // handling near the minimum representable value.
+  MLabel blocka, blockb, blockc;
+  MLabel* case_labels[] = {&blocka, &blockb};
+  int32_t case_values[] = {std::numeric_limits<int32_t>::min() + 0,
+                           std::numeric_limits<int32_t>::min() + 1};
+  m.Switch(m.Parameter(0), &blockc, case_values, case_labels,
+           arraysize(case_labels));
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(0));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&blockc);
+  m.Return(m.Int32Constant(2));
+
+  CHECK_EQ(0, m.Call(std::numeric_limits<int32_t>::min() + 0));
+  CHECK_EQ(1, m.Call(std::numeric_limits<int32_t>::min() + 1));
+  // Everything else hits the default block.
+  for (int i = -100; i < 100; i += 25) {
+    CHECK_EQ(2, m.Call(i));
+  }
+}
+
+
+TEST(RunSwitch4) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+
+  // 512 dense cases (0..511) plus a default: large and dense enough to
+  // prefer the jump-table lowering.
+  const size_t kNumCases = 512;
+  const size_t kNumValues = kNumCases + 1;
+  int32_t values[kNumValues];
  m.main_isolate()->random_number_generator()->NextBytes(values,
                                                         sizeof(values));
-  MLabel end;
-  MLabel* cases[kNumCases];
-  Node* results[kNumCases];
+  MLabel end, def;
+  int32_t case_values[kNumCases];
+  MLabel* case_labels[kNumCases];
+  Node* results[kNumValues];
  for (size_t i = 0; i < kNumCases; ++i) {
-    cases[i] = new (m.main_zone()->New(sizeof(MLabel))) MLabel;
+    case_values[i] = static_cast<int32_t>(i);
+    case_labels[i] = new (m.main_zone()->New(sizeof(MLabel))) MLabel;
  }
-  m.Switch(m.ConvertInt32ToIntPtr(m.Parameter(0)), cases, arraysize(cases));
+  m.Switch(m.Parameter(0), &def, case_values, case_labels,
+           arraysize(case_labels));
  for (size_t i = 0; i < kNumCases; ++i) {
-    m.Bind(cases[i]);
+    m.Bind(case_labels[i]);
    results[i] = m.Int32Constant(values[i]);
    m.Goto(&end);
  }
+  // The default block contributes the last phi input.
+  m.Bind(&def);
+  results[kNumCases] = m.Int32Constant(values[kNumCases]);
+  m.Goto(&end);
  m.Bind(&end);
  const int num_results = static_cast<int>(arraysize(results));
  Node* phi =
      m.NewNode(m.common()->Phi(kMachInt32, num_results), num_results, results);
  m.Return(phi);
-  for (size_t i = 0; i < kNumCases; ++i) {
+  // i == kNumCases falls through to the default block.
+  for (size_t i = 0; i < kNumValues; ++i) {
    CHECK_EQ(values[i], m.Call(static_cast<int>(i)));
  }
}
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// asm.js regression tests for TurboFan's switch lowering: each module is
+// compiled as asm.js and probed across the full int32 range.
+var stdlib = this;
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+
+// Dense two-case switch with a value-returning default.
+var switch1 = (function(stdlib, foreign, heap) {
+  "use asm";
+  function switch1(i) {
+    i = i|0;
+    switch (i) {
+      case 0: return 1;
+      case 1: return 2;
+      default: return i|0;
+    }
+  }
+  return { switch1: switch1 };
+})(stdlib, foreign, heap).switch1;
+
+assertEquals(1, switch1(0));
+assertEquals(2, switch1(1));
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+  assertEquals(i, switch1(i));
+}
+
+
+// Switch with break-style cases assigning a local instead of returning.
+var switch2 = (function(stdlib, foreign, heap) {
+  "use asm";
+  function switch2(i) {
+    i = i|0;
+    var j = 0;
+    switch (i) {
+      case 0: j = 1; break;
+      case 1: j = 2; break;
+      case 2: j = 3; break;
+      default: j = i|0; break;
+    }
+    return j|0;
+  }
+  return { switch2: switch2 };
+})(stdlib, foreign, heap).switch2;
+
+assertEquals(1, switch2(0));
+assertEquals(2, switch2(1));
+assertEquals(3, switch2(2));
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+  assertEquals(i, switch2(i));
+}
+
+
+// Fall-through cases sharing a body.
+var switch3 = (function(stdlib, foreign, heap) {
+  "use asm";
+  function switch3(i) {
+    i = i|0;
+    var j = 0;
+    switch (i) {
+      case 0:
+      case 1: j = 1; break;
+      case 2:
+      case 3: j = 2; break;
+      case 4:
+      case 5: j = 3; break;
+      default: j = 0; break;
+    }
+    return j|0;
+  }
+  return { switch3: switch3 };
+})(stdlib, foreign, heap).switch3;
+
+assertEquals(1, switch3(0));
+assertEquals(1, switch3(1));
+assertEquals(2, switch3(2));
+assertEquals(2, switch3(3));
+assertEquals(3, switch3(4));
+assertEquals(3, switch3(5));
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+  assertEquals(0, switch3(i));
+}
+
+
+// Sparse, negative-and-positive case values (lookup-switch territory).
+var switch4 = (function(stdlib, foreign, heap) {
+  "use asm";
+  function switch4(i) {
+    i = i|0;
+    switch (i) {
+      case -1:
+      case 1:
+        return 0;
+
+      case -2:
+      case 2:
+        return 1;
+
+      case -3:
+      case 3:
+        return 2;
+
+      case -8:
+      case 8:
+        return 3;
+
+      default:
+        return 4;
+    }
+  }
+  return { switch4: switch4 };
+})(stdlib, foreign, heap).switch4;
+
+assertEquals(4, switch4(0));
+assertEquals(0, switch4(-1));
+assertEquals(0, switch4(1));
+assertEquals(1, switch4(-2));
+assertEquals(1, switch4(2));
+assertEquals(3, switch4(-8));
+assertEquals(3, switch4(8));
+assertEquals(4, switch4(-123456789));
+assertEquals(4, switch4(123456789));
const int kArguments[] = {1, 5, 6, 42, 100, 10000, 65000};
-const size_t kCases[] = {2, 3, 4, 100, 255};
+const size_t kCases[] = {3, 4, 100, 255, 1024, 65000};
const float kFloatValues[] = {-std::numeric_limits<float>::infinity(),
std::numeric_limits<double>::signaling_NaN()};
+const int32_t kInt32Values[] = {
+ std::numeric_limits<int32_t>::min(), -1914954528, -1698749618, -1578693386,
+ -1577976073, -1573998034, -1529085059, -1499540537, -1299205097,
+ -1090814845, -938186388, -806828902, -750927650, -520676892, -513661538,
+ -453036354, -433622833, -282638793, -28375, -27788, -22770, -18806, -14173,
+ -11956, -11200, -10212, -8160, -3751, -2758, -1522, -121, -120, -118, -117,
+ -106, -84, -80, -74, -59, -52, -48, -39, -35, -17, -11, -10, -9, -7, -5, 0,
+ 9, 12, 17, 23, 29, 31, 33, 35, 40, 47, 55, 56, 62, 64, 67, 68, 69, 74, 79,
+ 84, 89, 90, 97, 104, 118, 124, 126, 127, 7278, 17787, 24136, 24202, 25570,
+ 26680, 30242, 32399, 420886487, 642166225, 821912648, 822577803, 851385718,
+ 1212241078, 1411419304, 1589626102, 1596437184, 1876245816, 1954730266,
+ 2008792749, 2045320228, std::numeric_limits<int32_t>::max()};
+
+
const BranchHint kHints[] = {BranchHint::kNone, BranchHint::kTrue,
BranchHint::kFalse};
}
-TEST_F(CommonOperatorTest, Case) {
- TRACED_FORRANGE(size_t, index, 0, 1024) {
- const Operator* const op = common()->Case(index);
- EXPECT_EQ(IrOpcode::kCase, op->opcode());
+TEST_F(CommonOperatorTest, IfValue) {
+ TRACED_FOREACH(int32_t, value, kInt32Values) {
+ const Operator* const op = common()->IfValue(value);
+ EXPECT_EQ(IrOpcode::kIfValue, op->opcode());
EXPECT_EQ(Operator::kKontrol, op->properties());
- EXPECT_EQ(index, CaseIndexOf(op));
+ EXPECT_EQ(value, OpParameter<int32_t>(op));
EXPECT_EQ(0, op->ValueInputCount());
EXPECT_EQ(0, op->EffectInputCount());
EXPECT_EQ(1, op->ControlInputCount());
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/control-flow-optimizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Test fixture that runs the ControlFlowOptimizer over a graph assembled
+// with the common/machine operator builders supplied by GraphTest.
+class ControlFlowOptimizerTest : public GraphTest {
+ public:
+  explicit ControlFlowOptimizerTest(int num_parameters = 3)
+      : GraphTest(num_parameters), machine_(zone()) {}
+  ~ControlFlowOptimizerTest() OVERRIDE {}
+
+ protected:
+  // Wraps the test graph in a throwaway JSGraph and runs the optimizer on it.
+  void Optimize() {
+    JSOperatorBuilder javascript(zone());
+    JSGraph jsgraph(isolate(), graph(), common(), &javascript, machine());
+    ControlFlowOptimizer optimizer(&jsgraph, zone());
+    optimizer.Optimize();
+  }
+
+  MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+  MachineOperatorBuilder machine_;
+};
+
+
+// Builds a chain of two Word32Equal-against-constant branches on the same
+// index and checks the optimizer collapses it into a single Switch with
+// IfValue(0), IfValue(1) and IfDefault projections.
+TEST_F(ControlFlowOptimizerTest, Switch) {
+  Node* index = Parameter(0);
+  Node* branch0 = graph()->NewNode(
+      common()->Branch(),
+      graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(0)),
+      start());
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* branch1 = graph()->NewNode(
+      common()->Branch(),
+      graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(1)),
+      if_false0);
+  Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+  Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+  Node* merge =
+      graph()->NewNode(common()->Merge(3), if_true0, if_true1, if_false1);
+  graph()->SetEnd(graph()->NewNode(common()->End(), merge));
+  Optimize();
+  // All three projections must hang off the SAME Switch node (CaptureEq).
+  Capture<Node*> switch_capture;
+  EXPECT_THAT(end(),
+              IsEnd(IsMerge(IsIfValue(0, CaptureEq(&switch_capture)),
+                            IsIfValue(1, CaptureEq(&switch_capture)),
+                            IsIfDefault(AllOf(CaptureEq(&switch_capture),
+                                              IsSwitch(index, start()))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
protected:
Node* start() { return graph()->start(); }
+ Node* end() { return graph()->end(); }
+
Node* Parameter(int32_t index = 0);
Node* Float32Constant(volatile float value);
Node* Float64Constant(volatile double value);
};
+// gMock matcher for a Switch node: checks the opcode plus the node's value
+// input (the switched-on value) and control input.
+class IsSwitchMatcher FINAL : public NodeMatcher {
+ public:
+  IsSwitchMatcher(const Matcher<Node*>& value_matcher,
+                  const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kSwitch),
+        value_matcher_(value_matcher),
+        control_matcher_(control_matcher) {}
+
+  void DescribeTo(std::ostream* os) const FINAL {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value (";
+    value_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+// gMock matcher for an IfValue projection: checks the opcode, the int32
+// case value carried in the operator's parameter, and the control input
+// (which should be the owning Switch).
+class IsIfValueMatcher FINAL : public NodeMatcher {
+ public:
+  IsIfValueMatcher(const Matcher<int32_t>& value_matcher,
+                   const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kIfValue),
+        value_matcher_(value_matcher),
+        control_matcher_(control_matcher) {}
+
+  void DescribeTo(std::ostream* os) const FINAL {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value (";
+    value_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<int32_t>(node->op()), "value",
+                                 value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<int32_t> value_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
class IsControl1Matcher FINAL : public NodeMatcher {
public:
IsControl1Matcher(IrOpcode::Value opcode,
}
+// Matches a 3-input Merge node whose control inputs satisfy the given
+// matchers, in order (needed for Switch tests with two cases + default).
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+                       const Matcher<Node*>& control1_matcher,
+                       const Matcher<Node*>& control2_matcher) {
+  return MakeMatcher(new IsControl3Matcher(IrOpcode::kMerge, control0_matcher,
+                                           control1_matcher, control2_matcher));
+}
+
+
Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher) {
return MakeMatcher(new IsControl2Matcher(IrOpcode::kLoop, control0_matcher,
}
+// Factory for the Switch node matcher (value + control).
+Matcher<Node*> IsSwitch(const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsSwitchMatcher(value_matcher, control_matcher));
+}
+
+
+// Factory for the IfValue projection matcher (case value + control).
+Matcher<Node*> IsIfValue(const Matcher<int32_t>& value_matcher,
+                         const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsIfValueMatcher(value_matcher, control_matcher));
+}
+
+
+// Factory for the IfDefault projection matcher; IfDefault carries no
+// parameter, so only the control input is checked.
+Matcher<Node*> IsIfDefault(const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(
+      new IsControl1Matcher(IrOpcode::kIfDefault, control_matcher));
+}
+
+
Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher) {
return MakeMatcher(new IsUnopMatcher(IrOpcode::kValueEffect, value_matcher));
}
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher);
Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher);
Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control2_matcher);
Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsSwitch(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfValue(const Matcher<int32_t>& value_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfDefault(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher);
graph()->SetStart(start);
Node* p0 = graph()->NewNode(common()->Parameter(0), start);
- Node* sw = graph()->NewNode(common()->Switch(2), p0, start);
- Node* c0 = graph()->NewNode(common()->Case(0), sw);
+ Node* sw = graph()->NewNode(common()->Switch(3), p0, start);
+ Node* c0 = graph()->NewNode(common()->IfValue(0), sw);
Node* v0 = graph()->NewNode(common()->Int32Constant(11));
- Node* c1 = graph()->NewNode(common()->Case(1), sw);
+ Node* c1 = graph()->NewNode(common()->IfValue(1), sw);
Node* v1 = graph()->NewNode(common()->Int32Constant(22));
- Node* m = graph()->NewNode(common()->Merge(2), c0, c1);
- Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 2), v0, v1, m);
+ Node* d = graph()->NewNode(common()->IfDefault(), sw);
+ Node* vd = graph()->NewNode(common()->Int32Constant(33));
+ Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
+ Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 3), v0, v1, vd, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
Node* end = graph()->NewNode(common()->End(), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(13, graph());
+ ComputeAndVerifySchedule(16, graph());
}
graph()->SetStart(start);
Node* p0 = graph()->NewNode(common()->Parameter(0), start);
- Node* sw = graph()->NewNode(common()->Switch(2), p0, start);
- Node* c0 = graph()->NewNode(common()->Case(0), sw);
+ Node* sw = graph()->NewNode(common()->Switch(3), p0, start);
+ Node* c0 = graph()->NewNode(common()->IfValue(0), sw);
Node* v0 = graph()->NewNode(common()->Int32Constant(11));
- Node* c1 = graph()->NewNode(common()->Case(1), sw);
+ Node* c1 = graph()->NewNode(common()->IfValue(1), sw);
Node* v1 = graph()->NewNode(common()->Int32Constant(22));
- Node* m = graph()->NewNode(common()->Merge(2), c0, c1);
- Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 2), v0, v1, m);
+ Node* d = graph()->NewNode(common()->IfDefault(), sw);
+ Node* vd = graph()->NewNode(common()->Int32Constant(33));
+ Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
+ Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 3), v0, v1, vd, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
Node* end = graph()->NewNode(common()->End(), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(13, graph());
+ ComputeAndVerifySchedule(16, graph());
}
} // namespace compiler
'compiler/common-operator-unittest.cc',
'compiler/compiler-test-utils.h',
'compiler/control-equivalence-unittest.cc',
+ 'compiler/control-flow-optimizer-unittest.cc',
'compiler/control-reducer-unittest.cc',
'compiler/diamond-unittest.cc',
'compiler/graph-reducer-unittest.cc',
'../../src/compiler/control-builders.cc',
'../../src/compiler/control-builders.h',
'../../src/compiler/control-equivalence.h',
+ '../../src/compiler/control-flow-optimizer.cc',
+ '../../src/compiler/control-flow-optimizer.h',
'../../src/compiler/control-reducer.cc',
'../../src/compiler/control-reducer.h',
'../../src/compiler/diamond.h',