#include "cfg.h"
#include "codegen-inl.h"
+#include "codegen-arm.h" // Include after codegen-inl.h.
#include "macro-assembler-arm.h"
namespace v8 {
{
Comment cmt(masm, "[ InstructionBlock");
for (int i = 0, len = instructions_.length(); i < len; i++) {
+ // If the location of the current instruction is a temp, then the
+ // instruction cannot be in tail position in the block. Allocate the
+ // temp based on peeking ahead to the next instruction.
+ Instruction* instr = instructions_[i];
+ Location* loc = instr->location();
+ if (loc->is_temporary()) {
+ instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
+ }
instructions_[i]->Compile(masm);
}
}
}
+// Emit ARM code for a binary operation by calling the generic binary op
+// stub.  The left operand is moved to r1 and the right to r0; after the
+// stub call the result is read back from r0 and stored to this
+// instruction's location.
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+ // The right-hand value should not be on the stack---if it is a
+ // compiler-generated temporary it is in the accumulator.
+ ASSERT(!val1_->is_on_stack());
+
+ Comment cmnt(masm, "[ BinaryOpInstr");
+ // We can overwrite one of the operands if it is a temporary.
+ OverwriteMode mode = NO_OVERWRITE;
+ if (val0_->is_temporary()) {
+ mode = OVERWRITE_LEFT;
+ } else if (val1_->is_temporary()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ // Move left to r1 and right to r0.
+ val0_->Get(masm, r1);
+ val1_->Get(masm, r0);
+ GenericBinaryOpStub stub(op_, mode);
+ __ CallStub(&stub);
+ loc_->Set(masm, r0);
+}
+
+
void ReturnInstr::Compile(MacroAssembler* masm) {
+ // The location should be 'Effect'. As a side effect, move the value to
+ // the accumulator.
Comment cmnt(masm, "[ ReturnInstr");
- value_->ToRegister(masm, r0);
+ value_->Get(masm, r0);
}
-void Constant::ToRegister(MacroAssembler* masm, Register reg) {
+void Constant::Get(MacroAssembler* masm, Register reg) {
__ mov(reg, Operand(handle_));
}
-void SlotLocation::ToRegister(MacroAssembler* masm, Register reg) {
- switch (type_) {
+void Constant::Push(MacroAssembler* masm) {
+ __ mov(ip, Operand(handle_));
+ __ push(ip);
+}
+
+
+static MemOperand ToMemOperand(SlotLocation* loc) {
+ switch (loc->type()) {
case Slot::PARAMETER: {
int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ ldr(reg, MemOperand(fp, (1 + count - index_) * kPointerSize));
- break;
+ return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
}
case Slot::LOCAL: {
const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- __ ldr(reg, MemOperand(fp, kOffset - index_ * kPointerSize));
- break;
+ return MemOperand(fp, kOffset - loc->index() * kPointerSize);
}
default:
UNREACHABLE();
+ return MemOperand(r0);
}
}
+
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+ __ ldr(reg, ToMemOperand(this));
+}
+
+
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+ __ str(reg, ToMemOperand(this));
+}
+
+
+void SlotLocation::Push(MacroAssembler* masm) {
+ __ ldr(ip, ToMemOperand(this));
+ __ push(ip); // Push will not destroy ip.
+}
+
+
+// Move the temporary's value into |reg|.  Note that reading a
+// stack-allocated temporary pops it, so such a temp is consumed by
+// a single read.
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ // The accumulator is r0; skip the move if it would be a no-op.
+ if (!reg.is(r0)) __ mov(reg, r0);
+ break;
+ case STACK:
+ __ pop(reg);
+ break;
+ case NOWHERE:
+ // Reading a temp that was never allocated is a compiler bug.
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+// Store |reg| as the temporary's value according to its allocation:
+// move it to the accumulator (r0) or push it on the stack.
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(r0)) __ mov(r0, reg);
+ break;
+ case STACK:
+ __ push(reg);
+ break;
+ case NOWHERE:
+ // Writing a temp that was never allocated is a compiler bug.
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+// Push the temporary's value on the stack.  Push is only supported for
+// a temp allocated to the accumulator; stack-allocated and unallocated
+// temps cannot be pushed.
+void TempLocation::Push(MacroAssembler* masm) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ push(r0);
+ break;
+ case STACK:
+ case NOWHERE:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
#undef __
} } // namespace v8::internal
}
-class GenericBinaryOpStub : public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- int constant_rhs = CodeGenerator::kUnknownIntValue)
- : op_(op),
- mode_(mode),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- int constant_rhs_;
- bool specialized_on_rhs_;
-
- static const int kMaxKnownRhs = 0x40000000;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class KnownIntBits: public BitField<int, 8, 8> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt());
- }
-
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm);
-
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
- }
-
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- return key;
- }
-
- const char* GetName() {
- switch (op_) {
- case Token::ADD: return "GenericBinaryOpStub_ADD";
- case Token::SUB: return "GenericBinaryOpStub_SUB";
- case Token::MUL: return "GenericBinaryOpStub_MUL";
- case Token::DIV: return "GenericBinaryOpStub_DIV";
- case Token::MOD: return "GenericBinaryOpStub_MOD";
- case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
- case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
- case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
- case Token::SAR: return "GenericBinaryOpStub_SAR";
- case Token::SHL: return "GenericBinaryOpStub_SHL";
- case Token::SHR: return "GenericBinaryOpStub_SHR";
- default: return "GenericBinaryOpStub";
- }
- }
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
- }
-#endif
-};
-
-
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int constant_rhs) {
};
+class GenericBinaryOpStub : public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ int constant_rhs = CodeGenerator::kUnknownIntValue)
+ : op_(op),
+ mode_(mode),
+ constant_rhs_(constant_rhs),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ int constant_rhs_;
+ bool specialized_on_rhs_;
+
+ static const int kMaxKnownRhs = 0x40000000;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 6> {};
+ class KnownIntBits: public BitField<int, 8, 8> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | KnownIntBits::encode(MinorKeyForKnownInt());
+ }
+
+ void Generate(MacroAssembler* masm);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm);
+
+ static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+ if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
+ if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+ if (op == Token::MOD) {
+ if (constant_rhs <= 1) return false;
+ if (constant_rhs <= 10) return true;
+ if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+ return false;
+ }
+ return false;
+ }
+
+ int MinorKeyForKnownInt() {
+ if (!specialized_on_rhs_) return 0;
+ if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+ ASSERT(IsPowerOf2(constant_rhs_));
+ int key = 12;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ key++;
+ d >>= 1;
+ }
+ return key;
+ }
+
+ const char* GetName() {
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::MOD: return "GenericBinaryOpStub_MOD";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
+ }
+ }
+
+#ifdef DEBUG
+ void Print() {
+ if (!specialized_on_rhs_) {
+ PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+ } else {
+ PrintF("GenericBinaryOpStub (%s by %d)\n",
+ Token::String(op_),
+ constant_rhs_);
+ }
+ }
+#endif
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
CfgGlobals::CfgGlobals(FunctionLiteral* fun)
: global_fun_(fun),
global_exit_(new ExitNode()),
+ effect_(new Effect()),
#ifdef DEBUG
node_counter_(0),
+ temp_counter_(0),
#endif
previous_(top_) {
top_ = this;
if (cfg == NULL) {
BAILOUT("unsupported statement type");
}
+ if (cfg->is_empty()) {
+ BAILOUT("function body produces empty cfg");
+ }
if (cfg->has_exit()) {
BAILOUT("control path without explicit return");
}
void Cfg::Append(Instruction* instr) {
- ASSERT(has_exit());
- ASSERT(!is_empty());
+ ASSERT(is_empty() || has_exit());
+ if (is_empty()) {
+ entry_ = exit_ = new InstructionBlock();
+ }
InstructionBlock::cast(exit_)->Append(instr);
}
}
+// Append |other|'s graph to the end of this fragment.  Requires this
+// CFG to be empty or to have an available exit.  Concatenating an
+// empty |other| is a no-op; concatenating onto an empty CFG simply
+// adopts |other|'s entry and exit.
+void Cfg::Concatenate(Cfg* other) {
+ ASSERT(is_empty() || has_exit());
+ if (other->is_empty()) return;
+
+ if (is_empty()) {
+ entry_ = other->entry();
+ exit_ = other->exit();
+ } else {
+ // We have a pair of nonempty fragments and this has an available exit.
+ // Destructively glue the fragments together.
+ InstructionBlock* first = InstructionBlock::cast(exit_);
+ InstructionBlock* second = InstructionBlock::cast(other->entry());
+ first->instructions()->AddAll(*second->instructions());
+ // |second| is abandoned: its instructions now live in |first|.  If it
+ // had a successor, splice that in and take over |other|'s exit;
+ // otherwise the current exit block (now holding all instructions)
+ // remains the exit.
+ if (second->successor() != NULL) {
+ first->set_successor(second->successor());
+ exit_ = other->exit();
+ }
+ }
+}
+
+
void InstructionBlock::Unmark() {
if (is_marked_) {
is_marked_ = false;
}
+// Allocate the temp produced by the immediately preceding instruction:
+// to the accumulator if this instruction uses it as an operand,
+// otherwise to the stack.
+void BinaryOpInstr::FastAllocate(TempLocation* temp) {
+ ASSERT(temp->where() == TempLocation::NOWHERE);
+ if (temp == val0_ || temp == val1_) {
+ temp->set_where(TempLocation::ACCUMULATOR);
+ } else {
+ temp->set_where(TempLocation::STACK);
+ }
+}
+
+
+// Allocate the temp produced by the immediately preceding instruction:
+// to the accumulator if it is this instruction's return value,
+// otherwise to the stack.
+void ReturnInstr::FastAllocate(TempLocation* temp) {
+ ASSERT(temp->where() == TempLocation::NOWHERE);
+ if (temp == value_) {
+ temp->set_where(TempLocation::ACCUMULATOR);
+ } else {
+ temp->set_where(TempLocation::STACK);
+ }
+}
+
+
// The expression builder should not be used for declarations or statements.
void ExpressionBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); }
// Macros (temporarily) handling unsupported expression types.
#define BAILOUT(reason) \
do { \
- value_ = NULL; \
+ cfg_ = NULL; \
return; \
} while (false)
-#define CHECK_BAILOUT() \
- if (value_ == NULL) { return; } else {}
-
void ExpressionBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
BAILOUT("FunctionLiteral");
}
void ExpressionBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- BAILOUT("BinaryOperation");
+ Token::Value op = expr->op();
+ switch (op) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+ BAILOUT("unsupported binary operation");
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ ExpressionBuilder left, right;
+ left.Build(expr->left());
+ if (left.cfg() == NULL) {
+ BAILOUT("unsupported left subexpression in binop");
+ }
+ right.Build(expr->right());
+ if (right.cfg() == NULL) {
+ BAILOUT("unsupported right subexpression in binop");
+ }
+
+ Location* temp = new TempLocation();
+ cfg_ = left.cfg();
+ cfg_->Concatenate(right.cfg());
+ cfg_->Append(new BinaryOpInstr(temp, op, left.value(), right.value()));
+
+ value_ = temp;
+ return;
+ }
+
+ default:
+ UNREACHABLE();
+ }
}
}
#undef BAILOUT
-#undef CHECK_BAILOUT
// Macros (temporarily) handling unsupported statement types.
void StatementBuilder::VisitReturnStatement(ReturnStatement* stmt) {
ExpressionBuilder builder;
- builder.Visit(stmt->expression());
- Value* value = builder.value();
- if (value == NULL) BAILOUT("unsupported expression type");
- cfg_->AppendReturnInstruction(value);
+ builder.Build(stmt->expression());
+ if (builder.cfg() == NULL) {
+ BAILOUT("unsupported expression in return statement");
+ }
+
+ cfg_->Concatenate(builder.cfg());
+ cfg_->AppendReturnInstruction(builder.value());
}
}
+void Effect::Print() {
+ PrintF("Effect");
+}
+
+
void SlotLocation::Print() {
PrintF("Slot(");
switch (type_) {
}
+void TempLocation::Print() {
+ PrintF("Temp(%d)", number());
+}
+
+
+void BinaryOpInstr::Print() {
+ PrintF("BinaryOp(");
+ loc_->Print();
+ PrintF(", %s, ", Token::Name(op_));
+ val0_->Print();
+ PrintF(", ");
+ val1_->Print();
+ PrintF(")\n");
+}
+
+
void ReturnInstr::Print() {
- PrintF("Return ");
+ PrintF("Return(");
value_->Print();
- PrintF("\n");
+ PrintF(")\n");
}
namespace internal {
class ExitNode;
+class Location;
+
+// Translate a source AST into a control-flow graph (CFG). The CFG contains
+// single-entry, single-exit blocks of straight-line instructions and
+// administrative nodes.
+//
+// Instructions are described by the following grammar.
+//
+// <Instruction> ::=
+// BinaryOpInstr <Location> Token::Value <Value> <Value>
+// | ReturnInstr Effect <Value>
+//
+// Values are trivial expressions:
+//
+// <Value> ::= Constant | <Location>
+//
+// Locations are storable values ('lvalues'). They can be slots,
+// compiler-generated temporaries, or the special location 'Effect'
+// indicating that no value is needed.
+//
+// <Location> ::=
+// SlotLocation Slot::Type <Index>
+// | TempLocation
+// | Effect
+
+
+// Administrative nodes: There are several types of 'administrative' nodes
+// that do not contain instructions and do not necessarily have a single
+// predecessor and a single successor.
+//
+// EntryNode: there is a distinguished entry node that has no predecessors
+// and a single successor.
+//
+// ExitNode: there is a distinguished exit node that has arbitrarily many
+// predecessors and no successor.
+//
+// JoinNode: join nodes have multiple predecessors and a single successor.
+//
+// BranchNode: branch nodes have a single predecessor and multiple
+// successors.
+
// A convenient class to keep 'global' values when building a CFG. Since
// CFG construction can be invoked recursively, CFG globals are stacked.
return top_;
}
+ // The function currently being compiled.
FunctionLiteral* fun() { return global_fun_; }
+ // The shared global exit node for all exits from the function.
ExitNode* exit() { return global_exit_; }
+ // A singleton effect location.
+ Location* effect_location() { return effect_; }
+
#ifdef DEBUG
- int next_number() { return node_counter_++; }
+ int next_node_number() { return node_counter_++; }
+ int next_temp_number() { return temp_counter_++; }
#endif
private:
static CfgGlobals* top_;
-
- // Function literal currently compiling.
FunctionLiteral* global_fun_;
-
- // Shared global exit node for all returns from the same function.
ExitNode* global_exit_;
+ Location* effect_;
#ifdef DEBUG
- // Used to number nodes when printing.
+ // Used to number nodes and temporaries when printing.
int node_counter_;
+ int temp_counter_;
#endif
CfgGlobals* previous_;
};
-// Values appear in instructions. They represent trivial source
-// expressions: ones with no side effects and that do not require code to be
-// generated.
+// Values represent trivial source expressions: ones with no side effects
+// and that do not require code to be generated.
class Value : public ZoneObject {
public:
virtual ~Value() {}
- virtual void ToRegister(MacroAssembler* masm, Register reg) = 0;
+ // Predicates:
+
+ // True if the value is a temporary allocated to the stack in
+ // fast-compilation mode.
+ virtual bool is_on_stack() { return false; }
+
+ // True if the value is a compiler-generated temporary location.
+ virtual bool is_temporary() { return false; }
+
+ // Support for fast-compilation mode:
+
+ // Move the value into a register.
+ virtual void Get(MacroAssembler* masm, Register reg) = 0;
+
+ // Push the value on the stack.
+ virtual void Push(MacroAssembler* masm) = 0;
#ifdef DEBUG
virtual void Print() = 0;
virtual ~Constant() {}
- void ToRegister(MacroAssembler* masm, Register reg);
+ // Support for fast-compilation mode.
+ void Get(MacroAssembler* masm, Register reg);
+ void Push(MacroAssembler* masm);
#ifdef DEBUG
void Print();
public:
virtual ~Location() {}
- virtual void ToRegister(MacroAssembler* masm, Register reg) = 0;
+ // Static factory function returning the singleton effect location.
+ static Location* Effect() {
+ return CfgGlobals::current()->effect_location();
+ }
+
+ // Support for fast-compilation mode:
+
+ // Assumes temporaries have been allocated.
+ virtual void Get(MacroAssembler* masm, Register reg) = 0;
+
+ // Store the value in a register to the location. Assumes temporaries
+ // have been allocated.
+ virtual void Set(MacroAssembler* masm, Register reg) = 0;
+
+ // Assumes temporaries have been allocated, and if the value is a
+ // temporary it was not allocated to the stack.
+ virtual void Push(MacroAssembler* masm) = 0;
#ifdef DEBUG
virtual void Print() = 0;
};
+// Effect is a special (singleton) location that indicates the value of a
+// computation is not needed (though its side effects are).
+class Effect : public Location {
+ public:
+ // We should not try to emit code to read or write to Effect.
+ void Get(MacroAssembler* masm, Register reg) { UNREACHABLE(); }
+ void Set(MacroAssembler* masm, Register reg) { UNREACHABLE(); }
+ void Push(MacroAssembler* masm) { UNREACHABLE(); }
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Effect() {}
+
+ friend class CfgGlobals;
+};
+
+
// SlotLocations represent parameters and stack-allocated (i.e.,
// non-context) local variables.
class SlotLocation : public Location {
public:
SlotLocation(Slot::Type type, int index) : type_(type), index_(index) {}
- void ToRegister(MacroAssembler* masm, Register reg);
+ // Accessors.
+ Slot::Type type() { return type_; }
+ int index() { return index_; }
+
+ // Support for fast-compilation mode.
+ void Get(MacroAssembler* masm, Register reg);
+ void Set(MacroAssembler* masm, Register reg);
+ void Push(MacroAssembler* masm);
#ifdef DEBUG
void Print();
};
+// TempLocations represent compiler generated temporaries. They are
+// allocated to registers or memory either before code generation (in the
+// optimized-for-speed compiler) or on the fly during code generation (in
+// the optimized-for-space compiler).
+class TempLocation : public Location {
+ public:
+ // Fast-compilation mode allocation decisions.
+ enum Where {
+ NOWHERE, // Not yet allocated.
+ ACCUMULATOR, // Allocated to the dedicated accumulator register.
+ STACK // Allocated to the stack.
+ };
+
+ TempLocation() : where_(NOWHERE) {
+#ifdef DEBUG
+ number_ = -1;
+#endif
+ }
+
+ // Cast accessor.
+ static TempLocation* cast(Location* loc) {
+ ASSERT(loc->is_temporary());
+ return reinterpret_cast<TempLocation*>(loc);
+ }
+
+ // Accessors.
+ Where where() { return where_; }
+ void set_where(Where where) { where_ = where; }
+
+ // Predicates.
+ bool is_on_stack() { return where_ == STACK; }
+ bool is_temporary() { return true; }
+
+ // Support for fast-compilation mode. Assume the temp has been allocated.
+ void Get(MacroAssembler* masm, Register reg);
+ void Set(MacroAssembler* masm, Register reg);
+ void Push(MacroAssembler* masm);
+
+#ifdef DEBUG
+ int number() {
+ if (number_ == -1) number_ = CfgGlobals::current()->next_temp_number();
+ return number_;
+ }
+
+ void Print();
+#endif
+
+ private:
+ Where where_;
+
+#ifdef DEBUG
+ int number_;
+#endif
+};
+
+
// Instructions are computations. They represent non-trivial source
// expressions: typically ones that have side effects and require code to
// be generated.
class Instruction : public ZoneObject {
public:
+ // Every instruction has a location where its result is stored (which may
+ // be Effect).
+ explicit Instruction(Location* loc) : loc_(loc) {}
+
virtual ~Instruction() {}
+ // Accessors.
+ Location* location() { return loc_; }
+
+ // Support for fast-compilation mode:
+
+ // Emit code to perform the instruction.
virtual void Compile(MacroAssembler* masm) = 0;
+ // Allocate a temporary which is the result of the immediate predecessor
+ // instruction. It is allocated to the accumulator register if it is used
+ // as an operand to this instruction, otherwise to the stack.
+ virtual void FastAllocate(TempLocation* temp) = 0;
+
#ifdef DEBUG
virtual void Print() = 0;
#endif
+
+ protected:
+ Location* loc_;
};
-// Return a value.
+// Perform a (non-short-circuited) binary operation on a pair of values,
+// leaving the result in a location.
+class BinaryOpInstr : public Instruction {
+ public:
+ BinaryOpInstr(Location* loc, Token::Value op, Value* val0, Value* val1)
+ : Instruction(loc), op_(op), val0_(val0), val1_(val1) {
+ }
+
+ // Support for fast-compilation mode.
+ void Compile(MacroAssembler* masm);
+ void FastAllocate(TempLocation* temp);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Token::Value op_;
+ Value* val0_;
+ Value* val1_;
+};
+
+
+// Return a value. Has the side effect of moving its value into the return
+// value register. Can only occur as the last instruction in an instruction
+// block, and implies that the block is closed (cannot have instructions
+// appended or graph fragments concatenated to the end) and that the block's
+// successor is the global exit node for the current function.
class ReturnInstr : public Instruction {
public:
- explicit ReturnInstr(Value* value) : value_(value) {}
+ // Location is always Effect.
+ explicit ReturnInstr(Value* value)
+ : Instruction(CfgGlobals::current()->effect_location()),
+ value_(value) {
+ }
virtual ~ReturnInstr() {}
+ // Support for fast-compilation mode.
void Compile(MacroAssembler* masm);
+ void FastAllocate(TempLocation* temp);
#ifdef DEBUG
void Print();
};
-// Nodes make up control-flow graphs. They contain single-entry,
-// single-exit blocks of instructions and administrative nodes making up the
-// graph structure.
+// Nodes make up control-flow graphs.
class CfgNode : public ZoneObject {
public:
CfgNode() : is_marked_(false) {
virtual ~CfgNode() {}
+ // Because CFGs contain cycles, nodes support marking during traversal
+ // (e.g., for printing or compilation). The traversal functions will mark
+ // unmarked nodes and backtrack if they encounter a marked one. After a
+ // traversal, the graph should be explicitly unmarked by calling Unmark on
+ // the entry node.
bool is_marked() { return is_marked_; }
+ virtual void Unmark() = 0;
- virtual bool is_block() { return false; }
+ // Predicates:
- virtual void Unmark() = 0;
+ // True if the node is an instruction block.
+ virtual bool is_block() { return false; }
+ // Support for fast-compilation mode. Emit the instructions or control
+ // flow represented by the node.
virtual void Compile(MacroAssembler* masm) = 0;
#ifdef DEBUG
int number() {
- if (number_ == -1) number_ = CfgGlobals::current()->next_number();
+ if (number_ == -1) number_ = CfgGlobals::current()->next_node_number();
return number_;
}
virtual ~InstructionBlock() {}
+ void Unmark();
+
+ // Cast accessor.
static InstructionBlock* cast(CfgNode* node) {
ASSERT(node->is_block());
return reinterpret_cast<InstructionBlock*>(node);
}
+ bool is_block() { return true; }
+
+ // Accessors.
+ CfgNode* successor() { return successor_; }
+
void set_successor(CfgNode* succ) {
ASSERT(successor_ == NULL);
successor_ = succ;
}
- bool is_block() { return true; }
-
- void Unmark();
+ ZoneList<Instruction*>* instructions() { return &instructions_; }
+ // Support for fast-compilation mode.
void Compile(MacroAssembler* masm);
+ // Add an instruction to the end of the block.
void Append(Instruction* instr) { instructions_.Add(instr); }
#ifdef DEBUG
};
-// The CFG for a function has a distinguished entry node. It has no
-// predecessors and a single successor. The successor is the block
-// containing the function's first instruction.
+// An entry node (one per function).
class EntryNode : public CfgNode {
public:
explicit EntryNode(InstructionBlock* succ) : successor_(succ) {}
void Unmark();
+ // Support for fast-compilation mode.
void Compile(MacroAssembler* masm);
#ifdef DEBUG
};
-// The CFG for a function has a distinguished exit node. It has no
-// successor and arbitrarily many predecessors. The predecessors are all
-// the blocks returning from the function.
+// An exit node (one per function).
class ExitNode : public CfgNode {
public:
ExitNode() {}
void Unmark();
+ // Support for fast-compilation mode.
void Compile(MacroAssembler* masm);
#ifdef DEBUG
};
-// A CFG consists of a linked structure of nodes. It has a single entry
-// node and optionally an exit node. There is a distinguished global exit
-// node that is used as the successor of all blocks that return from the
-// function.
+// A CFG consists of a linked structure of nodes. Nodes are linked by
+// pointing to their successors, always beginning with a (single) entry node
+// (not necessarily of type EntryNode). If it is still possible to add
+// nodes to the end of the graph (i.e., there is a (single) path that does
+// not end with the global exit node), then the CFG has an exit node as
+// well.
+//
+// The empty CFG is represented by a NULL entry and a NULL exit.
+//
+// We use the term 'open fragment' to mean a CFG whose entry and exits are
+// both instruction blocks. It is always possible to add instructions and
+// nodes to the beginning or end of an open fragment.
//
-// Fragments of control-flow graphs, produced when traversing the statements
-// and expressions in the source AST, are represented by the same class.
-// They have instruction blocks as both their entry and exit (if there is
-// one). Instructions can always be prepended or appended to fragments, and
-// fragments can always be concatenated.
+// We use the term 'closed fragment' to mean a CFG whose entry is an
+// instruction block and whose exit is NULL (all paths go to the global
+// exit).
//
-// A singleton CFG fragment (i.e., with only one node) has the same node as
-// both entry and exit (if the exit is available).
+// We use the term 'fragment' to refer to a CFG that is known to be an open
+// or closed fragment.
class Cfg : public ZoneObject {
public:
- // Create a singleton CFG fragment.
- explicit Cfg(InstructionBlock* block) : entry_(block), exit_(block) {}
+ // Create an empty CFG fragment.
+ Cfg() : entry_(NULL), exit_(NULL) {}
- // Build the CFG for a function.
+ // Build the CFG for a function. The returned CFG begins with an
+ // EntryNode and all paths end with the ExitNode.
static Cfg* Build();
- // The entry and exit nodes.
+ // The entry and exit nodes of the CFG (not necessarily EntryNode and
+ // ExitNode).
CfgNode* entry() { return entry_; }
CfgNode* exit() { return exit_; }
// concatenated to).
bool has_exit() { return exit_ != NULL; }
- // Add an entry node to a CFG fragment. It is no longer a fragment
- // (instructions cannot be prepended).
+ // Add an EntryNode to a CFG fragment. It is no longer a fragment
+ // (instructions can no longer be prepended).
void PrependEntryNode();
- // Append an instruction to the end of a CFG fragment. Assumes it has an
- // available exit.
+ // Append an instruction to the end of an open fragment.
void Append(Instruction* instr);
- // Appends a return instruction to the end of a CFG fragment. It no
- // longer has an available exit node.
+ // Appends a return instruction to the end of an open fragment and make
+ // it a closed fragment (the exit's successor becomes global exit node).
void AppendReturnInstruction(Value* value);
+ // Glue another CFG fragment to the end of this (open) fragment.
+ void Concatenate(Cfg* other);
+
+ // Support for compilation. Compile the entire CFG.
Handle<Code> Compile(Handle<Script> script);
#ifdef DEBUG
};
-// An Expression Builder traverses a trivial expression and returns a value.
+// An ExpressionBuilder traverses an expression and returns an open CFG
+// fragment (currently a possibly empty list of instructions represented by
+// a singleton instruction block) and the expression's value.
+//
+// Failure to build the CFG is indicated by a NULL CFG.
class ExpressionBuilder : public AstVisitor {
public:
- ExpressionBuilder() : value_(new Constant(Handle<Object>::null())) {}
+ ExpressionBuilder() : value_(NULL), cfg_(NULL) {}
+ // Result accessors.
Value* value() { return value_; }
+ Cfg* cfg() { return cfg_; }
+
+ void Build(Expression* expr) {
+ value_ = NULL;
+ cfg_ = new Cfg();
+ Visit(expr);
+ }
// AST node visitors.
#define DECLARE_VISIT(type) void Visit##type(type* node);
private:
Value* value_;
+ Cfg* cfg_;
};
-// A StatementBuilder traverses a statement and returns a CFG.
+// A StatementBuilder maintains a CFG fragment accumulator. When it visits
+// a statement, it concatenates the CFG for the statement to the end of the
+// accumulator.
class StatementBuilder : public AstVisitor {
public:
- StatementBuilder() : cfg_(new Cfg(new InstructionBlock())) {}
+ StatementBuilder() : cfg_(new Cfg()) {}
Cfg* cfg() { return cfg_; }
#include "cfg.h"
#include "codegen-inl.h"
+#include "codegen-ia32.h"
#include "macro-assembler-ia32.h"
namespace v8 {
{
Comment cmt(masm, "[ InstructionBlock");
for (int i = 0, len = instructions_.length(); i < len; i++) {
+ // If the location of the current instruction is a temp, then the
+ // instruction cannot be in tail position in the block. Allocate the
+ // temp based on peeking ahead to the next instruction.
+ Instruction* instr = instructions_[i];
+ Location* loc = instr->location();
+ if (loc->is_temporary()) {
+ instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
+ }
instructions_[i]->Compile(masm);
}
}
}
successor_->Compile(masm);
if (FLAG_check_stack) {
+ Comment cmnt(masm, "[ Deferred Stack Check");
__ bind(&deferred_enter);
StackCheckStub stub;
__ CallStub(&stub);
}
+// Emit IA-32 code for a binary operation.  Both operands are passed to
+// the generic binary op stub on the stack; after the stub call the
+// result is read back from eax and stored to this instruction's
+// location.
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+ // The right-hand value should not be on the stack---if it is a
+ // compiler-generated temporary it is in the accumulator.
+ ASSERT(!val1_->is_on_stack());
+
+ Comment cmnt(masm, "[ BinaryOpInstr");
+ // We can overwrite one of the operands if it is a temporary.
+ OverwriteMode mode = NO_OVERWRITE;
+ if (val0_->is_temporary()) {
+ mode = OVERWRITE_LEFT;
+ } else if (val1_->is_temporary()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ // Push both operands and call the specialized stub.
+ // A left operand allocated to the stack is already in place.
+ if (!val0_->is_on_stack()) {
+ val0_->Push(masm);
+ }
+ val1_->Push(masm);
+ GenericBinaryOpStub stub(op_, mode, SMI_CODE_IN_STUB);
+ __ CallStub(&stub);
+ loc_->Set(masm, eax);
+}
+
+
void ReturnInstr::Compile(MacroAssembler* masm) {
+ // The location should be 'Effect'. As a side effect, move the value to
+ // the accumulator.
Comment cmnt(masm, "[ ReturnInstr");
- value_->ToRegister(masm, eax);
+ value_->Get(masm, eax);
}
-void Constant::ToRegister(MacroAssembler* masm, Register reg) {
+void Constant::Get(MacroAssembler* masm, Register reg) {
__ mov(reg, Immediate(handle_));
}
-void SlotLocation::ToRegister(MacroAssembler* masm, Register reg) {
- switch (type_) {
+void Constant::Push(MacroAssembler* masm) {
+ __ push(Immediate(handle_));
+}
+
+
+// Compute the ebp-relative stack operand for a parameter or local slot.
+static Operand ToOperand(SlotLocation* loc) {
+  switch (loc->type()) {
    case Slot::PARAMETER: {
      int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-      __ mov(reg, Operand(ebp, (1 + count - index_) * kPointerSize));
-      break;
+      // Parameters are addressed at positive offsets from ebp.
+      return Operand(ebp, (1 + count - loc->index()) * kPointerSize);
    }
    case Slot::LOCAL: {
      const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
-      __ mov(reg, Operand(ebp, kOffset - index_ * kPointerSize));
-      break;
+      // Locals are addressed at negative offsets below the frame pointer.
+      return Operand(ebp, kOffset - loc->index() * kPointerSize);
    }
    default:
      UNREACHABLE();
+      // Dummy operand to satisfy compilers that require a return value.
+      return Operand(eax);
+  }
+}
+
+
+// Load the slot's value into |reg|.
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+  __ mov(reg, ToOperand(this));
+}
+
+
+// Store |reg| into the slot.
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+  __ mov(ToOperand(this), reg);
+}
+
+
+// Push the slot's value on the expression stack.
+void SlotLocation::Push(MacroAssembler* masm) {
+  __ push(ToOperand(this));
+}
+
+
+// Move the temp's value into |reg|.
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+  switch (where_) {
+    case ACCUMULATOR:
+      if (!reg.is(eax)) __ mov(reg, eax);
+      break;
+    case STACK:
+      // Reading a stack temp pops it: the value is consumed.
+      __ pop(reg);
+      break;
+    case NOWHERE:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+// Store |reg| as this temp's value, in the accumulator or on the stack.
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+  switch (where_) {
+    case ACCUMULATOR:
+      if (!reg.is(eax)) __ mov(eax, reg);
+      break;
+    case STACK:
+      __ push(reg);
+      break;
+    case NOWHERE:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+// Push the temp's value; only an accumulator temp can be pushed here.
+void TempLocation::Push(MacroAssembler* masm) {
+  switch (where_) {
+    case ACCUMULATOR:
+      __ push(eax);
+      break;
+    case STACK:
+    case NOWHERE:
+      UNREACHABLE();
+      break;
  }
}
};
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
-enum GenericBinaryFlags {
- SMI_CODE_IN_STUB,
- SMI_CODE_INLINED
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags)
- : op_(op), mode_(mode), flags_(flags) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_));
- }
-#endif
-
- // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 13> {};
- class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_);
- }
- void Generate(MacroAssembler* masm);
-};
-
-
const char* GenericBinaryOpStub::GetName() {
switch (op_) {
case Token::ADD: return "GenericBinaryOpStub_ADD";
};
+// Flag that indicates whether or not the code that handles smi arguments
+// should be placed in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+  SMI_CODE_IN_STUB,
+  SMI_CODE_INLINED
+};
+
+
+// Code stub implementing the generic binary operations.
+class GenericBinaryOpStub: public CodeStub {
+ public:
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      GenericBinaryFlags flags)
+      : op_(op), mode_(mode), flags_(flags) {
+    // All token values must fit in the 13-bit op field of the minor key.
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  // Generates the code path that handles smi arguments, bailing out to
+  // |slow| for cases it does not handle.
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  GenericBinaryFlags flags_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+           Token::String(op_),
+           static_cast<int>(mode_),
+           static_cast<int>(flags_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 13> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | FlagBits::encode(flags_);
+  }
+  void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
#include "cfg.h"
#include "codegen-inl.h"
+#include "codegen-x64.h"
#include "debug.h"
#include "macro-assembler-x64.h"
{
Comment cmt(masm, "[ InstructionBlock");
for (int i = 0, len = instructions_.length(); i < len; i++) {
+ // If the location of the current instruction is a temp, then the
+ // instruction cannot be in tail position in the block. Allocate the
+ // temp based on peeking ahead to the next instruction.
+ Instruction* instr = instructions_[i];
+ Location* loc = instr->location();
+ if (loc->is_temporary()) {
+ instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
+ }
instructions_[i]->Compile(masm);
}
}
}
successor_->Compile(masm);
if (FLAG_check_stack) {
+ Comment cmnt(masm, "[ Deferred Stack Check");
__ bind(&deferred_enter);
StackCheckStub stub;
__ CallStub(&stub);
}
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+  // The right-hand value should not be on the stack---if it is a
+  // compiler-generated temporary it is in the accumulator.
+  ASSERT(!val1_->is_on_stack());
+
+  Comment cmnt(masm, "[ BinaryOpInstr");
+  // We can overwrite one of the operands if it is a temporary.
+  OverwriteMode mode = NO_OVERWRITE;
+  if (val0_->is_temporary()) {
+    mode = OVERWRITE_LEFT;
+  } else if (val1_->is_temporary()) {
+    mode = OVERWRITE_RIGHT;
+  }
+
+  // Push both operands and call the specialized stub.
+  // The left operand may already be on the stack (a stack-allocated
+  // temporary), in which case it must not be pushed a second time.
+  if (!val0_->is_on_stack()) {
+    val0_->Push(masm);
+  }
+  val1_->Push(masm);
+  GenericBinaryOpStub stub(op_, mode, SMI_CODE_IN_STUB);
+  __ CallStub(&stub);
+  // The stub's result is expected in rax; store it in this
+  // instruction's location.
+  loc_->Set(masm, rax);
+}
+
+
void ReturnInstr::Compile(MacroAssembler* masm) {
+  // The location should be 'Effect'. As a side effect, move the value to
+  // the accumulator.
+  // No return sequence is emitted here; this only places the value in rax.
  Comment cmnt(masm, "[ ReturnInstr");
-  value_->ToRegister(masm, rax);
+  value_->Get(masm, rax);
}
-void Constant::ToRegister(MacroAssembler* masm, Register reg) {
+// Load this constant's handle into |reg|.
+void Constant::Get(MacroAssembler* masm, Register reg) {
  __ Move(reg, handle_);
}
-void SlotLocation::ToRegister(MacroAssembler* masm, Register reg) {
-  switch (type_) {
+// Push this constant's handle on the expression stack.
+void Constant::Push(MacroAssembler* masm) {
+  __ Push(handle_);
+}
+
+
+// Compute the rbp-relative stack operand for a parameter or local slot.
+static Operand ToOperand(SlotLocation* loc) {
+  switch (loc->type()) {
    case Slot::PARAMETER: {
      int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-      __ movq(reg, Operand(rbp, (1 + count - index_) * kPointerSize));
-      break;
+      // Parameters are addressed at positive offsets from rbp.
+      return Operand(rbp, (1 + count - loc->index()) * kPointerSize);
    }
    case Slot::LOCAL: {
      const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
-      __ movq(reg, Operand(rbp, kOffset - index_ * kPointerSize));
-      break;
+      // Locals are addressed at negative offsets below the frame pointer.
+      return Operand(rbp, kOffset - loc->index() * kPointerSize);
    }
    default:
      UNREACHABLE();
+      // Dummy operand to satisfy compilers that require a return value.
+      return Operand(rax, 0);
+  }
+}
+
+
+// Load the slot's value into |reg|.
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+  __ movq(reg, ToOperand(this));
+}
+
+
+// Store |reg| into the slot.
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+  __ movq(ToOperand(this), reg);
+}
+
+
+// Push the slot's value on the expression stack.
+void SlotLocation::Push(MacroAssembler* masm) {
+  __ push(ToOperand(this));
+}
+
+
+// Move the temp's value into |reg|.
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+  switch (where_) {
+    case ACCUMULATOR:
+      if (!reg.is(rax)) __ movq(reg, rax);
+      break;
+    case STACK:
+      // Reading a stack temp pops it: the value is consumed.
+      __ pop(reg);
+      break;
+    case NOWHERE:
+      UNREACHABLE();
+      break;
  }
}
+
+
+// Store |reg| as this temp's value, in the accumulator or on the stack.
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+  switch (where_) {
+    case ACCUMULATOR:
+      if (!reg.is(rax)) __ movq(rax, reg);
+      break;
+    case STACK:
+      __ push(reg);
+      break;
+    case NOWHERE:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+// Push the temp's value; only an accumulator temp can be pushed here.
+void TempLocation::Push(MacroAssembler* masm) {
+  switch (where_) {
+    case ACCUMULATOR:
+      __ push(rax);
+      break;
+    case STACK:
+    case NOWHERE:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
#undef __
} } // namespace v8::internal
}
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
-enum GenericBinaryFlags {
- SMI_CODE_IN_STUB,
- SMI_CODE_INLINED
-};
-
-
class FloatingPointHelper : public AllStatic {
public:
// Code pattern for loading a floating point value. Input value must
};
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags)
- : op_(op), mode_(mode), flags_(flags) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_));
- }
-#endif
-
- // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 13> {};
- class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_);
- }
- void Generate(MacroAssembler* masm);
-};
-
-
class DeferredInlineBinaryOperation: public DeferredCode {
public:
DeferredInlineBinaryOperation(Token::Value op,
};
+// Flag that indicates whether or not the code that handles smi arguments
+// should be placed in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+  SMI_CODE_IN_STUB,
+  SMI_CODE_INLINED
+};
+
+
+// Code stub implementing the generic binary operations.
+class GenericBinaryOpStub: public CodeStub {
+ public:
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      GenericBinaryFlags flags)
+      : op_(op), mode_(mode), flags_(flags) {
+    // All token values must fit in the 13-bit op field of the minor key.
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  // Generates the code path that handles smi arguments, bailing out to
+  // |slow| for cases it does not handle.
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  GenericBinaryFlags flags_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+           Token::String(op_),
+           static_cast<int>(mode_),
+           static_cast<int>(flags_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 13> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | FlagBits::encode(flags_);
+  }
+  void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_