public:
DECLARE_NODE_TYPE(DebuggerStatement)
+ BailoutId DebugBreakId() const { return debugger_id_; }
+
protected:
- explicit DebuggerStatement(Zone* zone, int pos): Statement(zone, pos) {}
+ explicit DebuggerStatement(Zone* zone, int pos)
+ : Statement(zone, pos), debugger_id_(GetNextId(zone)) {}
+
+ private:
+ const BailoutId debugger_id_;
};
break;
case kArchDeoptimize: {
int deoptimization_id = MiscField::decode(instr->opcode());
- BuildTranslation(instr, deoptimization_id);
+ BuildTranslation(instr, 0, deoptimization_id);
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
if (instr->InputAt(0)->IsImmediate()) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ Call(code, RelocInfo::CODE_TARGET);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
} else {
Register reg = i.InputRegister(0);
int entry = Code::kHeaderSize - kHeapObjectTag;
__ ldr(reg, MemOperand(reg, entry));
__ Call(reg);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- }
- bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
- if (lazy_deopt) {
- RecordLazyDeoptimizationEntry(instr);
}
+
+ AddSafepointAndDeopt(instr);
+
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(ip);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- RecordLazyDeoptimizationEntry(instr);
+ AddSafepointAndDeopt(instr);
+
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
BasicBlock* deoptimization) {
ArmOperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
// TODO(turbofan): on ARM64 it's probably better to use the code object in a
// TODO(dcarney): might be possible to use claim/poke instead
// Push any stack arguments.
- for (int i = buffer.pushed_count - 1; i >= 0; --i) {
- Node* input = buffer.pushed_nodes[i];
- Emit(kArmPush, NULL, g.UseRegister(input));
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
+ Emit(kArmPush, NULL, g.UseRegister(*input));
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- bool lazy_deopt = descriptor->CanLazilyDeoptimize();
- opcode = kArmCallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ opcode = kArmCallCodeObject;
break;
}
case CallDescriptor::kCallAddress:
UNREACHABLE();
return;
}
+ opcode |= MiscField::encode(descriptor->deoptimization_support());
// Emit the call instruction.
Instruction* call_instr =
- Emit(opcode, buffer.output_count, buffer.outputs,
- buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
if (deoptimization != NULL) {
// Caller clean up of stack for C-style calls.
if (descriptor->kind() == CallDescriptor::kCallAddress &&
- buffer.pushed_count > 0) {
+ !buffer.pushed_nodes.empty()) {
DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kArmDrop | MiscField::encode(buffer.pushed_count), NULL);
+ Emit(kArmDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
}
}
break;
case kArchDeoptimize: {
int deoptimization_id = MiscField::decode(instr->opcode());
- BuildTranslation(instr, deoptimization_id);
+ BuildTranslation(instr, 0, deoptimization_id);
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
if (instr->InputAt(0)->IsImmediate()) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ Call(code, RelocInfo::CODE_TARGET);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
} else {
Register reg = i.InputRegister(0);
int entry = Code::kHeaderSize - kHeapObjectTag;
__ Ldr(reg, MemOperand(reg, entry));
__ Call(reg);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- }
- bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
- if (lazy_deopt) {
- RecordLazyDeoptimizationEntry(instr);
}
+
+ AddSafepointAndDeopt(instr);
// Meaningless instruction for ICs to overwrite.
AddNopForSmiCodeInlining();
break;
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(x10);
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- RecordLazyDeoptimizationEntry(instr);
+ AddSafepointAndDeopt(instr);
break;
}
case kArm64CallAddress: {
BasicBlock* deoptimization) {
Arm64OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
// TODO(turbofan): on ARM64 it's probably better to use the code object in a
// Push the arguments to the stack.
bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
- bool pushed_count_uneven = buffer.pushed_count & 1;
- int aligned_push_count = buffer.pushed_count;
+ bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
+ int aligned_push_count = buffer.pushed_nodes.size();
if (is_c_frame && pushed_count_uneven) {
aligned_push_count++;
}
}
// Move arguments to the stack.
{
- int slot = buffer.pushed_count - 1;
+ int slot = buffer.pushed_nodes.size() - 1;
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- bool lazy_deopt = descriptor->CanLazilyDeoptimize();
- opcode = kArm64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ opcode = kArm64CallCodeObject;
break;
}
case CallDescriptor::kCallAddress:
UNREACHABLE();
return;
}
+ opcode |= MiscField::encode(descriptor->deoptimization_support());
// Emit the call instruction.
Instruction* call_instr =
- Emit(opcode, buffer.output_count, buffer.outputs,
- buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
if (deoptimization != NULL) {
locals_count_(scope->num_stack_slots()),
parameters_node_(NULL),
locals_node_(NULL),
- stack_node_(NULL),
- parameters_dirty_(true),
- locals_dirty_(true),
- stack_dirty_(true) {
+ stack_node_(NULL) {
DCHECK_EQ(scope->num_parameters() + 1, parameters_count());
// Bind the receiver variable.
locals_count_(copy.locals_count_),
parameters_node_(copy.parameters_node_),
locals_node_(copy.locals_node_),
- stack_node_(copy.stack_node_),
- parameters_dirty_(copy.parameters_dirty_),
- locals_dirty_(copy.locals_dirty_),
- stack_dirty_(copy.stack_dirty_) {}
+ stack_node_(copy.stack_node_) {}
-Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id) {
- if (parameters_dirty_) {
- Operator* op = common()->StateValues(parameters_count());
- if (parameters_count() != 0) {
- Node** parameters = &values()->front();
- parameters_node_ = graph()->NewNode(op, parameters_count(), parameters);
- } else {
- parameters_node_ = graph()->NewNode(op);
- }
- parameters_dirty_ = false;
- }
- if (locals_dirty_) {
- Operator* op = common()->StateValues(locals_count());
- if (locals_count() != 0) {
- Node** locals = &values()->at(parameters_count_);
- locals_node_ = graph()->NewNode(op, locals_count(), locals);
- } else {
- locals_node_ = graph()->NewNode(op);
+void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
+ int offset, int count) {
+ bool should_update = false;
+ Node** env_values = (count == 0) ? NULL : &values()->at(offset);
+ if (*state_values == NULL || (*state_values)->InputCount() != count) {
+ should_update = true;
+ } else {
+ DCHECK(static_cast<size_t>(offset + count) <= values()->size());
+ for (int i = 0; i < count; i++) {
+ if ((*state_values)->InputAt(i) != env_values[i]) {
+ should_update = true;
+ break;
+ }
}
- locals_dirty_ = false;
}
- if (stack_dirty_) {
- Operator* op = common()->StateValues(stack_height());
- if (stack_height() != 0) {
- Node** stack = &values()->at(parameters_count_ + locals_count_);
- stack_node_ = graph()->NewNode(op, stack_height(), stack);
- } else {
- stack_node_ = graph()->NewNode(op);
- }
- stack_dirty_ = false;
+ if (should_update) {
+ Operator* op = common()->StateValues(count);
+ (*state_values) = graph()->NewNode(op, count, env_values);
}
+}
+
+
+Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id) {
+ UpdateStateValues(¶meters_node_, 0, parameters_count());
+ UpdateStateValues(&locals_node_, parameters_count(), locals_count());
+ UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
+ stack_height());
Operator* op = common()->FrameState(ast_id);
AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
- Expression::Context kind,
- BailoutId bailout_id)
- : bailout_id_(bailout_id),
- kind_(kind),
- owner_(own),
- outer_(own->ast_context()) {
+ Expression::Context kind)
+ : kind_(kind), owner_(own), outer_(own->ast_context()) {
owner()->set_ast_context(this); // Push.
#ifdef DEBUG
original_height_ = environment()->stack_height();
}
-void AstGraphBuilder::AstEffectContext::ProduceValueWithLazyBailout(
- Node* value) {
- ProduceValue(value);
- owner()->BuildLazyBailout(value, bailout_id_);
-}
-
-
-void AstGraphBuilder::AstValueContext::ProduceValueWithLazyBailout(
- Node* value) {
- ProduceValue(value);
- owner()->BuildLazyBailout(value, bailout_id_);
-}
-
-
-void AstGraphBuilder::AstTestContext::ProduceValueWithLazyBailout(Node* value) {
- environment()->Push(value);
- owner()->BuildLazyBailout(value, bailout_id_);
- environment()->Pop();
- ProduceValue(value);
-}
-
-
void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) {
// The value is ignored.
}
void AstGraphBuilder::VisitForValue(Expression* expr) {
- AstValueContext for_value(this, expr->id());
+ AstValueContext for_value(this);
if (!HasStackOverflow()) {
expr->Accept(this);
}
void AstGraphBuilder::VisitForEffect(Expression* expr) {
- AstEffectContext for_effect(this, expr->id());
+ AstEffectContext for_effect(this);
if (!HasStackOverflow()) {
expr->Accept(this);
}
void AstGraphBuilder::VisitForTest(Expression* expr) {
- AstTestContext for_condition(this, expr->id());
+ AstTestContext for_condition(this);
if (!HasStackOverflow()) {
expr->Accept(this);
}
Node* exit_cond =
NewNode(javascript()->LessThan(), index, cache_length);
// TODO(jarin): provide real bailout id.
- BuildLazyBailout(exit_cond, BailoutId::None());
+ PrepareFrameState(exit_cond, BailoutId::None());
for_loop.BreakUnless(exit_cond);
// TODO(dcarney): this runtime call should be a handful of
// simplified instructions that
Node* res = ProcessArguments(
javascript()->Call(3, NO_CALL_FUNCTION_FLAGS), 3);
// TODO(jarin): provide real bailout id.
- BuildLazyBailout(res, BailoutId::None());
+ PrepareFrameState(res, BailoutId::None());
Node* property_missing = NewNode(javascript()->StrictEqual(), res,
jsgraph()->ZeroConstant());
{
// Inc counter and continue.
Node* index_inc =
NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
- environment()->Poke(0, index_inc);
// TODO(jarin): provide real bailout id.
- BuildLazyBailout(index_inc, BailoutId::None());
+ PrepareFrameState(index_inc, BailoutId::None());
+ environment()->Poke(0, index_inc);
for_loop.Continue();
is_property_missing.Else();
is_property_missing.End();
// Inc counter and continue.
Node* index_inc =
NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
- environment()->Poke(0, index_inc);
// TODO(jarin): provide real bailout id.
- BuildLazyBailout(index_inc, BailoutId::None());
+ PrepareFrameState(index_inc, BailoutId::None());
+ environment()->Poke(0, index_inc);
for_loop.EndBody();
for_loop.EndLoop();
environment()->Drop(5);
void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
// TODO(turbofan): Do we really need a separate reloc-info for this?
- NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+ Node* node = NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+ PrepareFrameState(node, stmt->DebugBreakId());
}
PrintableUnique<Name> name = MakeUnique(key->AsPropertyName());
Node* store =
NewNode(javascript()->StoreNamed(name), literal, value);
- BuildLazyBailout(store, key->id());
+ PrepareFrameState(store, key->id());
} else {
VisitForEffect(property->value());
}
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(i);
Node* store = NewNode(javascript()->StoreProperty(), literal, index, value);
- BuildLazyBailout(store, expr->GetIdForElement(i));
+ PrepareFrameState(store, expr->GetIdForElement(i));
}
environment()->Pop(); // Array literal index.
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
Node* store = NewNode(javascript()->StoreNamed(name), object, value);
// TODO(jarin) Fill in the correct bailout id.
- BuildLazyBailout(store, BailoutId::None());
+ PrepareFrameState(store, BailoutId::None());
break;
}
case KEYED_PROPERTY: {
value = environment()->Pop();
Node* store = NewNode(javascript()->StoreProperty(), object, key, value);
// TODO(jarin) Fill in the correct bailout id.
- BuildLazyBailout(store, BailoutId::None());
+ PrepareFrameState(store, BailoutId::None());
break;
}
}
PrintableUnique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
old_value = NewNode(javascript()->LoadNamed(name), object);
- BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ PrepareFrameState(old_value, property->LoadId(), PUSH_OUTPUT);
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Top();
Node* object = environment()->Peek(1);
old_value = NewNode(javascript()->LoadProperty(), object, key);
- BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ PrepareFrameState(old_value, property->LoadId(), PUSH_OUTPUT);
break;
}
}
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = BuildBinaryOp(left, right, expr->binary_op());
+ PrepareFrameState(value, expr->binary_operation()->id(), PUSH_OUTPUT);
environment()->Push(value);
- BuildLazyBailout(value, expr->binary_operation()->id());
} else {
VisitForValue(expr->value());
}
PrintableUnique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
Node* store = NewNode(javascript()->StoreNamed(name), object, value);
- BuildLazyBailout(store, expr->AssignmentId());
+ PrepareFrameState(store, expr->AssignmentId());
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
Node* store = NewNode(javascript()->StoreProperty(), object, key, value);
- BuildLazyBailout(store, expr->AssignmentId());
+ PrepareFrameState(store, expr->AssignmentId());
break;
}
}
Node* object = environment()->Pop();
value = NewNode(javascript()->LoadProperty(), object, key);
}
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
Node* key = environment()->Pop();
callee_value = NewNode(javascript()->LoadProperty(), object, key);
}
- BuildLazyBailoutWithPushedNode(callee_value, property->LoadId());
+ PrepareFrameState(callee_value, property->LoadId(), PUSH_OUTPUT);
receiver_value = environment()->Pop();
// Note that a PROPERTY_CALL requires the receiver to be wrapped into an
// object for sloppy callees. This could also be modeled explicitly here,
// Create node to perform the function call.
Operator* call = javascript()->Call(args->length() + 2, flags);
Node* value = ProcessArguments(call, args->length() + 2);
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
// Create node to perform the construct call.
Operator* call = javascript()->CallNew(args->length() + 1);
Node* value = ProcessArguments(call, args->length() + 1);
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
Node* receiver_value = BuildLoadBuiltinsObject();
PrintableUnique<String> unique = MakeUnique(name);
Node* callee_value = NewNode(javascript()->LoadNamed(unique), receiver_value);
- environment()->Push(callee_value);
// TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft
// refuses to optimize functions with jsruntime calls).
- BuildLazyBailout(callee_value, BailoutId::None());
+ PrepareFrameState(callee_value, BailoutId::None(), PUSH_OUTPUT);
+ environment()->Push(callee_value);
environment()->Push(receiver_value);
// Evaluate all arguments to the JS runtime call.
// Create node to perform the JS runtime call.
Operator* call = javascript()->Call(args->length() + 2, flags);
Node* value = ProcessArguments(call, args->length() + 2);
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
Runtime::FunctionId functionId = function->function_id;
Operator* call = javascript()->Runtime(functionId, args->length());
Node* value = ProcessArguments(call, args->length());
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
PrintableUnique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
old_value = NewNode(javascript()->LoadNamed(name), object);
- BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ PrepareFrameState(old_value, property->LoadId(), PUSH_OUTPUT);
stack_depth = 1;
break;
}
Node* key = environment()->Top();
Node* object = environment()->Peek(1);
old_value = NewNode(javascript()->LoadProperty(), object, key);
- BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ PrepareFrameState(old_value, property->LoadId(), PUSH_OUTPUT);
stack_depth = 2;
break;
}
BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
// TODO(jarin) Insert proper bailout id here (will need to change
// full code generator).
- BuildLazyBailout(value, BailoutId::None());
+ PrepareFrameState(value, BailoutId::None());
// Store the value.
switch (assign_type) {
PrintableUnique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
Node* store = NewNode(javascript()->StoreNamed(name), object, value);
- BuildLazyBailout(store, expr->AssignmentId());
+ PrepareFrameState(store, expr->AssignmentId());
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
Node* store = NewNode(javascript()->StoreProperty(), object, key, value);
- BuildLazyBailout(store, expr->AssignmentId());
+ PrepareFrameState(store, expr->AssignmentId());
break;
}
}
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = BuildBinaryOp(left, right, expr->op());
- ast_context()->ProduceValueWithLazyBailout(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
}
}
}
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = NewNode(op, left, right);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
-
- BuildLazyBailout(value, expr->id());
}
PrintableUnique<Name> name = MakeUnique(variable->name());
Operator* op = javascript()->LoadNamed(name, contextual_mode);
Node* node = NewNode(op, global);
- BuildLazyBailoutWithPushedNode(node, bailout_id);
+ PrepareFrameState(node, bailout_id, PUSH_OUTPUT);
return node;
}
case Variable::PARAMETER:
PrintableUnique<Name> name = MakeUnique(variable->name());
Operator* op = javascript()->StoreNamed(name);
Node* store = NewNode(op, global, value);
- BuildLazyBailout(store, bailout_id);
+ PrepareFrameState(store, bailout_id);
return store;
}
case Variable::PARAMETER:
}
-void AstGraphBuilder::BuildLazyBailout(Node* node, BailoutId ast_id) {
+void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
+ OutputFrameStateCombine combine) {
+ if (OperatorProperties::HasFrameStateInput(node->op())) {
+ int frame_state_index = NodeProperties::GetFrameStateIndex(node);
+
+ DCHECK(node->InputAt(frame_state_index)->op()->opcode() == IrOpcode::kDead);
+
+ Node* frame_state_node = environment()->Checkpoint(ast_id);
+ node->ReplaceInput(frame_state_index, frame_state_node);
+ }
+
if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
// The deopting node should have an outgoing control dependency.
DCHECK(environment()->GetControlDependency() == node);
CopyEnvironment(continuation_env);
set_environment(deopt_env);
+ if (combine == PUSH_OUTPUT) {
+ environment()->Push(node);
+ }
+
NewNode(common()->LazyDeoptimization());
// TODO(jarin) If ast_id.IsNone(), perhaps we should generate an empty
// deopt block and make sure there is no patch entry for this (so
// that the deoptimizer dies when trying to deoptimize here).
-
Node* state_node = environment()->Checkpoint(ast_id);
-
Node* deoptimize_node = NewNode(common()->Deoptimize(), state_node);
-
UpdateControlDependencyToLeaveFunction(deoptimize_node);
// Continue with the original environment.
set_environment(continuation_env);
-
NewNode(common()->Continuation());
}
}
-
-void AstGraphBuilder::BuildLazyBailoutWithPushedNode(Node* node,
- BailoutId ast_id) {
- environment()->Push(node);
- BuildLazyBailout(node, ast_id);
- environment()->Pop();
-}
}
}
} // namespace v8::internal::compiler
// Dispatched from VisitForInStatement.
void VisitForInAssignment(Expression* expr, Node* value);
- void BuildLazyBailout(Node* node, BailoutId ast_id);
- void BuildLazyBailoutWithPushedNode(Node* node, BailoutId ast_id);
+ // Flag that describes how to combine the current environment with
+ // the output of a node to obtain a framestate for lazy bailout.
+ enum OutputFrameStateCombine {
+ PUSH_OUTPUT, // Push the output on the expression stack.
+ IGNORE_OUTPUT // Use the frame state as-is.
+ };
+
+ // Builds deoptimization for a given node.
+ void PrepareFrameState(Node* node, BailoutId ast_id,
+ OutputFrameStateCombine combine = IGNORE_OUTPUT);
+
+ OutputFrameStateCombine StateCombineFromAstContext();
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
DCHECK(variable->IsStackAllocated());
if (variable->IsParameter()) {
values()->at(variable->index() + 1) = node;
- parameters_dirty_ = true;
} else {
DCHECK(variable->IsStackLocal());
values()->at(variable->index() + parameters_count_) = node;
- locals_dirty_ = true;
}
}
Node* Lookup(Variable* variable) {
// Operations on the operand stack.
void Push(Node* node) {
values()->push_back(node);
- stack_dirty_ = true;
}
Node* Top() {
DCHECK(stack_height() > 0);
DCHECK(stack_height() > 0);
Node* back = values()->back();
values()->pop_back();
- stack_dirty_ = true;
return back;
}
DCHECK(depth >= 0 && depth < stack_height());
int index = static_cast<int>(values()->size()) - depth - 1;
values()->at(index) = node;
- stack_dirty_ = true;
}
Node* Peek(int depth) {
DCHECK(depth >= 0 && depth < stack_height());
void Drop(int depth) {
DCHECK(depth >= 0 && depth <= stack_height());
values()->erase(values()->end() - depth, values()->end());
- stack_dirty_ = true;
}
// Preserve a checkpoint of the environment for the IR graph. Any
Node* Checkpoint(BailoutId ast_id);
private:
+ void UpdateStateValues(Node** state_values, int offset, int count);
+
int parameters_count_;
int locals_count_;
Node* parameters_node_;
Node* locals_node_;
Node* stack_node_;
- bool parameters_dirty_;
- bool locals_dirty_;
- bool stack_dirty_;
};
bool IsValue() const { return kind_ == Expression::kValue; }
bool IsTest() const { return kind_ == Expression::kTest; }
+ // Determines how to combine the frame state with the value
+ // that is about to be plugged into this AstContext.
+ AstGraphBuilder::OutputFrameStateCombine GetStateCombine() {
+ return IsEffect() ? IGNORE_OUTPUT : PUSH_OUTPUT;
+ }
+
// Plug a node into this expression context. Call this function in tail
// position in the Visit functions for expressions.
virtual void ProduceValue(Node* value) = 0;
- virtual void ProduceValueWithLazyBailout(Node* value) = 0;
// Unplugs a node from this expression context. Call this to retrieve the
// result of another Visit function that already plugged the context.
void ReplaceValue() { ProduceValue(ConsumeValue()); }
protected:
- AstContext(AstGraphBuilder* owner, Expression::Context kind,
- BailoutId bailout_id);
+ AstContext(AstGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
AstGraphBuilder* owner() const { return owner_; }
int original_height_;
#endif
- BailoutId bailout_id_;
-
private:
Expression::Context kind_;
AstGraphBuilder* owner_;
// Context to evaluate expression for its side effects only.
class AstGraphBuilder::AstEffectContext V8_FINAL : public AstContext {
public:
- explicit AstEffectContext(AstGraphBuilder* owner, BailoutId bailout_id)
- : AstContext(owner, Expression::kEffect, bailout_id) {}
+ explicit AstEffectContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kEffect) {}
virtual ~AstEffectContext();
virtual void ProduceValue(Node* value) V8_OVERRIDE;
- virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
virtual Node* ConsumeValue() V8_OVERRIDE;
};
// Context to evaluate expression for its value (and side effects).
class AstGraphBuilder::AstValueContext V8_FINAL : public AstContext {
public:
- explicit AstValueContext(AstGraphBuilder* owner, BailoutId bailout_id)
- : AstContext(owner, Expression::kValue, bailout_id) {}
+ explicit AstValueContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kValue) {}
virtual ~AstValueContext();
virtual void ProduceValue(Node* value) V8_OVERRIDE;
- virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
virtual Node* ConsumeValue() V8_OVERRIDE;
};
// Context to evaluate expression for a condition value (and side effects).
class AstGraphBuilder::AstTestContext V8_FINAL : public AstContext {
public:
- explicit AstTestContext(AstGraphBuilder* owner, BailoutId bailout_id)
- : AstContext(owner, Expression::kTest, bailout_id) {}
+ explicit AstTestContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kTest) {}
virtual ~AstTestContext();
virtual void ProduceValue(Node* value) V8_OVERRIDE;
- virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
virtual Node* ConsumeValue() V8_OVERRIDE;
};
}
+void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
+ CallDescriptor::DeoptimizationSupport deopt =
+ static_cast<CallDescriptor::DeoptimizationSupport>(
+ MiscField::decode(instr->opcode()));
+
+ if ((deopt & CallDescriptor::kLazyDeoptimization) != 0) {
+ RecordLazyDeoptimizationEntry(instr);
+ }
+
+ bool needs_frame_state = (deopt & CallDescriptor::kNeedsFrameState) != 0;
+
+ RecordSafepoint(
+ instr->pointer_map(), Safepoint::kSimple, 0,
+ needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
+
+ if ((deopt & CallDescriptor::kNeedsFrameState) != 0) {
+ // If the frame state is present, it starts at argument 1
+ // (just after the code address).
+ InstructionOperandConverter converter(this, instr);
+ // Argument 1 is deoptimization id.
+ int deoptimization_id = converter.ToConstant(instr->InputAt(1)).ToInt32();
+ // The actual frame state values start with argument 2.
+ BuildTranslation(instr, 2, deoptimization_id);
+ safepoints()->RecordLazyDeoptimizationIndex(deoptimization_id);
+ }
+}
+
+
void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) {
InstructionOperandConverter i(this, instr);
void CodeGenerator::BuildTranslation(Instruction* instr,
+ int first_argument_index,
int deoptimization_id) {
// We should build translation only once.
DCHECK_EQ(NULL, deoptimization_states_[deoptimization_id]);
descriptor->size() - descriptor->parameters_count());
for (int i = 0; i < descriptor->size(); i++) {
- AddTranslationForOperand(&translation, instr, instr->InputAt(i));
+ AddTranslationForOperand(&translation, instr,
+ instr->InputAt(i + first_argument_index));
}
deoptimization_states_[deoptimization_id] =
// ===========================================================================
// Deoptimization table construction
+ void AddSafepointAndDeopt(Instruction* instr);
void RecordLazyDeoptimizationEntry(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
- void BuildTranslation(Instruction* instr, int deoptimization_id);
+ void BuildTranslation(Instruction* instr, int first_argument_index,
+ int deoptimization_id);
void AddTranslationForOperand(Translation* translation, Instruction* instr,
InstructionOperand* op);
void AddNopForSmiCodeInlining();
public:
CallOperator(CallDescriptor* descriptor, const char* mnemonic)
: Operator1<CallDescriptor*>(
- IrOpcode::kCall, descriptor->properties(), descriptor->InputCount(),
+ IrOpcode::kCall, descriptor->properties(),
+ descriptor->InputCount() + descriptor->FrameStateCount(),
descriptor->ReturnCount(), mnemonic, descriptor) {}
virtual OStream& PrintParameter(OStream& os) const { // NOLINT
Node* StructuredGraphBuilder::MakeNode(Operator* op, int value_input_count,
Node** value_inputs) {
+ DCHECK(op->InputCount() == value_input_count);
+
bool has_context = OperatorProperties::HasContextInput(op);
+ bool has_framestate = OperatorProperties::HasFrameStateInput(op);
bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
} else {
int input_count_with_deps = value_input_count;
if (has_context) ++input_count_with_deps;
+ if (has_framestate) ++input_count_with_deps;
if (has_control) ++input_count_with_deps;
if (has_effect) ++input_count_with_deps;
void* raw_buffer = alloca(kPointerSize * input_count_with_deps);
if (has_context) {
*current_input++ = current_context();
}
+ if (has_framestate) {
+ // The frame state will be inserted later. Here we misuse
+ // the dead_control node as a sentinel to be later overwritten
+ // with the real frame state.
+ *current_input++ = dead_control();
+ }
if (has_effect) {
*current_input++ = environment_->GetEffectDependency();
}
++i, j--) {
os_ << "|<I" << i.index() << ">X #" << (*i)->id();
}
+ for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
+ ++i, j--) {
+ os_ << "|<I" << i.index() << ">X #" << (*i)->id();
+ }
for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
++i, j--) {
os_ << "|<I" << i.index() << ">E #" << (*i)->id();
break;
case kArchDeoptimize: {
int deoptimization_id = MiscField::decode(instr->opcode());
- BuildTranslation(instr, deoptimization_id);
+ BuildTranslation(instr, 0, deoptimization_id);
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
int entry = Code::kHeaderSize - kHeapObjectTag;
__ call(Operand(reg, entry));
}
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
- if (lazy_deopt) {
- RecordLazyDeoptimizationEntry(instr);
- }
+ AddSafepointAndDeopt(instr);
+
AddNopForSmiCodeInlining();
break;
}
__ mov(esi, FieldOperand(func, JSFunction::kContextOffset));
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- RecordLazyDeoptimizationEntry(instr);
+ AddSafepointAndDeopt(instr);
break;
}
case kSSEFloat64Cmp:
BasicBlock* deoptimization) {
IA32OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- CallBuffer buffer(zone(), descriptor);
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization);
// Push any stack arguments.
- for (int i = buffer.pushed_count - 1; i >= 0; --i) {
- Node* input = buffer.pushed_nodes[i];
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
// TODO(titzer): handle pushing double parameters.
Emit(kIA32Push, NULL,
- g.CanBeImmediate(input) ? g.UseImmediate(input) : g.Use(input));
+ g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- bool lazy_deopt = descriptor->CanLazilyDeoptimize();
- opcode = kIA32CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ opcode = kIA32CallCodeObject;
break;
}
case CallDescriptor::kCallAddress:
UNREACHABLE();
return;
}
+ opcode |= MiscField::encode(descriptor->deoptimization_support());
// Emit the call instruction.
Instruction* call_instr =
- Emit(opcode, buffer.output_count, buffer.outputs,
- buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
if (deoptimization != NULL) {
// Caller clean up of stack for C-style calls.
if (descriptor->kind() == CallDescriptor::kCallAddress &&
- buffer.pushed_count > 0) {
+ buffer.pushed_nodes.size() > 0) {
DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
+ Emit(kPopStack | MiscField::encode(buffer.pushed_nodes.size()), NULL);
}
}
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
struct CallBuffer {
-  CallBuffer(Zone* zone, CallDescriptor* descriptor);
+  // All containers below are zone-allocated.  |frame_state| may be NULL;
+  // callers pass a descriptor only when descriptor->NeedsFrameState().
+  CallBuffer(Zone* zone, CallDescriptor* descriptor,
+             FrameStateDescriptor* frame_state);
-  int output_count;
  CallDescriptor* descriptor;
-  Node** output_nodes;
-  InstructionOperand** outputs;
-  InstructionOperand** fixed_and_control_args;
-  int fixed_count;
-  Node** pushed_nodes;
-  int pushed_count;
+  FrameStateDescriptor* frame_state_descriptor;
+  // Call result nodes (the call itself or its projections).
+  NodeVector output_nodes;
+  // Operands defining the call's outputs, parallel to the live output nodes.
+  InstructionOperandVector outputs;
+  // Operands passed to the call instruction: callee, then the optional
+  // deoptimization id + frame state values, then register/slot arguments,
+  // then the optional continuation/deoptimization labels.
+  InstructionOperandVector instruction_args;
+  // Arguments that require explicit push instructions before the call,
+  // indexed by stack slot; they do not appear in instruction_args.
+  NodeVector pushed_nodes;
-  int input_count() { return descriptor->InputCount(); }
+  int input_count() const { return descriptor->InputCount(); }
-  int control_count() { return descriptor->CanLazilyDeoptimize() ? 2 : 0; }
+  int frame_state_count() const { return descriptor->FrameStateCount(); }
-  int fixed_and_control_count() { return fixed_count + control_count(); }
+  // Number of instruction_args slots consumed by frame state information:
+  // the frame state's value inputs plus one slot for the deoptimization id
+  // immediate; zero when no frame state descriptor is present.
+  int frame_state_value_count() const {
+    return (frame_state_descriptor == NULL)
+               ? 0
+               : (frame_state_descriptor->size() + 1);
+  }
+
+  // Two label operands (continuation and deoptimization block) are appended
+  // when the call can lazily deoptimize.
+  int control_count() const {
+    return descriptor->CanLazilyDeoptimize() ? 2 : 0;
+  }
};
} // namespace compiler
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
-CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
-    : output_count(0),
-      descriptor(d),
-      output_nodes(zone->NewArray<Node*>(d->ReturnCount())),
-      outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())),
-      fixed_and_control_args(
-          zone->NewArray<InstructionOperand*>(input_count() + control_count())),
-      fixed_count(0),
-      pushed_nodes(zone->NewArray<Node*>(input_count())),
-      pushed_count(0) {
-  if (d->ReturnCount() > 1) {
-    memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount());  // NOLINT
-  }
-  memset(pushed_nodes, 0, sizeof(Node*) * input_count());  // NOLINT
+// Constructs an empty buffer whose vectors all allocate from |zone|.
+// |frame_desc| may be NULL when the call carries no frame state.
+CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d,
+                       FrameStateDescriptor* frame_desc)
+    : descriptor(d),
+      frame_state_descriptor(frame_desc),
+      output_nodes(NodeVector::allocator_type(zone)),
+      outputs(InstructionOperandVector::allocator_type(zone)),
+      instruction_args(InstructionOperandVector::allocator_type(zone)),
+      pushed_nodes(NodeVector::allocator_type(zone)) {
+  // Reserve the worst-case sizes up front so that filling the buffer in
+  // InitializeCallBuffer does not reallocate.
+  output_nodes.reserve(d->ReturnCount());
+  outputs.reserve(d->ReturnCount());
+  pushed_nodes.reserve(input_count());
+  instruction_args.reserve(input_count() + control_count() +
+                           frame_state_value_count());
}
OperandGenerator g(this);
DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()),
- buffer->input_count());
+ buffer->input_count() + buffer->frame_state_count());
if (buffer->descriptor->ReturnCount() > 0) {
// Collect the projections that represent multiple outputs from this call.
if (buffer->descriptor->ReturnCount() == 1) {
- buffer->output_nodes[0] = call;
+ buffer->output_nodes.push_back(call);
} else {
- call->CollectProjections(buffer->descriptor->ReturnCount(),
- buffer->output_nodes);
+ call->CollectProjections(&buffer->output_nodes);
+ DCHECK(buffer->output_nodes.size() <=
+ static_cast<size_t>(buffer->descriptor->ReturnCount()));
}
// Filter out the outputs that aren't live because no projection uses them.
- for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) {
+ for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
if (buffer->output_nodes[i] != NULL) {
Node* output = buffer->output_nodes[i];
- LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
+ LinkageLocation location =
+ buffer->descriptor->GetReturnLocation(static_cast<int>(i));
MarkAsRepresentation(location.representation(), output);
- buffer->outputs[buffer->output_count++] =
- g.DefineAsLocation(output, location);
+ buffer->outputs.push_back(g.DefineAsLocation(output, location));
}
}
}
- buffer->fixed_count = 1; // First argument is always the callee.
+ // The first argument is always the callee code.
Node* callee = call->InputAt(0);
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
- buffer->fixed_and_control_args[0] =
+ buffer->instruction_args.push_back(
(call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
? g.UseImmediate(callee)
- : g.UseRegister(callee);
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallAddress:
- buffer->fixed_and_control_args[0] =
+ buffer->instruction_args.push_back(
(call_address_immediate &&
(callee->opcode() == IrOpcode::kInt32Constant ||
callee->opcode() == IrOpcode::kInt64Constant))
? g.UseImmediate(callee)
- : g.UseRegister(callee);
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallJSFunction:
- buffer->fixed_and_control_args[0] =
- g.UseLocation(callee, buffer->descriptor->GetInputLocation(0));
+ buffer->instruction_args.push_back(
+ g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
break;
}
+ DCHECK_EQ(1, buffer->instruction_args.size());
+
+ // If the call needs a frame state, we insert the state information as
+ // follows (n is the number of value inputs to the frame state):
+ // arg 1 : deoptimization id.
+ // arg 2 - arg (n + 1) : value inputs to the frame state.
+ if (buffer->frame_state_descriptor != NULL) {
+ int deoptimization_id =
+ sequence()->AddDeoptimizationEntry(buffer->frame_state_descriptor);
+ buffer->instruction_args.push_back(g.TempImmediate(deoptimization_id));
+
+ Node* frame_state = call->InputAt(buffer->descriptor->InputCount());
+ AddFrameStateInputs(frame_state, &buffer->instruction_args,
+ buffer->frame_state_descriptor);
+ }
+ DCHECK_EQ(1 + buffer->frame_state_value_count(),
+ buffer->instruction_args.size());
int input_count = buffer->input_count();
- // Split the arguments into pushed_nodes and fixed_args. Pushed arguments
- // require an explicit push instruction before the call and do not appear
- // as arguments to the call. Everything else ends up as an InstructionOperand
- // argument to the call.
+ // Split the arguments into pushed_nodes and instruction_args. Pushed
+ // arguments require an explicit push instruction before the call and do
+ // not appear as arguments to the call. Everything else ends up
+ // as an InstructionOperand argument to the call.
InputIter iter(call->inputs().begin());
+ int pushed_count = 0;
for (int index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
DCHECK(index == iter.index());
+ DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
if (index == 0) continue; // The first argument (callee) is already done.
InstructionOperand* op =
g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
- DCHECK(buffer->pushed_nodes[stack_index] == NULL);
+ if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
+ buffer->pushed_nodes.resize(stack_index + 1, NULL);
+ }
+ DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]);
buffer->pushed_nodes[stack_index] = *iter;
- buffer->pushed_count++;
+ pushed_count++;
} else {
- buffer->fixed_and_control_args[buffer->fixed_count] = op;
- buffer->fixed_count++;
+ buffer->instruction_args.push_back(op);
}
}
+ CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
// If the call can deoptimize, we add the continuation and deoptimization
// block labels.
if (buffer->descriptor->CanLazilyDeoptimize()) {
DCHECK(cont_node != NULL);
DCHECK(deopt_node != NULL);
- buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
- buffer->fixed_and_control_args[buffer->fixed_count + 1] =
- g.Label(deopt_node);
+ buffer->instruction_args.push_back(g.Label(cont_node));
+ buffer->instruction_args.push_back(g.Label(deopt_node));
} else {
DCHECK(cont_node == NULL);
DCHECK(deopt_node == NULL);
}
- DCHECK(input_count == (buffer->fixed_count + buffer->pushed_count));
+ DCHECK(input_count ==
+ (buffer->instruction_args.size() - buffer->control_count() +
+ buffer->pushed_nodes.size() - buffer->frame_state_value_count()));
}
}
+// Builds a zone-allocated FrameStateDescriptor from a kFrameState node:
+// the bailout id is the node's operator parameter, and the parameter,
+// local and stack counts come from the operator parameters of the node's
+// first three inputs.
+FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
+    Node* state) {
+  DCHECK(state->op()->opcode() == IrOpcode::kFrameState);
+  BailoutId ast_id = OpParameter<BailoutId>(state);
+  Node* parameters = state->InputAt(0);
+  Node* locals = state->InputAt(1);
+  Node* stack = state->InputAt(2);
+
+  return new (instruction_zone())
+      FrameStateDescriptor(ast_id, OpParameter<int>(parameters),
+                           OpParameter<int>(locals), OpParameter<int>(stack));
+}
+
+
static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
switch (input->opcode()) {
case IrOpcode::kInt32Constant:
}
-void InstructionSelector::VisitDeoptimize(Node* deopt) {
-  DCHECK(deopt->op()->opcode() == IrOpcode::kDeoptimize);
-  Node* state = deopt->InputAt(0);
-  DCHECK(state->op()->opcode() == IrOpcode::kFrameState);
-  BailoutId ast_id = OpParameter<BailoutId>(state);
+// Appends one operand per frame-state value to |inputs|, in the order
+// parameters, locals, stack.  |descriptor| must describe the same
+// kFrameState node (see GetFrameStateDescriptor); the DCHECKs below
+// verify the counts agree.
+void InstructionSelector::AddFrameStateInputs(
+    Node* state, InstructionOperandVector* inputs,
+    FrameStateDescriptor* descriptor) {
+  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
-  // Add the inputs.
  Node* parameters = state->InputAt(0);
-  int parameters_count = OpParameter<int>(parameters);
-
  Node* locals = state->InputAt(1);
-  int locals_count = OpParameter<int>(locals);
-
  Node* stack = state->InputAt(2);
-  int stack_count = OpParameter<int>(stack);
+
+  DCHECK_EQ(descriptor->parameters_count(), parameters->InputCount());
+  DCHECK_EQ(descriptor->locals_count(), locals->InputCount());
+  DCHECK_EQ(descriptor->stack_count(), stack->InputCount());
  OperandGenerator g(this);
-  std::vector<InstructionOperand*> inputs;
-  inputs.reserve(parameters_count + locals_count + stack_count);
-  for (int i = 0; i < parameters_count; i++) {
-    inputs.push_back(UseOrImmediate(&g, parameters->InputAt(i)));
+  // Each value becomes either an immediate (for constants) or a use of the
+  // producing node; see UseOrImmediate.
+  for (int i = 0; i < descriptor->parameters_count(); i++) {
+    inputs->push_back(UseOrImmediate(&g, parameters->InputAt(i)));
  }
-  for (int i = 0; i < locals_count; i++) {
-    inputs.push_back(UseOrImmediate(&g, locals->InputAt(i)));
+  for (int i = 0; i < descriptor->locals_count(); i++) {
+    inputs->push_back(UseOrImmediate(&g, locals->InputAt(i)));
  }
-  for (int i = 0; i < stack_count; i++) {
-    inputs.push_back(UseOrImmediate(&g, stack->InputAt(i)));
+  for (int i = 0; i < descriptor->stack_count(); i++) {
+    inputs->push_back(UseOrImmediate(&g, stack->InputAt(i)));
  }
+}
+
+
+void InstructionSelector::VisitDeoptimize(Node* deopt) {
+ DCHECK(deopt->op()->opcode() == IrOpcode::kDeoptimize);
+ Node* state = deopt->InputAt(0);
+ FrameStateDescriptor* descriptor = GetFrameStateDescriptor(state);
+
+ InstructionOperandVector inputs(
+ (InstructionOperandVector::allocator_type(zone())));
+ inputs.reserve(descriptor->size());
- FrameStateDescriptor* descriptor = new (instruction_zone())
- FrameStateDescriptor(ast_id, parameters_count, locals_count, stack_count);
+ AddFrameStateInputs(state, &inputs, descriptor);
DCHECK_EQ(descriptor->size(), inputs.size());
bool call_address_immediate, BasicBlock* cont_node,
BasicBlock* deopt_node);
+ FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+ void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
+ FrameStateDescriptor* descriptor);
+
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
// ===========================================================================
unsigned value_;
};
+typedef std::vector<InstructionOperand*, zone_allocator<InstructionOperand*> >
+ InstructionOperandVector;
+
OStream& operator<<(OStream& os, const InstructionOperand& op);
class UnallocatedOperand : public InstructionOperand {
static CallDescriptor::DeoptimizationSupport DeoptimizationSupportForNode(
    Node* node) {
-  return OperatorProperties::CanLazilyDeoptimize(node->op())
-             ? CallDescriptor::kCanDeoptimize
-             : CallDescriptor::kCannotDeoptimize;
+  // Combine the independent bit flags: lazy-deopt support and the need for
+  // a frame state input are queried separately from the node's operator.
+  int result = CallDescriptor::kNoDeoptimization;
+  if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
+    result |= CallDescriptor::kLazyDeoptimization;
+  }
+  if (OperatorProperties::HasFrameStateInput(node->op())) {
+    result |= CallDescriptor::kNeedsFrameState;
+  }
+  return static_cast<CallDescriptor::DeoptimizationSupport>(result);
}
locations, // locations
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
- CallDescriptor::kCanDeoptimize); // deoptimization
+ CallDescriptor::kLazyDeoptimization); // deoptimization
}
return new (zone) CallDescriptor(
CallDescriptor::kCallAddress, 1, num_params, num_params + 1, locations,
Operator::kNoProperties, LinkageTraits::CCalleeSaveRegisters(),
- CallDescriptor::kCannotDeoptimize); // TODO(jarin) should deoptimize!
+ CallDescriptor::kNoDeoptimization); // TODO(jarin) should deoptimize!
}
};
}
OStream& operator<<(OStream& os, const CallDescriptor& d) {
  // TODO(svenpanne) Output properties etc. and be less cryptic.
+  // Format: kind:name:r<returns>p<params>i<inputs>f<frame states>[deopt]
  return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
-            << "p" << d.ParameterCount() << "i" << d.InputCount()
-            << (d.CanLazilyDeoptimize() ? "deopt" : "");
+            << "p" << d.ParameterCount() << "i" << d.InputCount() << "f"
+            << d.FrameStateCount() << (d.CanLazilyDeoptimize() ? "deopt" : "");
}
// or an address--all of which require different machine sequences to call.
enum Kind { kCallCodeObject, kCallJSFunction, kCallAddress };
-  enum DeoptimizationSupport { kCanDeoptimize, kCannotDeoptimize };
+  // Bit flags (combined with bitwise-or, see DeoptimizationSupportForNode)
+  // describing which deoptimization machinery a call needs.
+  // TODO(jarin) kLazyDeoptimization and kNeedsFrameState should be unified.
+  enum DeoptimizationSupport {
+    kNoDeoptimization = 0,
+    kLazyDeoptimization = 1,
+    kNeedsFrameState = 2
+  };
CallDescriptor(Kind kind, int8_t return_count, int16_t parameter_count,
int16_t input_count, LinkageLocation* locations,
int InputCount() const { return input_count_; }
+  // A call carries at most one frame state input, present iff the
+  // kNeedsFrameState flag is set.
+  int FrameStateCount() const { return NeedsFrameState() ? 1 : 0; }
+
  bool CanLazilyDeoptimize() const {
-    return deoptimization_support_ == kCanDeoptimize;
+    return (deoptimization_support() & kLazyDeoptimization) != 0;
+  }
+
+  bool NeedsFrameState() const {
+    return (deoptimization_support() & kNeedsFrameState) != 0;
+  }
+
+  // Raw flag set; individual bits are tested by the predicates above.
+  DeoptimizationSupport deoptimization_support() const {
+    return deoptimization_support_;
}
LinkageLocation GetReturnLocation(int index) {
Runtime::FunctionId function, int parameter_count,
Operator::Property properties,
CallDescriptor::DeoptimizationSupport can_deoptimize =
- CallDescriptor::kCannotDeoptimize);
+ CallDescriptor::kNoDeoptimization);
static CallDescriptor* GetRuntimeCallDescriptor(
Runtime::FunctionId function, int parameter_count,
Operator::Property properties,
CallDescriptor* GetStubCallDescriptor(
CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count = 0,
CallDescriptor::DeoptimizationSupport can_deoptimize =
- CallDescriptor::kCannotDeoptimize);
+ CallDescriptor::kNoDeoptimization);
static CallDescriptor* GetStubCallDescriptor(
CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone);
return PastValueIndex(node);
}
-inline int NodeProperties::FirstEffectIndex(Node* node) {
+// Frame state inputs are placed between the context input and the effect
+// inputs in a node's input ordering.
+inline int NodeProperties::FirstFrameStateIndex(Node* node) {
  return PastContextIndex(node);
}
+inline int NodeProperties::FirstEffectIndex(Node* node) {
+  return PastFrameStateIndex(node);
+}
+
inline int NodeProperties::FirstControlIndex(Node* node) {
return PastEffectIndex(node);
}
OperatorProperties::GetContextInputCount(node->op());
}
+// First index after the frame state inputs (0 or 1 of them per operator).
+inline int NodeProperties::PastFrameStateIndex(Node* node) {
+  return FirstFrameStateIndex(node) +
+         OperatorProperties::GetFrameStateInputCount(node->op());
+}
+
inline int NodeProperties::PastEffectIndex(Node* node) {
return FirstEffectIndex(node) +
OperatorProperties::GetEffectInputCount(node->op());
return node->InputAt(FirstContextIndex(node));
}
+// Returns the node's single frame state input; only valid for operators
+// that have one (DCHECKed).
+inline Node* NodeProperties::GetFrameStateInput(Node* node) {
+  DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+  return node->InputAt(FirstFrameStateIndex(node));
+}
+
inline Node* NodeProperties::GetEffectInput(Node* node, int index) {
DCHECK(0 <= index &&
index < OperatorProperties::GetEffectInputCount(node->op()));
return node->InputAt(FirstControlIndex(node) + index);
}
+// Returns the input index of the frame state input; only valid for
+// operators that have one (DCHECKed).
+inline int NodeProperties::GetFrameStateIndex(Node* node) {
+  DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+  return FirstFrameStateIndex(node);
+}
// -----------------------------------------------------------------------------
// Edge kinds.
public:
static inline Node* GetValueInput(Node* node, int index);
static inline Node* GetContextInput(Node* node);
+ static inline Node* GetFrameStateInput(Node* node);
static inline Node* GetEffectInput(Node* node, int index = 0);
static inline Node* GetControlInput(Node* node, int index = 0);
+ static inline int GetFrameStateIndex(Node* node);
+
static inline bool IsValueEdge(Node::Edge edge);
static inline bool IsContextEdge(Node::Edge edge);
static inline bool IsEffectEdge(Node::Edge edge);
private:
static inline int FirstValueIndex(Node* node);
static inline int FirstContextIndex(Node* node);
+ static inline int FirstFrameStateIndex(Node* node);
static inline int FirstEffectIndex(Node* node);
static inline int FirstControlIndex(Node* node);
static inline int PastValueIndex(Node* node);
static inline int PastContextIndex(Node* node);
+ static inline int PastFrameStateIndex(Node* node);
static inline int PastEffectIndex(Node* node);
static inline int PastControlIndex(Node* node);
namespace internal {
namespace compiler {
-void Node::CollectProjections(int projection_count, Node** projections) {
-  for (int i = 0; i < projection_count; ++i) projections[i] = NULL;
+// Appends all kProjection uses of this node to |projections|.
+// NOTE(review): nodes are pushed in use-iteration order, not in projection
+// index order (the old indexed version placed each at its index) — confirm
+// callers do not rely on index ordering.
+void Node::CollectProjections(NodeVector* projections) {
  for (UseIter i = uses().begin(); i != uses().end(); ++i) {
    if ((*i)->opcode() != IrOpcode::kProjection) continue;
-    int32_t index = OpParameter<int32_t>(*i);
-    DCHECK_GE(index, 0);
-    DCHECK_LT(index, projection_count);
-    DCHECK_EQ(NULL, projections[index]);
-    projections[index] = *i;
+    DCHECK_GE(OpParameter<int32_t>(*i), 0);
+    projections->push_back(*i);
  }
}
void Initialize(Operator* op) { set_op(op); }
- void CollectProjections(int projection_count, Node** projections);
+ void CollectProjections(
+ std::vector<Node*, zone_allocator<Node*> >* projections);
Node* FindProjection(int32_t projection_index);
};
return OperatorProperties::GetControlInputCount(op) > 0;
}
+// Whether |op| takes a frame state input.  Gated entirely behind the
+// --turbo-deoptimization flag; when enabled, JS function calls always take
+// one, and runtime calls take one only for a whitelisted set of functions.
+inline bool OperatorProperties::HasFrameStateInput(Operator* op) {
+  if (!FLAG_turbo_deoptimization) {
+    return false;
+  }
+
+  switch (op->opcode()) {
+    case IrOpcode::kJSCallFunction:
+      return true;
+    case IrOpcode::kJSCallRuntime: {
+      Runtime::FunctionId function =
+          reinterpret_cast<Operator1<Runtime::FunctionId>*>(op)->parameter();
+      // TODO(jarin) At the moment, we only add frame state for
+      // a few chosen runtime functions.
+      switch (function) {
+        case Runtime::kDebugBreak:
+        case Runtime::kDeoptimizeFunction:
+          return true;
+        default:
+          return false;
+      }
+      UNREACHABLE();
+    }
+
+    default:
+      return false;
+  }
+}
inline int OperatorProperties::GetValueInputCount(Operator* op) {
return op->InputCount();
return OperatorProperties::HasContextInput(op) ? 1 : 0;
}
+// An operator has either zero or one frame state input.
+inline int OperatorProperties::GetFrameStateInputCount(Operator* op) {
+  return OperatorProperties::HasFrameStateInput(op) ? 1 : 0;
+}
+
inline int OperatorProperties::GetEffectInputCount(Operator* op) {
if (op->opcode() == IrOpcode::kEffectPhi ||
op->opcode() == IrOpcode::kFinish) {
inline int OperatorProperties::GetTotalInputCount(Operator* op) {
+  // The frame state input count now participates in the total alongside
+  // value, context, effect and control inputs.
  return GetValueInputCount(op) + GetContextInputCount(op) +
-         GetEffectInputCount(op) + GetControlInputCount(op);
+         GetFrameStateInputCount(op) + GetEffectInputCount(op) +
+         GetControlInputCount(op);
}
// -----------------------------------------------------------------------------
Runtime::FunctionId function =
reinterpret_cast<Operator1<Runtime::FunctionId>*>(op)->parameter();
// TODO(jarin) At the moment, we only support lazy deoptimization for
- // the %DeoptimizeFunction runtime function.
- return function == Runtime::kDeoptimizeFunction;
+ // a few chosen runtime functions.
+ switch (function) {
+ case Runtime::kDebugBreak:
+ case Runtime::kDeoptimizeFunction:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
}
// JS function calls
static inline bool HasContextInput(Operator* node);
static inline bool HasEffectInput(Operator* node);
static inline bool HasControlInput(Operator* node);
+ static inline bool HasFrameStateInput(Operator* node);
static inline int GetValueInputCount(Operator* op);
static inline int GetContextInputCount(Operator* op);
static inline int GetEffectInputCount(Operator* op);
static inline int GetControlInputCount(Operator* op);
+ static inline int GetFrameStateInputCount(Operator* op);
static inline int GetTotalInputCount(Operator* op);
static inline bool HasValueOutput(Operator* op);
Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
Node* arg0, Label* continuation,
Label* deoptimization) {
- CallDescriptor* descriptor =
- Linkage::GetRuntimeCallDescriptor(function, 1, Operator::kNoProperties,
- CallDescriptor::kCanDeoptimize, zone());
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ function, 1, Operator::kNoProperties, CallDescriptor::kLazyDeoptimization,
+ zone());
Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
Node* ref = NewNode(
GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
int value_count = OperatorProperties::GetValueInputCount(node->op());
int context_count = OperatorProperties::GetContextInputCount(node->op());
+ int frame_state_count =
+ OperatorProperties::GetFrameStateInputCount(node->op());
int effect_count = OperatorProperties::GetEffectInputCount(node->op());
int control_count = OperatorProperties::GetControlInputCount(node->op());
// Verify number of inputs matches up.
- int input_count = value_count + context_count + effect_count + control_count;
+ int input_count = value_count + context_count + frame_state_count +
+ effect_count + control_count;
CHECK_EQ(input_count, node->InputCount());
// Verify all value inputs actually produce a value.
break;
case kArchDeoptimize: {
int deoptimization_id = MiscField::decode(instr->opcode());
- BuildTranslation(instr, deoptimization_id);
+ BuildTranslation(instr, 0, deoptimization_id);
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
int entry = Code::kHeaderSize - kHeapObjectTag;
__ Call(Operand(reg, entry));
}
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
- if (lazy_deopt) {
- RecordLazyDeoptimizationEntry(instr);
- }
+
+ AddSafepointAndDeopt(instr);
+
AddNopForSmiCodeInlining();
break;
}
__ movp(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- RecordLazyDeoptimizationEntry(instr);
+ AddSafepointAndDeopt(instr);
break;
}
case kSSEFloat64Cmp: {
BasicBlock* deoptimization) {
X64OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization);
// TODO(dcarney): stack alignment for c calls.
// TODO(dcarney): shadow space on window for c calls.
// Push any stack arguments.
- for (int i = buffer.pushed_count - 1; i >= 0; --i) {
- Node* input = buffer.pushed_nodes[i];
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
// TODO(titzer): handle pushing double parameters.
- if (g.CanBeImmediate(input)) {
- Emit(kX64PushI, NULL, g.UseImmediate(input));
+ if (g.CanBeImmediate(*input)) {
+ Emit(kX64PushI, NULL, g.UseImmediate(*input));
} else {
- Emit(kX64Push, NULL, g.Use(input));
+ Emit(kX64Push, NULL, g.Use(*input));
}
}
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- bool lazy_deopt = descriptor->CanLazilyDeoptimize();
- opcode = kX64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ opcode = kX64CallCodeObject;
break;
}
case CallDescriptor::kCallAddress:
UNREACHABLE();
return;
}
+ opcode |= MiscField::encode(descriptor->deoptimization_support());
// Emit the call instruction.
Instruction* call_instr =
- Emit(opcode, buffer.output_count, buffer.outputs,
- buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
if (deoptimization != NULL) {
// Caller clean up of stack for C-style calls.
if (descriptor->kind() == CallDescriptor::kCallAddress &&
- buffer.pushed_count > 0) {
+ !buffer.pushed_nodes.empty()) {
DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
+ Emit(kPopStack |
+ MiscField::encode(static_cast<int>(buffer.pushed_nodes.size())),
+ NULL);
}
}
__ DebugBreak();
// Ignore the return value.
+
+ PrepareForBailoutForId(stmt->DebugBreakId(), NO_REGISTERS);
}
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --turbo-deoptimization
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --turbo-deoptimization
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug;
# Support for %GetFrameDetails is missing and requires checkpoints.
'debug-backtrace-text': [PASS, NO_VARIANTS],
'debug-break-inline': [PASS, NO_VARIANTS],
- 'debug-evaluate-arguments': [PASS, NO_VARIANTS],
'debug-evaluate-bool-constructor': [PASS, NO_VARIANTS],
'debug-evaluate-closure': [PASS, NO_VARIANTS],
'debug-evaluate-const': [PASS, NO_VARIANTS],
'debug-evaluate-with': [PASS, NO_VARIANTS],
'debug-liveedit-double-call': [PASS, NO_VARIANTS],
'debug-liveedit-restart-frame': [PASS, NO_VARIANTS],
- 'debug-receiver': [PASS, NO_VARIANTS],
'debug-return-value': [PASS, NO_VARIANTS],
'debug-scopes': [PASS, NO_VARIANTS],
'debug-set-variable-value': [PASS, NO_VARIANTS],