}
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) const {
stream->Add("[r2] #%d / ", arity());
}
}
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ return DefineAsRegister(new LLoadContextSlot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
return DefineAsRegister(
new LLoadNamedField(UseRegisterAtStart(instr->object())));
// LGlobalObject
// LGlobalReceiver
// LLabel
-// LLayzBailout
+// LLazyBailout
+// LLoadContextSlot
// LLoadGlobal
// LMaterializedLiteral
// LArrayLiteral
V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
};
+class LLoadContextSlot: public LInstruction {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int context_chain_length() const {
+ return hydrogen()->context_chain_length();
+ }
+ int slot_index() const { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LPushArgument: public LUnaryOperation {
public:
explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
}
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ // TODO(antonm): load a context with a separate instruction.
+ Register result = ToRegister(instr->result());
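+ // LoadContext walks up context_chain_length() contexts; the slot is then
+ // read from that context.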
+ __ LoadContext(result, instr->context_chain_length());
+ __ ldr(result, ContextOperand(result, instr->slot_index()));
+}
+
+
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->input());
Register result = ToRegister(instr->result());
}
-void LCodeGen::LoadPrototype(Register result,
- Handle<JSObject> prototype) {
- if (Heap::InNewSpace(*prototype)) {
+void LCodeGen::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (Heap::InNewSpace(*object)) {
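+ // Objects in new space may move; embed an old-space property cell instead
+ // and load the object from it at run time.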
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(prototype);
+ Factory::NewJSGlobalPropertyCell(object);
__ mov(result, Operand(cell));
__ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
} else {
- __ mov(result, Operand(prototype));
+ __ mov(result, Operand(object));
}
}
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadPrototype(temp1, current_prototype);
+ LoadHeapObject(temp1, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadPrototype(temp1, current_prototype);
+ LoadHeapObject(temp1, current_prototype);
}
// Check the holder map.
int arity,
LInstruction* instr);
- void LoadPrototype(Register result, Handle<JSObject> prototype);
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
}
-bool FunctionLiteral::AllowOptimize() {
- // We can't deal with heap-allocated locals.
- return scope()->num_heap_slots() == 0;
-}
-
-
ObjectLiteral::Property::Property(Literal* key, Expression* value) {
emit_store_ = true;
key_ = key;
int num_parameters() { return num_parameters_; }
bool AllowsLazyCompilation();
- bool AllowOptimize();
Handle<String> debug_name() const {
if (name_->length() > 0) return name_;
}
+void CompilationInfo::DisableOptimization() {
+ if (FLAG_optimize_closures) {
+ // If closure optimizations are allowed and this is an optimizable
+ // closure, mark it accordingly.
+ bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
+ if (is_closure) {
+ bool is_optimizable_closure =
+ !scope_->outer_scope_calls_eval() && !scope_->inside_with();
+ if (is_optimizable_closure) {
+ SetMode(BASE);
+ return;
+ }
+ }
+ }
+
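+ // Not an optimizable closure; disable optimization altogether.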
+ SetMode(NONOPT);
+}
+
+
// Determine whether to use the full compiler for all code. If the flag
// --always-full-compiler is specified this is the case. For the virtual frame
// based compiler the full compiler is also used if a debugger is connected, as
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
}
- void DisableOptimization() { SetMode(NONOPT); }
+ void DisableOptimization();
// Deoptimization support.
bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
// Determine whether or not we can adaptively optimize.
bool AllowOptimize() {
- return V8::UseCrankshaft() &&
- !closure_.is_null() &&
- function_->AllowOptimize();
+ return V8::UseCrankshaft() && !closure_.is_null();
}
private:
#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
+DEFINE_bool(optimize_closures, true, "optimize closures")
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_bool(debug_code, false,
}
+void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
+ stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
// Implementation of type inference and type conversions. Calculates
// the inferred type of this instruction based on the input operands.
// HGlobalObject
// HGlobalReceiver
// HLeaveInlined
+// HLoadContextSlot
// HLoadGlobal
// HMaterializedLiteral
// HArrayLiteral
V(JSArrayLength) \
V(ClassOfTest) \
V(LeaveInlined) \
+ V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
};
+class HLoadContextSlot: public HInstruction {
+ public:
+ HLoadContextSlot(int context_chain_length, int slot_index)
+ : context_chain_length_(context_chain_length), slot_index_(slot_index) {
+ set_representation(Representation::Tagged());
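+ // The load can be value-numbered, but calls may write to context slots.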
+ SetFlag(kUseGVN);
+ SetFlag(kDependsOnCalls);
+ }
+
+ int context_chain_length() const { return context_chain_length_; }
+ int slot_index() const { return slot_index_; }
+
+ virtual void PrintDataTo(StringStream* stream) const;
+
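+ // Hash both fields so GVN distinguishes loads of different slots.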
+ virtual intptr_t Hashcode() const {
+ return context_chain_length() * 29 + slot_index();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const {
+ HLoadContextSlot* b = HLoadContextSlot::cast(other);
+ return (context_chain_length() == b->context_chain_length()) &&
+     (slot_index() == b->slot_index());
+ }
+
+ private:
+ int context_chain_length_;
+ int slot_index_;
+};
+
+
class HLoadNamedField: public HUnaryOperation {
public:
HLoadNamedField(HValue* object, bool is_in_object, int offset)
BAILOUT("unsupported context for arguments object");
}
ast_context()->ReturnValue(environment()->Lookup(variable));
+ } else if (variable->IsContextSlot()) {
+ if (variable->mode() == Variable::CONST) {
+ BAILOUT("reference to const context slot");
+ }
+ Slot* slot = variable->AsSlot();
+ CompilationInfo* info = graph()->info();
+ int context_chain_length = info->function()->scope()->
+ ContextChainLength(slot->var()->scope());
+ ASSERT(context_chain_length >= 0);
+ // TODO(antonm): if slot's value is not modified by closures, instead
+ // of reading it out of context, we could just embed the value as
+ // a constant.
+ HLoadContextSlot* instr =
+ new HLoadContextSlot(context_chain_length, slot->index());
+ ast_context()->ReturnInstruction(instr, expr->id());
} else if (variable->is_global()) {
LookupResult lookup;
LookupGlobalPropertyCell(variable, &lookup, false);
HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
ast_context()->ReturnInstruction(instr, expr->id());
} else {
- BAILOUT("reference to non-stack-allocated/non-global variable");
+ BAILOUT("reference to a variable which requires dynamic lookup");
}
}
Top(),
expr->position(),
expr->AssignmentId());
- } else {
+ } else if (var->IsStackAllocated()) {
// We allow reference to the arguments object only in assignments
// to local variables to make sure that the arguments object does
// not escape and is not modified.
VISIT_FOR_VALUE(expr->value());
}
Bind(proxy->var(), Top());
+ } else {
+ BAILOUT("Assigning to no non-stack-allocated/non-global variable");
}
// Return the value.
ast_context()->ReturnValue(Pop());
}
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ // TODO(antonm): load a context with a separate instruction.
+ Register result = ToRegister(instr->result());
+ __ LoadContext(result, instr->context_chain_length());
+ __ mov(result, ContextOperand(result, instr->slot_index()));
+}
+
+
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->input());
Register result = ToRegister(instr->result());
}
-void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
- if (Heap::InNewSpace(*prototype)) {
+void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
+ if (Heap::InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(prototype);
+ Factory::NewJSGlobalPropertyCell(object);
__ mov(result, Operand::Cell(cell));
} else {
- __ mov(result, prototype);
+ __ mov(result, object);
}
}
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadPrototype(reg, current_prototype);
+ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadPrototype(reg, current_prototype);
+ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
int arity,
LInstruction* instr);
- void LoadPrototype(Register result, Handle<JSObject> prototype);
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
}
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[ecx] #%d / ", arity());
}
}
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ return DefineAsRegister(new LLoadContextSlot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
// LGlobalReceiver
// LGoto
// LLazyBailout
+// LLoadContextSlot
// LLoadGlobal
// LMaterializedLiteral
// LArrayLiteral
V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
};
+class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int context_chain_length() const {
+ return hydrogen()->context_chain_length();
+ }
+ int slot_index() const { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LPushArgument: public LUnaryOperation<0> {
public:
explicit LPushArgument(LOperand* argument) : LUnaryOperation<0>(argument) {}
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
- // If optimization has been disabled for the shared function info,
- // reflect that in the code object so it will not be counted as
- // optimizable code.
- ASSERT(value->kind() != Code::FUNCTION ||
- !value->optimizable() ||
- this->code() == Builtins::builtin(Builtins::Illegal) ||
- this->allows_lazy_compilation());
WRITE_FIELD(this, kCodeOffset, value);
CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode);
}
void JSFunction::MarkForLazyRecompilation() {
ASSERT(is_compiled() && !IsOptimized());
- ASSERT(shared()->allows_lazy_compilation());
+ ASSERT(shared()->allows_lazy_compilation() ||
+ code()->optimizable());
ReplaceCode(Builtins::builtin(Builtins::LazyRecompile));
}
}
-// Assumes code has been parsed and scopes hve been analyzed. Mutates the
+// Assumes code has been parsed and scopes have been analyzed. Mutates the
// AST, so the AST should not continue to be used in the case of failure.
bool Rewriter::Rewrite(CompilationInfo* info) {
FunctionLiteral* function = info->function();
}
SharedFunctionInfo* shared = function->shared();
- // If the code is not optimizable, don't try OSR.
- if (!shared->code()->optimizable()) return;
+ // If the code is not optimizable or references context slots, don't try OSR.
+ if (!shared->code()->optimizable() || !shared->allows_lazy_compilation()) {
+ return;
+ }
// We are not prepared to do OSR for a function that already has an
// allocated arguments object. The optimized code would bypass it for
// Array, and Object, and some web code
// doesn't like seeing source code for constructors.
target->shared()->set_script(Heap::undefined_value());
+ target->shared()->code()->set_optimizable(false);
// Clear the optimization hints related to the compiled code as these are no
// longer valid when the code is overwritten.
target->shared()->ClearThisPropertyAssignmentsInfo();
// code from the full compiler.
if (!function->shared()->code()->optimizable() ||
Debug::has_break_points()) {
+ if (FLAG_trace_opt) {
+ PrintF("[failed to optimize ");
+ function->PrintName();
+ PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
+ function->shared()->code()->optimizable() ? "T" : "F",
+ Debug::has_break_points() ? "T" : "F");
+ }
function->ReplaceCode(function->shared()->code());
return function->code();
}
if (CompileOptimized(function, AstNode::kNoNumber)) {
return function->code();
}
+ if (FLAG_trace_opt) {
+ PrintF("[failed to optimize ");
+ function->PrintName();
+ PrintF(": optimized compilation failed]\n");
+ }
function->ReplaceCode(function->shared()->code());
return Failure::Exception();
}
// Dummy constructor
Scope::Scope(Type type)
- : outer_scope_(NULL),
- inner_scopes_(0),
- type_(type),
- scope_name_(Factory::empty_symbol()),
+ : inner_scopes_(0),
variables_(false),
temps_(0),
params_(0),
- dynamics_(NULL),
unresolved_(0),
- decls_(0),
- receiver_(NULL),
- function_(NULL),
- arguments_(NULL),
- arguments_shadow_(NULL),
- illegal_redecl_(NULL),
- scope_inside_with_(false),
- scope_contains_with_(false),
- scope_calls_eval_(false),
- outer_scope_calls_eval_(false),
- inner_scope_calls_eval_(false),
- outer_scope_is_eval_scope_(false),
- force_eager_compilation_(false),
- num_stack_slots_(0),
- num_heap_slots_(0) {
+ decls_(0) {
+ SetDefaults(type, NULL, NULL);
+ ASSERT(!resolved());
}
Scope::Scope(Scope* outer_scope, Type type)
- : outer_scope_(outer_scope),
- inner_scopes_(4),
- type_(type),
- scope_name_(Factory::empty_symbol()),
+ : inner_scopes_(4),
+ variables_(),
temps_(4),
params_(4),
- dynamics_(NULL),
unresolved_(16),
- decls_(4),
- receiver_(NULL),
- function_(NULL),
- arguments_(NULL),
- arguments_shadow_(NULL),
- illegal_redecl_(NULL),
- scope_inside_with_(false),
- scope_contains_with_(false),
- scope_calls_eval_(false),
- outer_scope_calls_eval_(false),
- inner_scope_calls_eval_(false),
- outer_scope_is_eval_scope_(false),
- force_eager_compilation_(false),
- num_stack_slots_(0),
- num_heap_slots_(0) {
+ decls_(4) {
+ SetDefaults(type, outer_scope, NULL);
// At some point we might want to provide outer scopes to
// eval scopes (by walking the stack and reading the scope info).
// In that case, the ASSERT below needs to be adjusted.
ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
ASSERT(!HasIllegalRedeclaration());
+ ASSERT(!resolved());
}
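+// A scope reconstructed from a serialized scope info; it is spliced in as
+// the new outer scope of the given inner scope.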
+Scope::Scope(Scope* inner_scope, SerializedScopeInfo* scope_info)
+ : inner_scopes_(4),
+ variables_(),
+ temps_(4),
+ params_(4),
+ unresolved_(16),
+ decls_(4) {
+ ASSERT(scope_info != NULL);
+ SetDefaults(FUNCTION_SCOPE, inner_scope->outer_scope(), scope_info);
+ ASSERT(resolved());
+ InsertAfterScope(inner_scope);
+ if (scope_info->HasHeapAllocatedLocals()) {
+ num_heap_slots_ = scope_info_->NumberOfContextSlots();
+ }
+}
+
+
bool Scope::Analyze(CompilationInfo* info) {
ASSERT(info->function() != NULL);
Scope* top = info->function()->scope();
+
+ // If we have a serialized scope info, reuse it.
+ if (!info->closure().is_null()) {
+ SerializedScopeInfo* scope_info = info->closure()->shared()->scope_info();
+ if (scope_info != SerializedScopeInfo::Empty()) {
+ Scope* scope = top;
+ JSFunction* current = *info->closure();
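+ // Walk the closure's context chain and rebuild the enclosing scopes
+ // from their serialized scope infos.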
+ do {
+ current = current->context()->closure();
+ SerializedScopeInfo* scope_info = current->shared()->scope_info();
+ if (scope_info != SerializedScopeInfo::Empty()) {
+ scope = new Scope(scope, scope_info);
+ } else {
+ ASSERT(current->context()->IsGlobalContext());
+ }
+ } while (!current->context()->IsGlobalContext());
+ }
+ }
+
while (top->outer_scope() != NULL) top = top->outer_scope();
top->AllocateVariables(info->calling_context());
void Scope::Initialize(bool inside_with) {
+ ASSERT(!resolved());
+
// Add this scope as a new inner scope of the outer scope.
if (outer_scope_ != NULL) {
outer_scope_->inner_scopes_.Add(this);
Variable* var =
variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
false, Variable::THIS);
- var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
+ var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
receiver_ = var;
if (is_function_scope()) {
Variable* Scope::LocalLookup(Handle<String> name) {
- return variables_.Lookup(name);
+ Variable* result = variables_.Lookup(name);
+ if (result != NULL || !resolved()) {
+ return result;
+ }
+ // If the scope is resolved, we may find the variable in the serialized
+ // scope info.
+
+ // We should never look up 'arguments' in this scope
+ // as it is implicitly present in every scope.
+ ASSERT(*name != *Factory::arguments_symbol());
+
+ // Check context slot lookup.
+ Variable::Mode mode;
+ int index = scope_info_->ContextSlotIndex(*name, &mode);
+ if (index < 0) {
+ return NULL;
+ }
+
+ // Check that there is no local slot with the given name.
+ ASSERT(scope_info_->StackSlotIndex(*name) < 0);
+ Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL);
+ var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
+ return var;
}
// DYNAMIC variables are introduced during variable allocation,
// INTERNAL variables are allocated explicitly, and TEMPORARY
// variables are allocated via NewTemporary().
+ ASSERT(!resolved());
ASSERT(mode == Variable::VAR || mode == Variable::CONST);
return variables_.Declare(this, name, mode, true, Variable::NORMAL);
}
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
+ ASSERT(!resolved());
VariableProxy* proxy = new VariableProxy(name, false, inside_with);
unresolved_.Add(proxy);
return proxy;
Variable* Scope::NewTemporary(Handle<String> name) {
+ ASSERT(!resolved());
Variable* var =
new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
temps_.Add(var);
// Declare a new non-local.
var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
// Allocate it by giving it a dynamic lookup.
- var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
+ var->set_rewrite(new Slot(var, Slot::LOOKUP, -1));
}
return var;
}
ASSERT(var != NULL);
// If this is a lookup from an inner scope, mark the variable.
- if (inner_lookup)
- var->is_accessed_from_inner_scope_ = true;
+ if (inner_lookup) {
+ var->MarkAsAccessedFromInnerScope();
+ }
// If the variable we have found is just a guess, invalidate the
// result. If the found variable is local, record that fact so we
// via an eval() call. This is only possible if the variable has a
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
- (var->is_accessed_from_inner_scope_ ||
+ (var->is_accessed_from_inner_scope() ||
scope_calls_eval_ || inner_scope_calls_eval_ ||
scope_contains_with_)) {
var->set_is_used(true);
// context.
return
var->mode() != Variable::TEMPORARY &&
- (var->is_accessed_from_inner_scope_ ||
+ (var->is_accessed_from_inner_scope() ||
scope_calls_eval_ || inner_scope_calls_eval_ ||
scope_contains_with_ || var->is_global());
}
void Scope::AllocateStackSlot(Variable* var) {
- var->rewrite_ = new Slot(var, Slot::LOCAL, num_stack_slots_++);
+ var->set_rewrite(new Slot(var, Slot::LOCAL, num_stack_slots_++));
}
void Scope::AllocateHeapSlot(Variable* var) {
- var->rewrite_ = new Slot(var, Slot::CONTEXT, num_heap_slots_++);
+ var->set_rewrite(new Slot(var, Slot::CONTEXT, num_heap_slots_++));
}
// It is ok to set this only now, because arguments is a local
// variable that is allocated after the parameters have been
// allocated.
- arguments_shadow_->is_accessed_from_inner_scope_ = true;
+ arguments_shadow_->MarkAsAccessedFromInnerScope();
}
Property* rewrite =
new Property(new VariableProxy(arguments_shadow_),
RelocInfo::kNoPosition,
Property::SYNTHETIC);
rewrite->set_is_arguments_access(true);
- var->rewrite_ = rewrite;
+ var->set_rewrite(rewrite);
}
}
ASSERT(var->scope() == this);
if (MustAllocate(var)) {
if (MustAllocateInContext(var)) {
- ASSERT(var->rewrite_ == NULL ||
+ ASSERT(var->rewrite() == NULL ||
(var->AsSlot() != NULL &&
var->AsSlot()->type() == Slot::CONTEXT));
- if (var->rewrite_ == NULL) {
+ if (var->rewrite() == NULL) {
// Only set the heap allocation if the parameter has not
// been allocated yet.
AllocateHeapSlot(var);
}
} else {
- ASSERT(var->rewrite_ == NULL ||
+ ASSERT(var->rewrite() == NULL ||
(var->AsSlot() != NULL &&
var->AsSlot()->type() == Slot::PARAMETER));
// Set the parameter index always, even if the parameter
// was seen before! (We need to access the actual parameter
// supplied for the last occurrence of a multiply declared
// parameter.)
- var->rewrite_ = new Slot(var, Slot::PARAMETER, i);
+ var->set_rewrite(new Slot(var, Slot::PARAMETER, i));
}
}
}
void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
- ASSERT(var->rewrite_ == NULL ||
+ ASSERT(var->rewrite() == NULL ||
(!var->IsVariable(Factory::result_symbol())) ||
(var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
- if (var->rewrite_ == NULL && MustAllocate(var)) {
+ if (var->rewrite() == NULL && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
AllocateHeapSlot(var);
} else {
void Scope::AllocateVariablesRecursively() {
- // The number of slots required for variables.
- num_stack_slots_ = 0;
- num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
-
// Allocate variables for inner scopes.
for (int i = 0; i < inner_scopes_.length(); i++) {
inner_scopes_[i]->AllocateVariablesRecursively();
}
+ // If the scope is already resolved, we still need to allocate variables
+ // in inner scopes which might not have been resolved yet.
+ if (resolved()) return;
+ // The number of slots required for variables.
+ num_stack_slots_ = 0;
+ num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
+
// Allocate variables for this scope.
// Parameters must be allocated first, if any.
if (is_function_scope()) AllocateParameterLocals();
explicit Scope(Type type);
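+ // Splice this scope between the given inner scope and its previous
+ // outer scope.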
+ void InsertAfterScope(Scope* scope) {
+ inner_scopes_.Add(scope);
+ outer_scope_ = scope->outer_scope_;
+ outer_scope_->inner_scopes_.RemoveElement(scope);
+ outer_scope_->inner_scopes_.Add(this);
+ scope->outer_scope_ = this;
+ }
+
// Scope tree.
Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
int num_stack_slots_;
int num_heap_slots_;
+ // Serialized scopes support.
+ SerializedScopeInfo* scope_info_;
+ bool resolved() { return scope_info_ != NULL; }
+
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
Variable* NonLocal(Handle<String> name, Variable::Mode mode);
void AllocateNonParameterLocal(Variable* var);
void AllocateNonParameterLocals();
void AllocateVariablesRecursively();
+
+ private:
+ Scope(Scope* inner_scope, SerializedScopeInfo* scope_info);
+
+ void SetDefaults(Type type,
+ Scope* outer_scope,
+ SerializedScopeInfo* scope_info) {
+ outer_scope_ = outer_scope;
+ type_ = type;
+ scope_name_ = Factory::empty_symbol();
+ dynamics_ = NULL;
+ receiver_ = NULL;
+ function_ = NULL;
+ arguments_ = NULL;
+ arguments_shadow_ = NULL;
+ illegal_redecl_ = NULL;
+ scope_inside_with_ = false;
+ scope_contains_with_ = false;
+ scope_calls_eval_ = false;
+ outer_scope_calls_eval_ = false;
+ inner_scope_calls_eval_ = false;
+ outer_scope_is_eval_scope_ = false;
+ force_eager_compilation_ = false;
+ num_stack_slots_ = 0;
+ num_heap_slots_ = 0;
+ scope_info_ = scope_info;
+ }
};
}
+bool Variable::IsContextSlot() const {
+ Slot* s = AsSlot();
+ return s != NULL && s->type() == Slot::CONTEXT;
+}
+
+
Variable::Variable(Scope* scope,
Handle<String> name,
Mode mode,
bool is_accessed_from_inner_scope() const {
return is_accessed_from_inner_scope_;
}
+ void MarkAsAccessedFromInnerScope() {
+ is_accessed_from_inner_scope_ = true;
+ }
bool is_used() { return is_used_; }
void set_is_used(bool flag) { is_used_ = flag; }
bool IsStackAllocated() const;
bool IsParameter() const; // Includes 'this'.
bool IsStackLocal() const;
+ bool IsContextSlot() const;
bool is_dynamic() const {
return (mode_ == DYNAMIC ||
}
Expression* rewrite() const { return rewrite_; }
+ void set_rewrite(Expression* expr) { rewrite_ = expr; }
StaticType* type() { return &type_; }
// Code generation.
// rewrite_ is usually a Slot or a Property, but may be any expression.
Expression* rewrite_;
-
- friend class Scope; // Has explicit access to rewrite_.
};
}
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Abort("Unimplemented: %s", "DoLoadContextSlot");
+}
+
+
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Abort("Unimplemented: %s", "DoLoadNamedField");
}
}
-void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
- Abort("Unimplemented: %s", "LoadPrototype");
+void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
+ Abort("Unimplemented: %s", "LoadHeapObject");
}
int arity,
LInstruction* instr);
- void LoadPrototype(Register result, Handle<JSObject> prototype);
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
}
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[ecx] #%d / ", arity());
+ stream->Add("[rcx] #%d / ", arity());
}
}
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ Abort("Unimplemented: %s", "DoLoadContextSlot");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
Abort("Unimplemented: %s", "DoLoadNamedField");
return NULL;
// LGlobalReceiver
// LGoto
// LLazyBailout
+// LLoadContextSlot
// LLoadGlobal
// LMaterializedLiteral
// LArrayLiteral
V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
};
+class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int context_chain_length() const {
+ return hydrogen()->context_chain_length();
+ }
+ int slot_index() const { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LPushArgument: public LUnaryOperation<0> {
public:
explicit LPushArgument(LOperand* argument) : LUnaryOperation<0>(argument) {}
test-api/Bug*: FAIL
+# The problem is that a code object can get a different optimizable flag
+# in crankshaft after creation.
+test-log/EquivalenceOfLoggingAndTraversal: SKIP
+
##############################################################################
# BUG(281): This test fails on some Linuxes.
--- /dev/null
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function runner(f, expected) {
+ for (var i = 0; i < 1000000; i++) {
+ assertEquals(expected, f.call(this));
+ }
+}
+
+function test(n) {
+ function MyFunction() {
+ var result = n * 2 + arguments.length;
+ return result;
+ }
+ runner(MyFunction, n * 2);
+}
+
+test(1);
+test(42);
+test(239);
+
--- /dev/null
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Verifies that closures in presence of eval work fine.
+function withEval(expr, filter) {
+ function walk(v) {
+ for (var i in v) {
+ for (var i in v) {}
+ }
+ return filter(v);
+ }
+
+ var o = eval(expr);
+ return walk(o);
+}
+
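+// Builds an array of n '{}' strings used to assemble a large array literal.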
+function makeTagInfoJSON(n) {
+ var a = new Array(n);
+ for (var i = 0; i < n; i++) a[i] = '{}';
+ return a;
+}
+
+var expr = '([' + makeTagInfoJSON(128).join(', ') + '])';
+
+for (var n = 0; n < 300; n++) {
+ withEval(expr, function(a) { return a; });
+}
return j; // Make sure that future optimizations don't eliminate j.
} catch(e) {
ok = true;
- assertTrue(re.test(e));
+ assertTrue(re.test(e), 'e: ' + e);
}
assertTrue(ok);
}