RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- info()->CommitDependentMaps(code);
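+ // Now that the code object exists, register it directly as dependent
+ // code on every map whose assumptions it embeds.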
+ for (int i = 0; i < prototype_maps_.length(); i++) {
+ prototype_maps_.at(i)->AddDependentCode(
+ DependentCode::kPrototypeCheckGroup, code);
+ }
+ for (int i = 0; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
+ if (graph()->depends_on_empty_array_proto_elements()) {
+ isolate()->initial_object_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ isolate()->initial_array_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ }
}
}
if (!transition.is_null()) {
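+ // Record transition targets that can still be deprecated; the finished
+ // code object will be registered as dependent code on them.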
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
__ mov(scratch, Operand(transition));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) {
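+ // The checks are omitted, so remember the maps instead: the code must
+ // be deoptimized if any of these prototype maps change.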
+ for (int i = 0; i < maps->length(); i++) {
+ prototype_maps_.Add(maps->at(i), info()->zone());
+ }
+ } else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
__ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
+ prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
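+ // Maps collected during code generation; the finished code object is
+ // registered as dependent code on each of them.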
+ ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
- for (int i = 0; i < DependentCode::kGroupCount; i++) {
- dependent_maps_[i] = NULL;
- }
if (mode == STUB) {
mode_ = STUB;
return;
CompilationInfo::~CompilationInfo() {
delete deferred_handles_;
delete no_frame_ranges_;
-#ifdef DEBUG
- // Check that no dependent maps have been added or added dependent maps have
- // been rolled back or committed.
- for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ASSERT_EQ(NULL, dependent_maps_[i]);
- }
-#endif // DEBUG
-}
-
-
-void CompilationInfo::CommitDependentMaps(Handle<Code> code) {
- for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
- if (group_maps == NULL) continue;
- ASSERT(!object_wrapper_.is_null());
- for (int j = 0; j < group_maps->length(); j++) {
- group_maps->at(j)->dependent_code()->UpdateToFinishedCode(
- static_cast<DependentCode::DependencyGroup>(i), this, *code);
- }
- dependent_maps_[i] = NULL; // Zone-allocated, no need to delete.
- }
-}
-
-
-void CompilationInfo::RollbackDependentMaps() {
- // Unregister from all dependent maps if not yet committed.
- for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
- if (group_maps == NULL) continue;
- for (int j = 0; j < group_maps->length(); j++) {
- group_maps->at(j)->dependent_code()->RemoveCompilationInfo(
- static_cast<DependentCode::DependencyGroup>(i), this);
- }
- dependent_maps_[i] = NULL; // Zone-allocated, no need to delete.
- }
}
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
- info->AbortOptimization();
+ info->SetCode(Handle<Code>(info->shared_info()->code()));
InstallFullCode(*info);
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** aborting optimization for ");
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
- if (info->HasAbortedDueToDependentMap()) {
- info->set_bailout_reason("bailed out due to dependent map");
- status = optimizing_compiler->AbortOptimization();
- } else if (status != OptimizingCompiler::SUCCEEDED) {
- info->set_bailout_reason("failed/bailed out last time");
+ if (status != OptimizingCompiler::SUCCEEDED) {
+ optimizing_compiler->info()->set_bailout_reason(
+ "failed/bailed out last time");
status = optimizing_compiler->AbortOptimization();
} else {
status = optimizing_compiler->GenerateAndInstallCode();
// is constructed based on the resources available at compile-time.
class CompilationInfo {
public:
+ CompilationInfo(Handle<Script> script, Zone* zone);
+ CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
- virtual ~CompilationInfo();
+ CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
+
+ ~CompilationInfo();
Isolate* isolate() {
ASSERT(Isolate::Current() == isolate_);
deferred_handles_ = deferred_handles;
}
- ZoneList<Handle<Map> >* dependent_maps(DependentCode::DependencyGroup group) {
- if (dependent_maps_[group] == NULL) {
- dependent_maps_[group] = new(zone_) ZoneList<Handle<Map> >(2, zone_);
- }
- return dependent_maps_[group];
- }
-
- void CommitDependentMaps(Handle<Code> code);
-
- void RollbackDependentMaps();
-
void SaveHandles() {
SaveHandle(&closure_);
SaveHandle(&shared_info_);
return result;
}
- Handle<Foreign> object_wrapper() {
- if (object_wrapper_.is_null()) {
- object_wrapper_ =
- isolate()->factory()->NewForeign(reinterpret_cast<Address>(this));
- }
- return object_wrapper_;
- }
-
- void AbortDueToDependentMap() {
- mode_ = DEPENDENT_MAP_ABORT;
- }
-
- bool HasAbortedDueToDependentMap() {
- return mode_ == DEPENDENT_MAP_ABORT;
- }
-
- protected:
- CompilationInfo(Handle<Script> script, Zone* zone);
- CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
- CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
private:
Isolate* isolate_;
BASE,
OPTIMIZE,
NONOPT,
- STUB,
- DEPENDENT_MAP_ABORT
+ STUB
};
void Initialize(Isolate* isolate, Mode mode, Zone* zone);
DeferredHandles* deferred_handles_;
- ZoneList<Handle<Map> >* dependent_maps_[DependentCode::kGroupCount];
-
template<typename T>
void SaveHandle(Handle<T> *object) {
if (!object->is_null()) {
// during graph optimization.
int opt_count_;
- Handle<Foreign> object_wrapper_;
-
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
: CompilationInfo(closure, &zone_),
zone_(closure->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
- CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+ explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
: CompilationInfo(stub, isolate, &zone_),
zone_(isolate),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
- // Virtual destructor because a CompilationInfoWithZone has to exit the
- // zone scope and get rid of dependent maps even when the destructor is
- // called when cast as a CompilationInfo.
- virtual ~CompilationInfoWithZone() {
- RollbackDependentMaps();
- }
-
private:
Zone zone_;
ZoneScope zone_scope_;
public:
HCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder,
- Zone* zone,
- CompilationInfo* info)
+ Zone* zone)
: prototypes_(2, zone),
maps_(2, zone),
first_prototype_unique_id_(),
- last_prototype_unique_id_(),
- can_omit_prototype_maps_(true) {
+ last_prototype_unique_id_() {
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
// Keep a list of all objects on the prototype chain up to the holder
// and the expected maps.
while (true) {
prototypes_.Add(prototype, zone);
- Handle<Map> map(prototype->map());
- maps_.Add(map, zone);
- can_omit_prototype_maps_ &= map->CanOmitPrototypeChecks();
+ maps_.Add(Handle<Map>(prototype->map()), zone);
if (prototype.is_identical_to(holder)) break;
prototype = Handle<JSObject>(JSObject::cast(prototype->GetPrototype()));
}
- if (can_omit_prototype_maps_) {
- // Mark in-flight compilation as dependent on those maps.
- for (int i = 0; i < maps()->length(); i++) {
- Handle<Map> map = maps()->at(i);
- map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
- info);
- }
- }
}
ZoneList<Handle<JSObject> >* prototypes() { return &prototypes_; }
last_prototype_unique_id_ = UniqueValueId(prototypes_.last());
}
- bool CanOmitPrototypeChecks() { return can_omit_prototype_maps_; }
+ bool CanOmitPrototypeChecks() {
+ for (int i = 0; i < maps()->length(); i++) {
+ if (!maps()->at(i)->CanOmitPrototypeChecks()) return false;
+ }
+ return true;
+ }
protected:
virtual bool DataEquals(HValue* other) {
ZoneList<Handle<Map> > maps_;
UniqueValueId first_prototype_unique_id_;
UniqueValueId last_prototype_unique_id_;
- bool can_omit_prototype_maps_;
};
= Representation::Tagged())
: access_(access),
field_representation_(field_representation),
- transition_(),
transition_unique_id_(),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
HObjectAccess access() const { return access_; }
Handle<Map> transition() const { return transition_; }
UniqueValueId transition_unique_id() const { return transition_unique_id_; }
- void SetTransition(Handle<Map> map, CompilationInfo* info) {
- ASSERT(transition_.is_null()); // Only set once.
- if (map->CanBeDeprecated()) {
- map->AddDependentCompilationInfo(DependentCode::kTransitionGroup, info);
- }
- transition_ = map;
- }
+ void set_transition(Handle<Map> map) { transition_ = map; }
HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
void HOptimizedGraphBuilder::Bailout(const char* reason) {
- current_info()->set_bailout_reason(reason);
+ info()->set_bailout_reason(reason);
SetStackOverflow();
}
bool HOptimizedGraphBuilder::BuildGraph() {
- if (current_info()->function()->is_generator()) {
+ if (info()->function()->is_generator()) {
Bailout("function is a generator");
return false;
}
- Scope* scope = current_info()->scope();
+ Scope* scope = info()->scope();
if (scope->HasIllegalRedeclaration()) {
Bailout("function with illegal redeclaration");
return false;
AddInstruction(
new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
- VisitStatements(current_info()->function()->body());
+ VisitStatements(info()->function()->body());
if (HasStackOverflow()) return false;
if (current_block() != NULL) {
// last time this function was compiled, then this recompile is likely not
// due to missing/inadequate type feedback, but rather too aggressive
// optimization. Disable optimistic LICM in that case.
- Handle<Code> unoptimized_code(current_info()->shared_info()->code());
+ Handle<Code> unoptimized_code(info()->shared_info()->code());
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Handle<TypeFeedbackInfo> type_info(
TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
- return statement->OsrEntryId() == current_info()->osr_ast_id();
+ return statement->OsrEntryId() == info()->osr_ast_id();
}
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<SharedFunctionInfo> shared_info =
- SearchSharedFunctionInfo(current_info()->shared_info()->code(), expr);
+ SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
if (shared_info.is_null()) {
- shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script());
+ shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
}
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
HOptimizedGraphBuilder::GlobalPropertyAccess
HOptimizedGraphBuilder::LookupGlobalProperty(
Variable* var, LookupResult* lookup, bool is_store) {
- if (var->is_this() || !current_info()->has_global_object()) {
+ if (var->is_this() || !info()->has_global_object()) {
return kUseGeneric;
}
- Handle<GlobalObject> global(current_info()->global_object());
+ Handle<GlobalObject> global(info()->global_object());
global->Lookup(*var->name(), lookup);
if (!lookup->IsNormal() ||
(is_store && lookup->IsReadOnly()) ||
HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
HValue* context = environment()->LookupContext();
- int length = current_info()->scope()->ContextChainLength(var->scope());
+ int length = info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
HInstruction* context_instruction = new(zone()) HOuterContext(context);
AddInstruction(context_instruction);
LookupGlobalProperty(variable, &lookup, false);
if (type == kUseCell &&
- current_info()->global_object()->IsAccessCheckNeeded()) {
+ info()->global_object()->IsAccessCheckNeeded()) {
type = kUseGeneric;
}
if (type == kUseCell) {
- Handle<GlobalObject> global(current_info()->global_object());
+ Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HLoadGlobalCell* instr =
new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
AddInstruction(new(zone()) HCheckPrototypeMaps(
Handle<JSObject>(JSObject::cast(map->prototype())),
Handle<JSObject>(JSObject::cast(proto)),
- zone(),
- top_info()));
+ zone()));
}
HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
if (transition_to_field) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
- instr->SetTransition(transition, top_info());
+ instr->set_transition(transition);
// TODO(fschneider): Record the new map type of the object in the IR to
// enable elimination of redundant checks after the transition store.
instr->SetGVNFlag(kChangesMaps);
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
- Handle<GlobalObject> global(current_info()->global_object());
+ Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HInstruction* instr =
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (current_info()->scope()->arguments() != NULL) {
+ if (info()->scope()->arguments() != NULL) {
// Parameters will be allocated to context slots. We have no
// direct way to detect that the variable is a parameter so we do
// a linear search of the parameter variables.
- int count = current_info()->scope()->num_parameters();
+ int count = info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == current_info()->scope()->parameter(i)) {
+ if (var == info()->scope()->parameter(i)) {
Bailout(
"assignment to parameter, function uses arguments object");
}
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (current_info()->scope()->arguments() != NULL) {
+ if (info()->scope()->arguments() != NULL) {
// Parameters will rewrite to context slots. We have no direct way
// to detect that the variable is a parameter.
- int count = current_info()->scope()->num_parameters();
+ int count = info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == current_info()->scope()->parameter(i)) {
+ if (var == info()->scope()->parameter(i)) {
return Bailout("assignment to parameter in arguments object");
}
}
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- prototype, holder, zone(), top_info()));
+ AddInstruction(
+ new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
HValue* holder_value = AddInstruction(new(zone())
HConstant(holder, Representation::Tagged()));
return BuildLoadNamedField(holder_value,
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- prototype, holder, zone(), top_info()));
+ AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*holder_map));
return new(zone()) HConstant(function, Representation::Tagged());
}
isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- prototype, object_prototype, zone(), top_info()));
+ AddInstruction(
+ new(zone()) HCheckPrototypeMaps(prototype, object_prototype, zone()));
load_mode = ALLOW_RETURN_HOLE;
graph()->MarkDependsOnEmptyArrayProtoElements();
}
Handle<Map> receiver_map) {
if (!holder.is_null()) {
Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- prototype, holder, zone(), top_info()));
+ AddInstruction(
+ new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
}
}
expr->ComputeTarget(map, name);
AddCheckPrototypeMaps(expr->holder(), map);
if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
- Handle<JSFunction> caller = current_info()->closure();
+ Handle<JSFunction> caller = info()->closure();
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
// Precondition: call is monomorphic and we have found a target with the
// appropriate arity.
- Handle<JSFunction> caller = current_info()->closure();
+ Handle<JSFunction> caller = info()->closure();
Handle<SharedFunctionInfo> target_shared(target->shared());
// Do a quick check on source code length to avoid parsing large
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
- Handle<JSFunction> caller = current_info()->closure();
+ Handle<JSFunction> caller = info()->closure();
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [early]");
#if !defined(V8_TARGET_ARCH_IA32)
// Target must be able to use caller's context.
- CompilationInfo* outer_info = current_info();
+ CompilationInfo* outer_info = info();
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
Call::GetPrototypeForPrimitiveCheck(STRING_CHECK,
expr->holder()->GetIsolate()),
expr->holder(),
- zone(),
- top_info()));
+ zone()));
HInstruction* char_code =
BuildStringCharCodeAt(context, string, index);
if (id == kStringCharCodeAt) {
return false;
}
- if (current_info()->scope()->arguments() == NULL) return false;
+ if (info()->scope()->arguments() == NULL) return false;
ZoneList<Expression*>* args = expr->arguments();
if (args->length() != 2) return false;
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
if (type == kUseCell &&
- !current_info()->global_object()->IsAccessCheckNeeded()) {
- Handle<GlobalObject> global(current_info()->global_object());
+ !info()->global_object()->IsAccessCheckNeeded()) {
+ Handle<GlobalObject> global(info()->global_object());
known_global_function = expr->ComputeGlobalTarget(global, &lookup);
}
if (known_global_function) {
}
if (TryInlineCall(expr)) return;
- if (expr->target().is_identical_to(current_info()->closure())) {
+ if (expr->target().is_identical_to(info()->closure())) {
graph()->MarkRecursive();
}
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
- if (current_info()->scope()->arguments() != NULL) {
+ if (info()->scope()->arguments() != NULL) {
// Parameters will rewrite to context slots. We have no direct
// way to detect that the variable is a parameter so we use a
// linear search of the parameter list.
- int count = current_info()->scope()->num_parameters();
+ int count = info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
- if (var == current_info()->scope()->parameter(i)) {
+ if (var == info()->scope()->parameter(i)) {
return Bailout("assignment to parameter in arguments object");
}
}
VariableProxy* proxy = expr->right()->AsVariableProxy();
bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
if (global_function &&
- current_info()->has_global_object() &&
- !current_info()->global_object()->IsAccessCheckNeeded()) {
+ info()->has_global_object() &&
+ !info()->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = proxy->name();
- Handle<GlobalObject> global(current_info()->global_object());
+ Handle<GlobalObject> global(info()->global_object());
LookupResult lookup(isolate());
global->Lookup(*name, &lookup);
if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
- int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
+ int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+ DeclareGlobalsLanguageMode::encode(info()->language_mode());
HInstruction* result = new(zone()) HDeclareGlobals(
environment()->LookupContext(), array, flags);
AddInstruction(result);
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_.Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function = Compiler::BuildFunctionInfo(
- declaration->fun(), current_info()->script());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), info()->script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_.Add(function, zone());
}
void MarkDependsOnEmptyArrayProtoElements() {
- // Add map dependency if not already added.
- if (depends_on_empty_array_proto_elements_) return;
- isolate()->initial_object_prototype()->map()->AddDependentCompilationInfo(
- DependentCode::kElementsCantBeAddedGroup, info());
- isolate()->initial_array_prototype()->map()->AddDependentCompilationInfo(
- DependentCode::kElementsCantBeAddedGroup, info());
depends_on_empty_array_proto_elements_ = true;
}
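+ // Checked when the generated code is installed: if set, the code is
+ // registered as dependent on the initial object and array prototype maps.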
+ bool depends_on_empty_array_proto_elements() {
+ return depends_on_empty_array_proto_elements_;
+ }
+
void RecordUint32Instruction(HInstruction* instr) {
if (uint32_instructions_ == NULL) {
uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
Zone* zone() const { return info_->zone(); }
HGraph* graph() const { return graph_; }
Isolate* isolate() const { return graph_->isolate(); }
- CompilationInfo* top_info() { return info_; }
HGraph* CreateGraph();
void set_ast_context(AstContext* context) { ast_context_ = context; }
// Accessors forwarded to the function state.
- CompilationInfo* current_info() const {
+ CompilationInfo* info() const {
return function_state()->compilation_info();
}
AstContext* call_context() const {
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
- info()->CommitDependentMaps(code);
+ for (int i = 0; i < prototype_maps_.length(); i++) {
+ prototype_maps_.at(i)->AddDependentCode(
+ DependentCode::kPrototypeCheckGroup, code);
+ }
+ for (int i = 0; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
+ if (graph()->depends_on_empty_array_proto_elements()) {
+ isolate()->initial_object_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ isolate()->initial_array_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ }
}
}
if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
__ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+ for (int i = 0; i < maps->length(); i++) {
+ prototype_maps_.Add(maps->at(i), info()->zone());
+ }
+ } else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
DoCheckMapCommon(reg, maps->at(i), instr);
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
+ prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
int number_of_entries = starts.number_of_entries();
if (number_of_entries == 0) return;
for (int i = 0; i < number_of_entries; i++) {
- if (!entries->is_code_at(i)) continue;
Code* code = entries->code_at(i);
if (IsMarked(code) && !code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
}
- entries->clear_at(i);
+ entries->clear_code_at(i);
}
map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
for (int g = 0; g < DependentCode::kGroupCount; g++) {
int group_number_of_entries = 0;
for (int i = starts.at(g); i < starts.at(g + 1); i++) {
- if (!entries->is_code_at(i)) continue;
Code* code = entries->code_at(i);
if (IsMarked(code) && !code->marked_for_deoptimization()) {
if (new_number_of_entries + group_number_of_entries != i) {
- entries->set_object_at(
- new_number_of_entries + group_number_of_entries, code);
+ entries->set_code_at(new_number_of_entries +
+ group_number_of_entries, code);
}
- Object** slot = entries->slot_at(new_number_of_entries +
- group_number_of_entries);
+ Object** slot = entries->code_slot_at(new_number_of_entries +
+ group_number_of_entries);
RecordSlot(slot, slot, code);
group_number_of_entries++;
}
new_number_of_entries += group_number_of_entries;
}
for (int i = new_number_of_entries; i < number_of_entries; i++) {
- entries->clear_at(i);
+ entries->clear_code_at(i);
}
}
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- info()->CommitDependentMaps(code);
+ for (int i = 0; i < prototype_maps_.length(); i++) {
+ prototype_maps_.at(i)->AddDependentCode(
+ DependentCode::kPrototypeCheckGroup, code);
+ }
+ for (int i = 0; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
+ if (graph()->depends_on_empty_array_proto_elements()) {
+ isolate()->initial_object_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ isolate()->initial_array_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ }
}
}
if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
__ li(scratch, Operand(transition));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+ for (int i = 0; i < maps->length(); i++) {
+ prototype_maps_.Add(maps->at(i), info()->zone());
+ }
+ } else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
__ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
+ prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
}
+void Map::AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code) {
+ Handle<DependentCode> codes =
+ DependentCode::Insert(Handle<DependentCode>(dependent_code()),
+ group, code);
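+ // Insert() may have allocated a new, larger array; only write the
+ // map's dependent_code field if the array object actually changed.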
+ if (*codes != dependent_code()) {
+ set_dependent_code(*codes);
+ }
+}
+
+
int DependentCode::number_of_entries(DependencyGroup group) {
if (length() == 0) return 0;
return Smi::cast(get(group))->value();
}
-bool DependentCode::is_code_at(int i) {
- return get(kCodesStartIndex + i)->IsCode();
-}
-
Code* DependentCode::code_at(int i) {
return Code::cast(get(kCodesStartIndex + i));
}
-CompilationInfo* DependentCode::compilation_info_at(int i) {
- return reinterpret_cast<CompilationInfo*>(
- Foreign::cast(get(kCodesStartIndex + i))->foreign_address());
+void DependentCode::set_code_at(int i, Code* value) {
+ set(kCodesStartIndex + i, value);
}
-void DependentCode::set_object_at(int i, Object* object) {
- set(kCodesStartIndex + i, object);
-}
-
-
-Object* DependentCode::object_at(int i) {
- return get(kCodesStartIndex + i);
-}
-
-
-Object** DependentCode::slot_at(int i) {
+Object** DependentCode::code_slot_at(int i) {
return HeapObject::RawField(
this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
}
-void DependentCode::clear_at(int i) {
+void DependentCode::clear_code_at(int i) {
set_undefined(kCodesStartIndex + i);
}
-void DependentCode::copy(int from, int to) {
- set(kCodesStartIndex + to, get(kCodesStartIndex + from));
-}
-
-
void DependentCode::ExtendGroup(DependencyGroup group) {
GroupStartIndexes starts(this);
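+ // Shift every group after |group| one slot to the right by moving each
+ // group's first code just past its old end, freeing a slot for |group|.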
for (int g = kGroupCount - 1; g > group; g--) {
if (starts.at(g) < starts.at(g + 1)) {
- copy(starts.at(g), starts.at(g + 1));
+ set_code_at(starts.at(g + 1), code_at(starts.at(g)));
}
}
}
}
-void Map::AddDependentCompilationInfo(DependentCode::DependencyGroup group,
- CompilationInfo* info) {
- Handle<DependentCode> dep(dependent_code());
- Handle<DependentCode> codes =
- DependentCode::Insert(dep, group, info->object_wrapper());
- if (*codes != dependent_code()) set_dependent_code(*codes);
- info->dependent_maps(group)->Add(Handle<Map>(this), info->zone());
-}
-
-
-void Map::AddDependentCode(DependentCode::DependencyGroup group,
- Handle<Code> code) {
- Handle<DependentCode> codes = DependentCode::Insert(
- Handle<DependentCode>(dependent_code()), group, code);
- if (*codes != dependent_code()) set_dependent_code(*codes);
-}
-
-
DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
Recompute(entries);
}
Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
DependencyGroup group,
- Handle<Object> object) {
+ Handle<Code> value) {
GroupStartIndexes starts(*entries);
int start = starts.at(group);
int end = starts.at(group + 1);
int number_of_entries = starts.number_of_entries();
- if (start < end && entries->object_at(end - 1) == *object) {
- // Do not append the compilation info if it is already in the array.
+ if (start < end && entries->code_at(end - 1) == *value) {
+ // Do not append the code if it is already in the array.
// It is sufficient to just check only the last element because
// we process embedded maps of an optimized code in one batch.
return entries;
end = starts.at(group + 1);
number_of_entries = starts.number_of_entries();
for (int i = 0; i < number_of_entries; i++) {
- entries->clear_at(i);
+ entries->clear_code_at(i);
}
// If the old fixed array was empty, we need to reset counters of the
// new array.
entries = new_entries;
}
entries->ExtendGroup(group);
- entries->set_object_at(end, *object);
+ entries->set_code_at(end, *value);
entries->set_number_of_entries(group, end + 1 - start);
return entries;
}
-void DependentCode::UpdateToFinishedCode(DependencyGroup group,
- CompilationInfo* info,
- Code* code) {
- DisallowHeapAllocation no_gc;
- AllowDeferredHandleDereference get_object_wrapper;
- Foreign* info_wrapper = *info->object_wrapper();
- GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- for (int i = start; i < end; i++) {
- if (object_at(i) == info_wrapper) {
- set_object_at(i, code);
- break;
- }
- }
-
-#ifdef DEBUG
- for (int i = start; i < end; i++) {
- ASSERT(is_code_at(i) || compilation_info_at(i) != info);
- }
-#endif
-}
-
-
-void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
- CompilationInfo* info) {
- DisallowHeapAllocation no_allocation;
- AllowDeferredHandleDereference get_object_wrapper;
- Foreign* info_wrapper = *info->object_wrapper();
- GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- // Find compilation info wrapper.
- int info_pos = -1;
- for (int i = start; i < end; i++) {
- if (object_at(i) == info_wrapper) {
- info_pos = i;
- break;
- }
- }
- if (info_pos == -1) return; // Not found.
- int gap = info_pos;
- // Use the last of each group to fill the gap in the previous group.
- for (int i = group; i < kGroupCount; i++) {
- int last_of_group = starts.at(group + 1) - 1;
- ASSERT(last_of_group >= gap);
- if (last_of_group == gap) continue;
- copy(last_of_group, gap);
- gap = last_of_group;
- }
- clear_at(gap); // Clear last gap.
- set_number_of_entries(group, end - start - 1);
-
-#ifdef DEBUG
- for (int i = start; i < end - 1; i++) {
- ASSERT(is_code_at(i) || compilation_info_at(i) != info);
- }
-#endif
-}
-
-
bool DependentCode::Contains(DependencyGroup group, Code* code) {
GroupStartIndexes starts(this);
- int number_of_entries = starts.number_of_code_entries();
+ int number_of_entries = starts.number_of_entries();
for (int i = 0; i < number_of_entries; i++) {
- if (object_at(i) == code) return true;
+ if (code_at(i) == code) return true;
}
return false;
}
DependentCode::GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
- int code_entries = starts.number_of_code_entries();
+ int number_of_entries = starts.number_of_entries();
if (start == end) return;
for (int i = start; i < end; i++) {
- if (is_code_at(i)) {
- Code* code = code_at(i);
- code->set_marked_for_deoptimization(true);
- } else {
- CompilationInfo* info = compilation_info_at(i);
- info->AbortDueToDependentMap();
- }
+ Code* code = code_at(i);
+ code->set_marked_for_deoptimization(true);
}
// Compact the array by moving all subsequent groups to fill in the new holes.
- for (int src = end, dst = start; src < code_entries; src++, dst++) {
- copy(src, dst);
+ for (int src = end, dst = start; src < number_of_entries; src++, dst++) {
+ set_code_at(dst, code_at(src));
}
// Now the holes are at the end of the array, zap them for heap-verifier.
int removed = end - start;
- for (int i = code_entries - removed; i < code_entries; i++) {
- clear_at(i);
+ for (int i = number_of_entries - removed; i < number_of_entries; i++) {
+ clear_code_at(i);
}
set_number_of_entries(group, 0);
DeoptimizeDependentCodeFilter filter;
};
-class CompilationInfo;
-
// This class describes the layout of dependent codes array of a map. The
// array is partitioned into several groups of dependent codes. Each group
// contains codes with the same dependency on the map. The array has the
void Recompute(DependentCode* entries);
int at(int i) { return start_indexes_[i]; }
int number_of_entries() { return start_indexes_[kGroupCount]; }
- int number_of_code_entries() {
- return start_indexes_[kGroupCount];
- }
private:
int start_indexes_[kGroupCount + 1];
};
bool Contains(DependencyGroup group, Code* code);
static Handle<DependentCode> Insert(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<Object> object);
- void UpdateToFinishedCode(DependencyGroup group,
- CompilationInfo* info,
- Code* code);
- void RemoveCompilationInfo(DependentCode::DependencyGroup group,
- CompilationInfo* info);
-
+ DependencyGroup group,
+ Handle<Code> value);
void DeoptimizeDependentCodeGroup(Isolate* isolate,
DependentCode::DependencyGroup group);
// and the mark compact collector.
inline int number_of_entries(DependencyGroup group);
inline void set_number_of_entries(DependencyGroup group, int value);
- inline bool is_code_at(int i);
inline Code* code_at(int i);
- inline CompilationInfo* compilation_info_at(int i);
- inline void set_object_at(int i, Object* object);
- inline Object** slot_at(int i);
- inline Object* object_at(int i);
- inline void clear_at(int i);
- inline void copy(int from, int to);
+ inline void set_code_at(int i, Code* value);
+ inline Object** code_slot_at(int i);
+ inline void clear_code_at(int i);
static inline DependentCode* cast(Object* object);
private:
inline bool CanOmitPrototypeChecks();
- void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
- CompilationInfo* info);
-
- void AddDependentCode(DependentCode::DependencyGroup group,
- Handle<Code> code);
+ inline void AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code);
bool IsMapInArrayPrototypeChain();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompleteOptimization) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WaitUntilOptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_parallel_recompilation && V8::UseCrankshaft()) {
- // While function is in optimization pipeline, it is marked with builtins.
- while (function->code()->kind() == Code::BUILTIN) {
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- OS::Sleep(50);
+ if (FLAG_parallel_recompilation) {
+ if (V8::UseCrankshaft() && function->IsOptimizable()) {
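+ // Spin until the parallel recompilation thread has produced optimized
+ // code and it has been installed on the function.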
+ while (!function->IsOptimized()) OS::Sleep(50);
}
}
return isolate->heap()->undefined_value();
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
- F(CompleteOptimization, 1, 1) \
+ F(WaitUntilOptimized, 1, 1) \
F(GetOptimizationStatus, 1, 1) \
F(GetOptimizationCount, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- info()->CommitDependentMaps(code);
+ for (int i = 0; i < prototype_maps_.length(); i++) {
+ prototype_maps_.at(i)->AddDependentCode(
+ DependentCode::kPrototypeCheckGroup, code);
+ }
+ for (int i = 0; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
+ if (graph()->depends_on_empty_array_proto_elements()) {
+ isolate()->initial_object_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ isolate()->initial_array_prototype()->map()->AddDependentCode(
+ DependentCode::kElementsCantBeAddedGroup, code);
+ }
}
}
if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+ for (int i = 0; i < maps->length(); i++) {
+ prototype_maps_.Add(maps->at(i), info()->zone());
+ }
+ } else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
DoCheckMapCommon(reg, maps->at(i), instr);
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
+ prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
-
-function assertUnoptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 1);
-}
+// Flags: --allow-natives-syntax --parallel-recompilation
function f(foo) { return foo.bar(); }
assertEquals(1, f(o));
%OptimizeFunctionOnNextCall(f, "parallel");
-assertEquals(1, f(o)); // Trigger optimization.
-assertUnoptimized(f); // Optimization not yet done.
-// Change the prototype chain during optimization to trigger map invalidation.
+assertEquals(1, f(o));
+// Change the prototype chain during optimization.
o.__proto__.__proto__ = { bar: function() { return 2; } };
-%CompleteOptimization(f); // Conclude optimization with...
-assertUnoptimized(f); // ... bailing out due to map dependency.
+
+%WaitUntilOptimized(f);
+
assertEquals(2, f(o));
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
+// Flags: --allow-natives-syntax --expose-gc --parallel-recompilation
function assertUnoptimized(fun) {
assertTrue(%GetOptimizationStatus(fun) != 1);
}
-function assertOptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 2);
-}
-
function f(x) {
var xx = x * x;
var xxstr = xx.toString();
%OptimizeFunctionOnNextCall(f, "parallel");
%OptimizeFunctionOnNextCall(g, "parallel");
-f(g(2)); // Trigger optimization.
+f(g(2));
-assertUnoptimized(f); // Not yet optimized.
+assertUnoptimized(f);
assertUnoptimized(g);
-%CompleteOptimization(f); // Wait till optimized code is installed.
-%CompleteOptimization(g);
-
-assertOptimized(f); // Optimized now.
-assertOptimized(g);
+%WaitUntilOptimized(f);
+%WaitUntilOptimized(g);
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
-
-function assertUnoptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 1);
-}
-
-function f1(a, i) {
- return a[i] + 0.5;
-}
-
-var arr = [0.0,,2.5];
-assertEquals(0.5, f1(arr, 0));
-assertEquals(0.5, f1(arr, 0));
-
-// Optimized code of f1 depends on initial object and array maps.
-%OptimizeFunctionOnNextCall(f1, "parallel");
-assertEquals(0.5, f1(arr, 0));
-assertUnoptimized(f1); // Not yet optimized.
-Object.prototype[1] = 1.5; // Invalidate current initial object map.
-assertEquals(2, f1(arr, 1));
-%CompleteOptimization(f1); // Conclude optimization with...
-assertUnoptimized(f1); // ... bailing out due to map dependency.
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --track-fields --track-double-fields --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
-
-function assertUnoptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 1);
-}
-
-function new_object() {
- var o = {};
- o.a = 1;
- o.b = 2;
- return o;
-}
-
-function add_field(obj) {
- obj.c = 3;
-}
-
-add_field(new_object());
-add_field(new_object());
-%OptimizeFunctionOnNextCall(add_field, "parallel");
-
-var o = new_object();
-add_field(o); // Trigger optimization.
-assertUnoptimized(add_field); // Not yet optimized.
-o.c = 2.2; // Invalidate transition map.
-%CompleteOptimization(add_field); // Conclude optimization with...
-assertUnoptimized(add_field); // ... bailing out due to map dependency.
-
%OptimizeFunctionOnNextCall(g, "parallel");
f(0); // g() is disabled for optimization on inlining attempt.
// Attempt to optimize g() should not run into any assertion.
-%CompleteOptimization(g);
+%WaitUntilOptimized(g);