From c4caf766bf612c42d3ec59e209dde2970af4477f Mon Sep 17 00:00:00 2001
From: "mvstanton@chromium.org"
Date: Fri, 1 Mar 2013 16:06:34 +0000
Subject: [PATCH] Allocation Info Tracking, continued.

Addresses missing cases for array literals. Adds support for
"new Array()" call sites. This isn't complete yet; I still have to run
with --noinline_new.

BUG=

Review URL: https://codereview.chromium.org/11818021

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13790 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arm/builtins-arm.cc                |  62 +++--
 src/arm/code-stubs-arm.cc              | 137 ++++++++++-
 src/arm/lithium-arm.cc                 |  19 ++
 src/arm/lithium-arm.h                  |  18 ++
 src/arm/lithium-codegen-arm.cc         |  22 +-
 src/arm/macro-assembler-arm.cc         |  13 ++
 src/arm/macro-assembler-arm.h          |   1 +
 src/ast.cc                             |   1 +
 src/ast.h                              |   5 +-
 src/bootstrapper.cc                    |   7 +-
 src/builtins.cc                        | 178 +++++++-------
 src/builtins.h                         |   1 +
 src/code-stubs-hydrogen.cc             |  50 ++++
 src/code-stubs.h                       |  60 +++++
 src/elements.cc                        |  97 ++++++++
 src/elements.h                         |   3 +
 src/flag-definitions.h                 |   2 +
 src/heap-inl.h                         |   9 +
 src/heap.cc                            | 312 +++++++++++++++++++++++--
 src/heap.h                             |  40 +++-
 src/hydrogen-instructions.h            |  20 ++
 src/hydrogen.cc                        |  17 +-
 src/ia32/builtins-ia32.cc              |  58 +++--
 src/ia32/code-stubs-ia32.cc            | 141 ++++++++++-
 src/ia32/lithium-codegen-ia32.cc       |  20 ++
 src/ia32/lithium-ia32.cc               |  23 ++
 src/ia32/lithium-ia32.h                |  20 ++
 src/ia32/macro-assembler-ia32.cc       |  13 +-
 src/ia32/macro-assembler-ia32.h        |   2 +
 src/objects-inl.h                      |   6 +
 src/objects-printer.cc                 |  13 +-
 src/objects.cc                         | 103 ++++----
 src/objects.h                          |  11 +-
 src/runtime.cc                         |   9 +-
 src/type-info.cc                       |  34 ++-
 src/type-info.h                        |   5 +
 src/x64/builtins-x64.cc                |  62 +++--
 src/x64/code-stubs-x64.cc              | 136 ++++++++++-
 src/x64/lithium-codegen-x64.cc         |  20 +-
 src/x64/lithium-x64.cc                 |  20 ++
 src/x64/lithium-x64.h                  |  18 ++
 src/x64/macro-assembler-x64.cc         |   9 +
 src/x64/macro-assembler-x64.h          |   1 +
 test/mjsunit/allocation-site-info.js   | 152 +++++++++++-
 test/mjsunit/array-natives-elements.js |   1 +
 test/mjsunit/elements-transition.js    |   2 +-
 46 files changed, 1709 insertions(+), 244 deletions(-)

diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 6a5f61fd1..ec27bd666 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -542,31 +542,63 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
   //  -- r1     : constructor function
+  //  -- r2     : type info cell
   //  -- lr     : return address
   //  -- sp[...]: constructor arguments
   // -----------------------------------
-  Label generic_constructor;
 
   if (FLAG_debug_code) {
     // The array construct code is only set for the builtin and internal
     // Array functions which always have a map.
 
     // Initial map for the builtin Array function should be a map.
-    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ tst(r2, Operand(kSmiTagMask));
+    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ tst(r3, Operand(kSmiTagMask));
     __ Assert(ne, "Unexpected initial map for Array function");
-    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
     __ Assert(eq, "Unexpected initial map for Array function");
-  }
 
-  // Run the native code for the Array function called as a constructor.
-  ArrayNativeCode(masm, &generic_constructor);
+    // We should either have undefined in r2 or a valid jsglobalpropertycell
+    Label okay_here;
+    Handle<Object> undefined_sentinel(
+        masm->isolate()->heap()->undefined_value(), masm->isolate());
+    Handle<Map> global_property_cell_map(
+        masm->isolate()->heap()->global_property_cell_map());
+    __ cmp(r2, Operand(undefined_sentinel));
+    __ b(eq, &okay_here);
+    __ ldr(r3, FieldMemOperand(r2, 0));
+    __ cmp(r3, Operand(global_property_cell_map));
+    __ Assert(eq, "Expected property cell in register r2");
+    __ bind(&okay_here);
+  }
 
-  // Jump to the generic construct code in case the specialized code cannot
-  // handle the construction.
-  __ bind(&generic_constructor);
-  Handle<Code> generic_construct_stub =
-      masm->isolate()->builtins()->JSConstructStubGeneric();
-  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  if (FLAG_optimize_constructed_arrays) {
+    Label not_zero_case, not_one_case;
+    __ tst(r0, r0);
+    __ b(ne, &not_zero_case);
+    ArrayNoArgumentConstructorStub no_argument_stub;
+    __ TailCallStub(&no_argument_stub);
+
+    __ bind(&not_zero_case);
+    __ cmp(r0, Operand(1));
+    __ b(gt, &not_one_case);
+    ArraySingleArgumentConstructorStub single_argument_stub;
+    __ TailCallStub(&single_argument_stub);
+
+    __ bind(&not_one_case);
+    ArrayNArgumentsConstructorStub n_argument_stub;
+    __ TailCallStub(&n_argument_stub);
+  } else {
+    Label generic_constructor;
+    // Run the native code for the Array function called as a constructor.
+    ArrayNativeCode(masm, &generic_constructor);
+
+    // Jump to the generic construct code in case the specialized code cannot
+    // handle the construction.
+    __ bind(&generic_constructor);
+    Handle<Code> generic_construct_stub =
+        masm->isolate()->builtins()->JSConstructStubGeneric();
+    __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  }
 }
 
 
@@ -1145,6 +1177,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // Invoke the code and pass argc as r0.
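   // For the construct path below, r2 must hold a type feedback cell or the
   // undefined sentinel; no cell exists this early, so undefined is stored.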
   __ mov(r0, Operand(r3));
   if (is_construct) {
+    // No type feedback cell is available.
+    Handle<Object> undefined_sentinel(
+        masm->isolate()->heap()->undefined_value(), masm->isolate());
+    __ mov(r2, Operand(undefined_sentinel));
     CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
     __ CallStub(&stub);
   } else {
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 1ee669234..e016a8c92 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -73,6 +73,44 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
 }
 
 
+static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  // register state
+  // r1 -- constructor function
+  // r2 -- type info cell with elements kind
+  // r0 -- number of arguments to the constructor function
+  static Register registers[] = { r1, r2 };
+  descriptor->register_param_count_ = 2;
+  // stack param count needs (constructor pointer, and single argument)
+  descriptor->stack_parameter_count_ = &r0;
+  descriptor->register_params_ = registers;
+  descriptor->extra_expression_stack_count_ = 1;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(ArrayConstructor_StubFailure);
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -5546,12 +5584,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
 }
 
 
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
   // Cache the called function in a global property cell. Cache states
   // are uninitialized, monomorphic (indicated by a JSFunction), and
   // megamorphic.
   // r1 : the function to call
   // r2 : cache cell for call target
+  ASSERT(!FLAG_optimize_constructed_arrays);
   Label done;
 
   ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -5585,6 +5624,82 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
 }
 
 
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+  // Cache the called function in a global property cell. Cache states
+  // are uninitialized, monomorphic (indicated by a JSFunction), and
+  // megamorphic.
+  // r1 : the function to call
+  // r2 : cache cell for call target
+  ASSERT(FLAG_optimize_constructed_arrays);
+  Label initialize, done, miss, megamorphic, not_array_function;
+
+  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+            masm->isolate()->heap()->undefined_value());
+  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+            masm->isolate()->heap()->the_hole_value());
+
+  // Load the cache state into r3.
+  __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+  // A monomorphic cache hit or an already megamorphic state: invoke the
+  // function without changing the state.
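+  // Possible cache states in r3: the hole (uninitialized), a JSFunction
+  // (monomorphic), a Smi ElementsKind sentinel (monomorphic Array site), or
+  // undefined (megamorphic).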
+  __ cmp(r3, r1);
+  __ b(eq, &done);
+  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ b(eq, &done);
+
+  // Special handling of the Array() function, which caches not only the
+  // monomorphic Array function but the initial ElementsKind with special
+  // sentinels.
+  Handle<Object> terminal_kind_sentinel =
+      TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+                                                  LAST_FAST_ELEMENTS_KIND);
+  __ cmp(r3, Operand(terminal_kind_sentinel));
+  __ b(ne, &miss);
+  // Make sure the function is the Array() function
+  __ LoadArrayFunction(r3);
+  __ cmp(r1, r3);
+  __ b(ne, &megamorphic);
+  __ jmp(&done);
+
+  __ bind(&miss);
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+  // megamorphic.
+  __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+  __ b(eq, &initialize);
+  // MegamorphicSentinel is an immortal immovable object (undefined) so no
+  // write-barrier is needed.
+  __ bind(&megamorphic);
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+  // An uninitialized cache is patched with the function or sentinel to
+  // indicate the ElementsKind if function is the Array constructor.
+  __ bind(&initialize);
+  // Make sure the function is the Array() function
+  __ LoadArrayFunction(r3);
+  __ cmp(r1, r3);
+  __ b(ne, &not_array_function);
+
+  // The target function is the Array constructor, install a sentinel value in
+  // the constructor's type info cell that will track the initial ElementsKind
+  // that should be used for the array when it's constructed.
+  Handle<Object> initial_kind_sentinel =
+      TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+                                                  GetInitialFastElementsKind());
+  __ mov(r3, Operand(initial_kind_sentinel));
+  __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+  __ b(&done);
+
+  __ bind(&not_array_function);
+  __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+  // No need for a write barrier here - cells are rescanned.
+
+  __ bind(&done);
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   // r1 : the function to call
   // r2 : cache cell for call target
@@ -5617,7 +5732,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
   __ b(ne, &slow);
 
   if (RecordCallTarget()) {
-    GenerateRecordCallTarget(masm);
+    if (FLAG_optimize_constructed_arrays) {
+      GenerateRecordCallTarget(masm);
+    } else {
+      GenerateRecordCallTargetNoArray(masm);
+    }
   }
 
   // Fast-case: Invoke the function now.
@@ -5692,13 +5811,19 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
   __ b(ne, &slow);
 
   if (RecordCallTarget()) {
-    GenerateRecordCallTarget(masm);
+    if (FLAG_optimize_constructed_arrays) {
+      GenerateRecordCallTarget(masm);
+    } else {
+      GenerateRecordCallTargetNoArray(masm);
+    }
   }
 
   // Jump to the function-specific construct stub.
-  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
-  __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  Register jmp_reg = FLAG_optimize_constructed_arrays ? r3 : r2;
+  __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
+                                  SharedFunctionInfo::kConstructStubOffset));
+  __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
 
   // r0: number of arguments
   // r1: called object
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index bab398cae..d95759468 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -355,6 +355,17 @@ void LCallNew::PrintDataTo(StringStream* stream) {
 }
 
 
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ASSERT(hydrogen()->property_cell()->value()->IsSmi());
+  ElementsKind kind = static_cast<ElementsKind>(
+      Smi::cast(hydrogen()->property_cell()->value())->value());
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
 void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
   arguments()->PrintTo(stream);
   stream->Add(" length ");
@@ -1139,6 +1150,14 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* constructor = UseFixed(instr->constructor(), r1);
+  argument_count_ -= instr->argument_count();
+  LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
   LOperand* function = UseFixed(instr->function(), r1);
   argument_count_ -= instr->argument_count();
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index e81734ea8..ecc77a855 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -68,6 +68,7 @@ class LCodeGen;
   V(CallKnownGlobal)                            \
   V(CallNamed)                                  \
   V(CallNew)                                    \
+  V(CallNewArray)                               \
   V(CallRuntime)                                \
   V(CallStub)                                   \
   V(CheckFunction)                              \
@@ -1817,6 +1818,23 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
 };
 
 
+class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallNewArray(LOperand* constructor) {
+    inputs_[0] = constructor;
+  }
+
+  LOperand* constructor() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 943886c7d..888739193 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -4295,12 +4295,32 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
   ASSERT(ToRegister(instr->constructor()).is(r1));
   ASSERT(ToRegister(instr->result()).is(r0));
 
-  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   __ mov(r0, Operand(instr->arity()));
+  if (FLAG_optimize_constructed_arrays) {
+    // No cell in r2 for construct type feedback in optimized code
+    Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
+                                   isolate());
+    __ mov(r2, Operand(undefined_value));
+  }
+  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
 }
 
 
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  ASSERT(ToRegister(instr->constructor()).is(r1));
+  ASSERT(ToRegister(instr->result()).is(r0));
+  ASSERT(FLAG_optimize_constructed_arrays);
+
+  __ mov(r0, Operand(instr->arity()));
+  __ mov(r2, Operand(instr->hydrogen()->property_cell()));
+  Handle<Code> array_construct_code =
+      isolate()->builtins()->ArrayConstructCode();
+
+  CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   CallRuntime(instr->function(), instr->arity(), instr);
 }
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index bc3c78642..326f555a0 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3076,6 +3076,19 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
 }
 
 
+void MacroAssembler::LoadArrayFunction(Register function) {
+  // Load the global or builtins object from the current context.
+  ldr(function,
+      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  // Load the global context from the global or builtins object.
+  ldr(function,
+      FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
+  // Load the array function from the native context.
+  ldr(function,
+      MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                   Register map,
                                                   Register scratch) {
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index d26eda7f8..0826b7d7f 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -514,6 +514,7 @@ class MacroAssembler: public Assembler {
                                bool can_have_holes);
 
   void LoadGlobalFunction(int index, Register function);
+  void LoadArrayFunction(Register function);
 
   // Load the initial map from the global function. The registers
   // function and map can be the same, function is then overwritten.
diff --git a/src/ast.cc b/src/ast.cc
index d220f5560..02c815cd7 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -605,6 +605,7 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
   if (is_monomorphic_) {
     target_ = oracle->GetCallNewTarget(this);
+    elements_kind_ = oracle->GetCallNewElementsKind(this);
   }
 }
 
diff --git a/src/ast.h b/src/ast.h
index 740aa4429..66b4dd46c 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1595,6 +1595,7 @@ class CallNew: public Expression {
   Handle<JSFunction> target() { return target_; }
   BailoutId ReturnId() const { return return_id_; }
+  ElementsKind elements_kind() const { return elements_kind_; }
 
  protected:
   CallNew(Isolate* isolate,
@@ -1606,7 +1607,8 @@ class CallNew: public Expression {
         arguments_(arguments),
         pos_(pos),
         is_monomorphic_(false),
-        return_id_(GetNextId(isolate)) { }
+        return_id_(GetNextId(isolate)),
+        elements_kind_(GetInitialFastElementsKind()) { }
 
  private:
   Expression* expression_;
@@ -1617,6 +1619,7 @@ class CallNew: public Expression {
   Handle<JSFunction> target_;
 
   const BailoutId return_id_;
+  ElementsKind elements_kind_;
 };
 
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 5417d9926..fcb1de9f0 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1487,8 +1487,14 @@ Handle<JSFunction> Genesis::InstallInternalArray(
       factory()->NewJSObject(isolate()->object_function(), TENURED);
   SetPrototype(array_function, prototype);
 
+  // TODO(mvstanton): For performance reasons, this code would have to
+  // be changed to successfully run with FLAG_optimize_constructed_arrays.
+  // The next checkin to enable FLAG_optimize_constructed_arrays by
+  // default will address this.
+  CHECK(!FLAG_optimize_constructed_arrays);
   array_function->shared()->set_construct_stub(
       isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
+
   array_function->shared()->DontAdaptArguments();
 
   MaybeObject* maybe_map = array_function->initial_map()->Copy();
@@ -1744,7 +1750,6 @@ bool Genesis::InstallNatives() {
     native_context()->set_opaque_reference_function(*opaque_reference_fun);
   }
 
-
   // InternalArrays should not use Smi-Only array optimizations. There are too
   // many places in the C++ runtime code (e.g. RegEx) that assume that
   // elements in InternalArrays can be set to non-Smi values without going
diff --git a/src/builtins.cc b/src/builtins.cc
index 279594e0f..aa6920375 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -186,9 +186,67 @@ BUILTIN(EmptyFunction) {
 }
 
 
+#define CONVERT_ARG_STUB_CALLER_ARGS(name) \
+  Arguments* name = reinterpret_cast<Arguments*>(args[0]);
+
+
+RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
+  CONVERT_ARG_STUB_CALLER_ARGS(caller_args);
+  // ASSERT(args.length() == 3);
+  Handle<JSFunction> function = args.at<JSFunction>(1);
+  Handle<Object> type_info = args.at<Object>(2);
+
+  JSArray* array = NULL;
+  bool holey = false;
+  if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) {
+    int value = Smi::cast((*caller_args)[0])->value();
+    holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray);
+  }
+
+  ASSERT(function->has_initial_map());
+  ElementsKind kind = function->initial_map()->elements_kind();
+  if (holey) {
+    kind = GetHoleyElementsKind(kind);
+  }
+
+  MaybeObject* maybe_array;
+  if (*type_info != isolate->heap()->undefined_value()) {
+    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
+    if (cell->value()->IsSmi()) {
+      Smi* smi = Smi::cast(cell->value());
+      ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+      if (holey && !IsFastHoleyElementsKind(to_kind)) {
+        to_kind = GetHoleyElementsKind(to_kind);
+        // Update the allocation site info to reflect the advice alteration.
+        cell->set_value(Smi::FromInt(to_kind));
+      }
+
+      AllocationSiteMode mode = AllocationSiteInfo::GetMode(to_kind);
+      if (mode == TRACK_ALLOCATION_SITE) {
+        maybe_array = isolate->heap()->AllocateEmptyJSArrayWithAllocationSite(
+            kind, type_info);
+      } else {
+        maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
+      }
+      if (!maybe_array->To(&array)) return maybe_array;
+    }
+  }
+
+  if (array == NULL) {
+    maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
+    if (!maybe_array->To(&array)) return maybe_array;
+  }
+
+  maybe_array = ArrayConstructInitializeElements(array, caller_args);
+  if (maybe_array->IsFailure()) return maybe_array;
+  return array;
+}
+
+
 static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
                                            Isolate* isolate,
                                            JSFunction* constructor) {
+  ASSERT(args->length() >= 1);
   Heap* heap = isolate->heap();
   isolate->counters()->array_function_runtime()->Increment();
 
@@ -197,8 +255,29 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
     array = JSArray::cast((*args)[0]);
     // Initialize elements and length in case later allocations fail so that the
     // array object is initialized in a valid state.
-    array->set_length(Smi::FromInt(0));
-    array->set_elements(heap->empty_fixed_array());
+    MaybeObject* maybe_array = array->Initialize(0);
+    if (maybe_array->IsFailure()) return maybe_array;
+
+    if (FLAG_optimize_constructed_arrays) {
+      AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
+      ElementsKind to_kind = array->GetElementsKind();
+      if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
+        if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
+                                                to_kind)) {
+          // We have advice that we should change the elements kind.
+          if (FLAG_trace_track_allocation_sites) {
+            PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
+                   reinterpret_cast<void*>(array),
+                   ElementsKindToString(array->GetElementsKind()),
+                   ElementsKindToString(to_kind));
+          }
+
+          maybe_array = array->TransitionElementsKind(to_kind);
+          if (maybe_array->IsFailure()) return maybe_array;
+        }
+      }
+    }
+
     if (!FLAG_smi_only_arrays) {
       Context* native_context = isolate->context()->native_context();
       if (array->GetElementsKind() == GetInitialFastElementsKind() &&
@@ -215,97 +294,10 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
     if (!maybe_obj->To(&array)) return maybe_obj;
   }
 
-  // Optimize the case where there is one argument and the argument is a
-  // small smi.
-  if (args->length() == 2) {
-    Object* obj = (*args)[1];
-    if (obj->IsSmi()) {
-      int len = Smi::cast(obj)->value();
-      if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
-        Object* fixed_array;
-        { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
-          if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
-        }
-        ElementsKind elements_kind = array->GetElementsKind();
-        if (!IsFastHoleyElementsKind(elements_kind)) {
-          elements_kind = GetHoleyElementsKind(elements_kind);
-          MaybeObject* maybe_array =
-              array->TransitionElementsKind(elements_kind);
-          if (maybe_array->IsFailure()) return maybe_array;
-        }
-        // We do not use SetContent to skip the unnecessary elements type check.
-        array->set_elements(FixedArray::cast(fixed_array));
-        array->set_length(Smi::cast(obj));
-        return array;
-      }
-    }
-    // Take the argument as the length.
-    { MaybeObject* maybe_obj = array->Initialize(0);
-      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    }
-    return array->SetElementsLength((*args)[1]);
-  }
-
-  // Optimize the case where there are no parameters passed.
-  if (args->length() == 1) {
-    return array->Initialize(JSArray::kPreallocatedArrayElements);
-  }
-
-  // Set length and elements on the array.
-  int number_of_elements = args->length() - 1;
-  MaybeObject* maybe_object =
-      array->EnsureCanContainElements(args, 1, number_of_elements,
-                                      ALLOW_CONVERTED_DOUBLE_ELEMENTS);
-  if (maybe_object->IsFailure()) return maybe_object;
-
-  // Allocate an appropriately typed elements array.
-  MaybeObject* maybe_elms;
-  ElementsKind elements_kind = array->GetElementsKind();
-  if (IsFastDoubleElementsKind(elements_kind)) {
-    maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
-        number_of_elements);
-  } else {
-    maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
-  }
-  FixedArrayBase* elms;
-  if (!maybe_elms->To(&elms)) return maybe_elms;
-
-  // Fill in the content
-  switch (array->GetElementsKind()) {
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_SMI_ELEMENTS: {
-      FixedArray* smi_elms = FixedArray::cast(elms);
-      for (int index = 0; index < number_of_elements; index++) {
-        smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
-      }
-      break;
-    }
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_ELEMENTS: {
-      AssertNoAllocation no_gc;
-      WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-      FixedArray* object_elms = FixedArray::cast(elms);
-      for (int index = 0; index < number_of_elements; index++) {
-        object_elms->set(index, (*args)[index+1], mode);
-      }
-      break;
-    }
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS: {
-      FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
-      for (int index = 0; index < number_of_elements; index++) {
-        double_elms->set(index, (*args)[index+1]->Number());
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  array->set_elements(elms);
-  array->set_length(Smi::FromInt(number_of_elements));
-  return array;
+  Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1);
+  ASSERT(adjusted_arguments.length() < 1 ||
+         adjusted_arguments[0] == (*args)[1]);
+  return ArrayConstructInitializeElements(array, &adjusted_arguments);
 }
 
 
diff --git a/src/builtins.h b/src/builtins.h
index cb0c9c516..83b134c6d 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -272,6 +272,7 @@ enum BuiltinExtraArguments {
   V(APPLY_PREPARE, 1)                 \
   V(APPLY_OVERFLOW, 1)
 
+MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
 
 class BuiltinFunctionTable;
 class ObjectVisitor;
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 20abec4fa..491e25550 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -306,6 +306,33 @@ void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
 }
 
 
+template <>
+void CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
+  HInstruction* deopt = new(zone()) HSoftDeoptimize();
+  AddInstruction(deopt);
+  current_block()->MarkAsDeoptimizing();
+  HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
+  current_block()->Finish(ret);
+}
+
+
+Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
+  CodeStubGraphBuilder<ArrayNoArgumentConstructorStub> builder(this);
+  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+  return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
+void CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
+    BuildCodeStub() {
+  HInstruction* deopt = new(zone()) HSoftDeoptimize();
+  AddInstruction(deopt);
+  current_block()->MarkAsDeoptimizing();
+  HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
+  current_block()->Finish(ret);
+}
+
+
 Handle<Code> TransitionElementsKindStub::GenerateCode() {
   CodeStubGraphBuilder<TransitionElementsKindStub> builder(this);
   LChunk* chunk = OptimizeGraph(builder.CreateGraph());
@@ -313,4 +340,27 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
 }
 
 
+Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
+  CodeStubGraphBuilder<ArraySingleArgumentConstructorStub> builder(this);
+  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+  return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
+void CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
+  HInstruction* deopt = new(zone()) HSoftDeoptimize();
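+  // As with the two stubs above, the body just soft-deoptimizes, so
+  // construction falls back to the ArrayConstructor_StubFailure runtime
+  // path registered as the stubs' deoptimization handler.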
+  AddInstruction(deopt);
+  current_block()->MarkAsDeoptimizing();
+  HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
+  current_block()->Finish(ret);
+}
+
+
+Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
+  CodeStubGraphBuilder<ArrayNArgumentsConstructorStub> builder(this);
+  LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+  return chunk->Codegen(Code::COMPILED_STUB);
+}
+
 } }  // namespace v8::internal
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 9d4e21d15..0cb3d423f 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -73,6 +73,9 @@ namespace internal {
   V(CEntry)                              \
   V(JSEntry)                             \
   V(KeyedLoadElement)                    \
+  V(ArrayNoArgumentConstructor)          \
+  V(ArraySingleArgumentConstructor)      \
+  V(ArrayNArgumentsConstructor)          \
   V(KeyedStoreElement)                   \
   V(DebuggerStatement)                   \
   V(StringDictionaryLookup)              \
@@ -1289,6 +1292,63 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
 };
 
 
+class ArrayNoArgumentConstructorStub : public HydrogenCodeStub {
+ public:
+  ArrayNoArgumentConstructorStub() {
+  }
+
+  Major MajorKey() { return ArrayNoArgumentConstructor; }
+  int MinorKey() { return 0; }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
+};
+
+
+class ArraySingleArgumentConstructorStub : public HydrogenCodeStub {
+ public:
+  ArraySingleArgumentConstructorStub() {
+  }
+
+  Major MajorKey() { return ArraySingleArgumentConstructor; }
+  int MinorKey() { return 0; }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
+};
+
+
+class ArrayNArgumentsConstructorStub : public HydrogenCodeStub {
+ public:
+  ArrayNArgumentsConstructorStub() {
+  }
+
+  Major MajorKey() { return ArrayNArgumentsConstructor; }
+  int MinorKey() { return 0; }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
+};
+
+
 class KeyedStoreElementStub : public PlatformCodeStub {
  public:
   KeyedStoreElementStub(bool is_js_array,
diff --git a/src/elements.cc b/src/elements.cc
index e37ae58c4..9deef6061 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -27,6 +27,7 @@
 
 #include "v8.h"
 
+#include "arguments.h"
 #include "objects.h"
 #include "elements.h"
 #include "utils.h"
@@ -1973,4 +1974,100 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
+
+
+MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
+    JSArray* array, Arguments* args) {
+  Heap* heap = array->GetIsolate()->heap();
+
+  // Optimize the case where there is one argument and the argument is a
+  // small smi.
+  if (args->length() == 1) {
+    Object* obj = (*args)[0];
+    if (obj->IsSmi()) {
+      int len = Smi::cast(obj)->value();
+      if (len > 0 && len < JSObject::kInitialMaxFastElementArray) {
+        ElementsKind elements_kind = array->GetElementsKind();
+        MaybeObject* maybe_array = array->Initialize(len, len);
+        if (maybe_array->IsFailure()) return maybe_array;
+
+        if (!IsFastHoleyElementsKind(elements_kind)) {
+          elements_kind = GetHoleyElementsKind(elements_kind);
+          maybe_array = array->TransitionElementsKind(elements_kind);
+          if (maybe_array->IsFailure()) return maybe_array;
+        }
+
+        return array;
+      } else if (len == 0) {
+        return array->Initialize(JSArray::kPreallocatedArrayElements);
+      }
+    }
+
+    // Take the argument as the length.
+    MaybeObject* maybe_obj = array->Initialize(0);
+    if (!maybe_obj->To(&obj)) return maybe_obj;
+
+    return array->SetElementsLength((*args)[0]);
+  }
+
+  // Optimize the case where there are no parameters passed.
+  if (args->length() == 0) {
+    return array->Initialize(JSArray::kPreallocatedArrayElements);
+  }
+
+  // Set length and elements on the array.
+  int number_of_elements = args->length();
+  MaybeObject* maybe_object =
+      array->EnsureCanContainElements(args, 0, number_of_elements,
+                                      ALLOW_CONVERTED_DOUBLE_ELEMENTS);
+  if (maybe_object->IsFailure()) return maybe_object;
+
+  // Allocate an appropriately typed elements array.
+  MaybeObject* maybe_elms;
+  ElementsKind elements_kind = array->GetElementsKind();
+  if (IsFastDoubleElementsKind(elements_kind)) {
+    maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
+        number_of_elements);
+  } else {
+    maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+  }
+  FixedArrayBase* elms;
+  if (!maybe_elms->To(&elms)) return maybe_elms;
+
+  // Fill in the content
+  switch (array->GetElementsKind()) {
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_SMI_ELEMENTS: {
+      FixedArray* smi_elms = FixedArray::cast(elms);
+      for (int index = 0; index < number_of_elements; index++) {
+        smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER);
+      }
+      break;
+    }
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_ELEMENTS: {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+      FixedArray* object_elms = FixedArray::cast(elms);
+      for (int index = 0; index < number_of_elements; index++) {
+        object_elms->set(index, (*args)[index], mode);
+      }
+      break;
+    }
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS: {
+      FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+      for (int index = 0; index < number_of_elements; index++) {
+        double_elms->set(index, (*args)[index]->Number());
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  array->set_elements(elms);
+  array->set_length(Smi::FromInt(number_of_elements));
+  return array;
+}
+
 } }  // namespace v8::internal
diff --git a/src/elements.h b/src/elements.h
index e31ceb44b..6353aaecf 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -200,6 +200,9 @@ class ElementsAccessor {
 void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
                      bool allow_appending = false);
 
+MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
+    JSArray* array, Arguments* args);
+
 } }  // namespace v8::internal
 
 #endif  // V8_ELEMENTS_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index cbc38b071..dcc6c2cd1 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -222,6 +222,8 @@ DEFINE_bool(unreachable_code_elimination, false,
             "eliminate unreachable code (hidden behind soft deopts)")
 DEFINE_bool(track_allocation_sites, true,
             "Use allocation site info to reduce transitions")
+DEFINE_bool(optimize_constructed_arrays, false,
+            "Use allocation site info on constructed arrays")
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
 DEFINE_bool(optimize_closures, true, "optimize closures")
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 87d741fb3..916a6a293 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -456,6 +456,15 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
 }
 
 
+MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite(
+    ElementsKind elements_kind,
+    Handle<Object> allocation_site_payload) {
+  return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0,
+      allocation_site_payload,
+      DONT_INITIALIZE_ARRAY_ELEMENTS);
+}
+
+
 bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
   const char* collector_reason = NULL;
   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
diff --git a/src/heap.cc b/src/heap.cc
index e1260aed8..7ce926312 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -3904,6 +3904,28 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
 }
 
 
+MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
+    Handle<Object> allocation_site_info_payload) {
+  ASSERT(gc_state_ == NOT_IN_GC);
+  ASSERT(map->instance_type() != MAP_TYPE);
+  // If allocation failures are disallowed, we may allocate in a different
+  // space when new space is full and the object is not a large object.
+  AllocationSpace retry_space =
+      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+  int size = map->instance_size() + AllocationSiteInfo::kSize;
+  Object* result;
+  MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+  if (!maybe_result->ToObject(&result)) return maybe_result;
+  // No need for write barrier since object is white and map is in old space.
+  HeapObject::cast(result)->set_map_no_write_barrier(map);
+  AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
+      reinterpret_cast<Address>(result) + map->instance_size());
+  alloc_info->set_map_no_write_barrier(allocation_site_info_map());
+  alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
+  return result;
+}
+
+
 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
   ASSERT(gc_state_ == NOT_IN_GC);
   ASSERT(map->instance_type() != MAP_TYPE);
@@ -3911,11 +3933,10 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
   // space when new space is full and the object is not a large object.
   AllocationSpace retry_space =
       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+  int size = map->instance_size();
   Object* result;
-  { MaybeObject* maybe_result =
-        AllocateRaw(map->instance_size(), space, retry_space);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+  if (!maybe_result->ToObject(&result)) return maybe_result;
   // No need for write barrier since object is white and map is in old space.
   HeapObject::cast(result)->set_map_no_write_barrier(map);
   return result;
@@ -4183,10 +4204,48 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
   Object* obj;
-  { MaybeObject* maybe_obj = Allocate(map, space);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  MaybeObject* maybe_obj = Allocate(map, space);
+  if (!maybe_obj->To(&obj)) return maybe_obj;
+
+  // Initialize the JSObject.
+  InitializeJSObjectFromMap(JSObject::cast(obj),
+                            FixedArray::cast(properties),
+                            map);
+  ASSERT(JSObject::cast(obj)->HasFastElements());
+  return obj;
+}
+
+
+MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
+    Handle<Object> allocation_site_info_payload) {
+  // JSFunctions should be allocated using AllocateFunction to be
+  // properly initialized.
+  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
+
+  // Both types of global objects should be allocated using
+  // AllocateGlobalObject to be properly initialized.
+  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
+
+  // Allocate the backing storage for the properties.
+  int prop_size =
+      map->pre_allocated_property_fields() +
+      map->unused_property_fields() -
+      map->inobject_properties();
+  ASSERT(prop_size >= 0);
+  Object* properties;
+  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
+    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
   }
 
+  // Allocate the JSObject.
+  AllocationSpace space = NEW_SPACE;
+  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+  Object* obj;
+  MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
+      allocation_site_info_payload);
+  if (!maybe_obj->To(&obj)) return maybe_obj;
+
   // Initialize the JSObject.
   InitializeJSObjectFromMap(JSObject::cast(obj),
                             FixedArray::cast(properties),
@@ -4219,6 +4278,51 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
 }
 
 
+MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
+    Handle<Object> allocation_site_info_payload) {
+  // Allocate the initial map if absent.
+  if (!constructor->has_initial_map()) {
+    Object* initial_map;
+    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
+      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
+    }
+    constructor->set_initial_map(Map::cast(initial_map));
+    Map::cast(initial_map)->set_constructor(constructor);
+  }
+  // Allocate the object based on the constructor's initial map, or the
+  // payload advice.
+  Map* initial_map = constructor->initial_map();
+
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+      *allocation_site_info_payload);
+  Smi* smi = Smi::cast(cell->value());
+  ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+  AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
+  if (to_kind != initial_map->elements_kind()) {
+    MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap(
+        isolate(), to_kind);
+    if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
+    // Possibly alter the mode, since we found an updated elements kind
+    // in the type info cell.
+    mode = AllocationSiteInfo::GetMode(to_kind);
+  }
+
+  MaybeObject* result;
+  if (mode == TRACK_ALLOCATION_SITE) {
+    result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
+        allocation_site_info_payload);
+  } else {
+    result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
+  }
+#ifdef DEBUG
+  // Make sure result is NOT a global object if valid.
+  Object* non_failure;
+  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
+#endif
+  return result;
+}
+
+
 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
   // Allocate a fresh map. Modules do not have a prototype.
   Map* map;
@@ -4240,11 +4344,14 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
     int capacity,
     ArrayStorageAllocationMode mode,
     PretenureFlag pretenure) {
-  ASSERT(capacity >= length);
   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
   JSArray* array;
   if (!maybe_array->To(&array)) return maybe_array;
 
+  // TODO(mvstanton): this body of code is duplicated in AllocateJSArrayStorage
+  // for performance reasons.
+  ASSERT(capacity >= length);
+
   if (capacity == 0) {
     array->set_length(Smi::FromInt(0));
     array->set_elements(empty_fixed_array());
@@ -4277,6 +4384,60 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
 }
 
 
+MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
+    ElementsKind elements_kind,
+    int length,
+    int capacity,
+    Handle<Object> allocation_site_payload,
+    ArrayStorageAllocationMode mode) {
+  MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
+      allocation_site_payload);
+  JSArray* array;
+  if (!maybe_array->To(&array)) return maybe_array;
+  return AllocateJSArrayStorage(array, length, capacity, mode);
+}
+
+
+MaybeObject* Heap::AllocateJSArrayStorage(
+    JSArray* array,
+    int length,
+    int capacity,
+    ArrayStorageAllocationMode mode) {
+  ASSERT(capacity >= length);
+
+  if (capacity == 0) {
+    array->set_length(Smi::FromInt(0));
+    array->set_elements(empty_fixed_array());
+    return array;
+  }
+
+  FixedArrayBase* elms;
+  MaybeObject* maybe_elms = NULL;
+  ElementsKind elements_kind = array->GetElementsKind();
+  if (IsFastDoubleElementsKind(elements_kind)) {
+    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
+    } else {
+      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
+    }
+  } else {
+    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+      maybe_elms = AllocateUninitializedFixedArray(capacity);
+    } else {
+      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+      maybe_elms = AllocateFixedArrayWithHoles(capacity);
+    }
+  }
+  if (!maybe_elms->To(&elms)) return maybe_elms;
+
+  array->set_elements(elms);
+  array->set_length(Smi::FromInt(length));
+  return array;
+}
+
+
 MaybeObject* Heap::AllocateJSArrayWithElements(
     FixedArrayBase* elements,
     ElementsKind elements_kind,
@@ -4407,8 +4568,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
 }
 
 
-MaybeObject* Heap::CopyJSObject(JSObject* source,
-                                AllocationSiteMode mode) {
+MaybeObject* Heap::CopyJSObject(JSObject* source) {
   // Never used to copy functions. If functions need to be copied we
   // have to be careful to clear the literals array.
   SLOW_ASSERT(!source->IsJSFunction());
@@ -4418,9 +4578,81 @@ MaybeObject* Heap::CopyJSObject(JSObject* source,
   int object_size = map->instance_size();
   Object* clone;
 
-  bool track_origin = mode == TRACK_ALLOCATION_SITE &&
-      map->CanTrackAllocationSite();
+  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
+  // If we're forced to always allocate, we use the general allocation
+  // functions which may leave us with an object in old space.
+  if (always_allocate()) {
+    { MaybeObject* maybe_clone =
+          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
+    }
+    Address clone_address = HeapObject::cast(clone)->address();
+    CopyBlock(clone_address,
+              source->address(),
+              object_size);
+    // Update write barrier for all fields that lie beyond the header.
+    RecordWrites(clone_address,
+                 JSObject::kHeaderSize,
+                 (object_size - JSObject::kHeaderSize) / kPointerSize);
+  } else {
+    wb_mode = SKIP_WRITE_BARRIER;
+
+    { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
+      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
+    }
+    SLOW_ASSERT(InNewSpace(clone));
+    // Since we know the clone is allocated in new space, we can copy
+    // the contents without worrying about updating the write barrier.
+    CopyBlock(HeapObject::cast(clone)->address(),
+              source->address(),
+              object_size);
+  }
+
+  SLOW_ASSERT(
+      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
+  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
+  FixedArray* properties = FixedArray::cast(source->properties());
+  // Update elements if necessary.
+  if (elements->length() > 0) {
+    Object* elem;
+    { MaybeObject* maybe_elem;
+      if (elements->map() == fixed_cow_array_map()) {
+        maybe_elem = FixedArray::cast(elements);
+      } else if (source->HasFastDoubleElements()) {
+        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
+      } else {
+        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
+      }
+      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
+    }
+    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
+  }
+  // Update properties if necessary.
+  if (properties->length() > 0) {
+    Object* prop;
+    { MaybeObject* maybe_prop = CopyFixedArray(properties);
+      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
+    }
+    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
+  }
+  // Return the new clone.
+  return clone;
+}
+
+
+MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
+  // Never used to copy functions. If functions need to be copied we
+  // have to be careful to clear the literals array.
+  SLOW_ASSERT(!source->IsJSFunction());
+
+  // Make the clone.
+  Map* map = source->map();
+  int object_size = map->instance_size();
+  Object* clone;
+
+  ASSERT(map->CanTrackAllocationSite());
+  ASSERT(map->instance_type() == JS_ARRAY_TYPE);
 
   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
 
   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   int adjusted_object_size = object_size;
   if (always_allocate()) {
     // We'll only track origin if we are certain to allocate in new space
-    if (track_origin) {
-      const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
-      if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
-        adjusted_object_size += AllocationSiteInfo::kSize;
-      }
+    const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
+    if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
+      adjusted_object_size += AllocationSiteInfo::kSize;
     }
 
     { MaybeObject* maybe_clone =
           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
     Address clone_address = HeapObject::cast(clone)->address();
     CopyBlock(clone_address,
               source->address(),
               object_size);
     // Update write barrier for all fields that lie beyond the header.
-    RecordWrites(clone_address,
-                 JSObject::kHeaderSize,
-                 (object_size - JSObject::kHeaderSize) / kPointerSize);
+    int write_barrier_offset = adjusted_object_size > object_size
+        ? JSArray::kSize + AllocationSiteInfo::kSize
+        : JSObject::kHeaderSize;
+    if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
+      RecordWrites(clone_address,
+                   write_barrier_offset,
+                   (object_size - write_barrier_offset) / kPointerSize);
+    }
+
+    // Track allocation site information, if we failed to allocate it inline.
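+    // (adjusted_object_size == object_size means the inline info slot was
+    // not reserved above, so a separate AllocationSiteInfo struct is used.)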
+    if (InNewSpace(clone) &&
+        adjusted_object_size == object_size) {
+      MaybeObject* maybe_alloc_info =
+          AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
+      AllocationSiteInfo* alloc_info;
+      if (maybe_alloc_info->To(&alloc_info)) {
+        alloc_info->set_map_no_write_barrier(allocation_site_info_map());
+        alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
+      }
+    }
   } else {
     wb_mode = SKIP_WRITE_BARRIER;
-    if (track_origin) {
-      adjusted_object_size += AllocationSiteInfo::kSize;
-    }
+    adjusted_object_size += AllocationSiteInfo::kSize;
 
     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
@@ -4467,8 +4712,8 @@ MaybeObject* Heap::CopyJSObject(JSObject* source,
   if (adjusted_object_size > object_size) {
     AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
         reinterpret_cast<Address>(clone) + object_size);
-    alloc_info->set_map(allocation_site_info_map());
-    alloc_info->set_payload(source);
+    alloc_info->set_map_no_write_barrier(allocation_site_info_map());
+    alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
   }
 
   SLOW_ASSERT(
@@ -4900,6 +5145,25 @@ MaybeObject* Heap::AllocateJSArray(
 }
 
 
+MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
+    ElementsKind elements_kind,
+    Handle<Object> allocation_site_info_payload) {
+  Context* native_context = isolate()->context()->native_context();
+  JSFunction* array_function = native_context->array_function();
+  Map* map = array_function->initial_map();
+  Object* maybe_map_array = native_context->js_array_maps();
+  if (!maybe_map_array->IsUndefined()) {
+    Object* maybe_transitioned_map =
+        FixedArray::cast(maybe_map_array)->get(elements_kind);
+    if (!maybe_transitioned_map->IsUndefined()) {
+      map = Map::cast(maybe_transitioned_map);
+    }
+  }
+  return AllocateJSObjectFromMapWithAllocationSite(map,
+      allocation_site_info_payload);
+}
+
+
 MaybeObject* Heap::AllocateEmptyFixedArray() {
   int size = FixedArray::SizeFor(0);
   Object* result;
diff --git a/src/heap.h b/src/heap.h
index 05f8b634b..a5ff86ea0 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -601,7 +601,12 @@ class Heap {
   // failed.
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* AllocateJSObject(
-      JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
+      JSFunction* constructor,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite(
+      JSFunction* constructor,
+      Handle<Object> allocation_site_info_payload);
 
   MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
                                                 ScopeInfo* scope_info);
@@ -615,6 +620,10 @@ class Heap {
                                      pretenure);
   }
 
+  inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite(
+      ElementsKind elements_kind,
+      Handle<Object> allocation_site_payload);
+
   // Allocate a JSArray with a specified length but elements that are left
   // uninitialized.
   MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
@@ -624,6 +633,19 @@ class Heap {
       ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
       PretenureFlag pretenure = NOT_TENURED);
 
+  MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorageWithAllocationSite(
+      ElementsKind elements_kind,
+      int length,
+      int capacity,
+      Handle<Object> allocation_site_payload,
+      ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
+  MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
+      JSArray* array,
+      int length,
+      int capacity,
+      ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
   // Allocate a JSArray with no elements
   MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
       FixedArrayBase* array_base,
@@ -640,9 +662,9 @@ class Heap {
   // Returns a deep copy of the JavaScript object.
   // Properties and elements are copied too.
   // Returns failure if allocation failed.
-  MUST_USE_RESULT MaybeObject* CopyJSObject(
-      JSObject* source,
-      AllocationSiteMode mode = DONT_TRACK_ALLOCATION_SITE);
+  MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
+
+  MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(JSObject* source);
 
   // Allocates the function prototype.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -685,12 +707,18 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
       Map* map, PretenureFlag pretenure = NOT_TENURED);
 
+  MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
+      Map* map, Handle<Object> allocation_site_info_payload);
+
   // Allocates a heap object based on the map.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this function does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
 
+  MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map,
+      AllocationSpace space, Handle<Object> allocation_site_info_payload);
+
   // Allocates a JS Map in the heap.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
@@ -2035,6 +2063,10 @@ class Heap {
       ElementsKind elements_kind,
       PretenureFlag pretenure = NOT_TENURED);
 
+  MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite(
+      ElementsKind elements_kind,
+      Handle<Object> allocation_site_info_payload);
+
   // Allocate empty fixed array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index ded6ae34e..cef8a541d 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -83,6 +83,7 @@ class LChunkBuilder;
   V(CallKnownGlobal)                           \
   V(CallNamed)                                 \
   V(CallNew)                                   \
+  V(CallNewArray)                              \
   V(CallRuntime)                               \
   V(CallStub)                                  \
   V(Change)                                    \
@@ -2210,6 +2211,25 @@ class HCallNew: public HBinaryCall {
 };
 
 
+class HCallNewArray: public HCallNew {
+ public:
+  HCallNewArray(HValue* context, HValue* constructor, int argument_count,
+                Handle<JSGlobalPropertyCell> type_cell)
+      : HCallNew(context, constructor, argument_count),
+        type_cell_(type_cell) {
+  }
+
+  Handle<JSGlobalPropertyCell> property_cell() const {
+    return type_cell_;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
+
+ private:
+  Handle<JSGlobalPropertyCell> type_cell_;
+};
+
+
 class HCallRuntime: public HCall<1> {
  public:
   HCallRuntime(HValue* context,
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 79a1008ab..b34b644d0 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -8490,8 +8490,21 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
     CHECK_ALIVE(VisitArgument(expr->expression()));
     HValue* constructor = HPushArgument::cast(Top())->argument();
     CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-    HInstruction* call =
-        new(zone()) HCallNew(context, constructor, argument_count);
+    HCallNew* call;
+    if (FLAG_optimize_constructed_arrays &&
+        !(expr->target().is_null()) &&
+        *(expr->target()) == isolate()->global_context()->array_function()) {
+      Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
+      ASSERT(feedback->IsSmi());
+      Handle<JSGlobalPropertyCell> cell =
+          isolate()->factory()->NewJSGlobalPropertyCell(feedback);
+      AddInstruction(new(zone()) HCheckFunction(constructor,
+          Handle<JSFunction>(isolate()->global_context()->array_function())));
+      call = new(zone()) HCallNewArray(context, constructor, argument_count,
+                                       cell);
+    } else {
+      call = new(zone()) HCallNew(context, constructor, argument_count);
+    }
     Drop(argument_count);
     call->set_position(expr->position());
     return ast_context()->ReturnInstruction(call, expr->id());
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index c8653f499..bcd713dce 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1479,34 +1479,64 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
 
 void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argc
+  //  -- ebx : type info cell
   //  -- edi : constructor
   //  -- esp[0] : return address
   //  -- esp[4] : last argument
   // -----------------------------------
-  Label generic_constructor;
-
   if (FLAG_debug_code) {
     // The array construct code is only set for the global and natives
     // builtin Array functions which always have maps.
 
     // Initial map for the builtin Array function should be a map.
-    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
-    __ test(ebx, Immediate(kSmiTagMask));
+    __ test(ecx, Immediate(kSmiTagMask));
     __ Assert(not_zero, "Unexpected initial map for Array function");
-    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ CmpObjectType(ecx, MAP_TYPE, ecx);
     __ Assert(equal, "Unexpected initial map for Array function");
-  }
 
-  // Run the native code for the Array function called as constructor.
-  ArrayNativeCode(masm, true, &generic_constructor);
+    // We should either have undefined in ebx or a valid jsglobalpropertycell
+    Label okay_here;
+    Handle<Object> undefined_sentinel(
+        masm->isolate()->heap()->undefined_value(), masm->isolate());
+    Handle<Map> global_property_cell_map(
+        masm->isolate()->heap()->global_property_cell_map());
+    __ cmp(ebx, Immediate(undefined_sentinel));
+    __ j(equal, &okay_here);
+    __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map));
+    __ Assert(equal, "Expected property cell in register ebx");
+    __ bind(&okay_here);
+  }
 
-  // Jump to the generic construct code in case the specialized code cannot
-  // handle the construction.
-  __ bind(&generic_constructor);
-  Handle<Code> generic_construct_stub =
-      masm->isolate()->builtins()->JSConstructStubGeneric();
-  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+  if (FLAG_optimize_constructed_arrays) {
+    Label not_zero_case, not_one_case;
+    __ test(eax, eax);
+    __ j(not_zero, &not_zero_case);
+    ArrayNoArgumentConstructorStub no_argument_stub;
+    __ TailCallStub(&no_argument_stub);
+
+    __ bind(&not_zero_case);
+    __ cmp(eax, 1);
+    __ j(greater, &not_one_case);
+    ArraySingleArgumentConstructorStub single_argument_stub;
+    __ TailCallStub(&single_argument_stub);
+
+    __ bind(&not_one_case);
+    ArrayNArgumentsConstructorStub n_argument_stub;
+    __ TailCallStub(&n_argument_stub);
+  } else {
+    Label generic_constructor;
+    // Run the native code for the Array function called as constructor.
+    ArrayNativeCode(masm, true, &generic_constructor);
+
+    // Jump to the generic construct code in case the specialized code cannot
+    // handle the construction.
+ __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET); + } } diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index a52685bf6..a2237e7a4 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -37,6 +37,7 @@ #include "runtime.h" #include "stub-cache.h" #include "codegen.h" +#include "runtime.h" namespace v8 { namespace internal { @@ -60,6 +61,7 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( static Register registers[] = { edx, ecx }; descriptor->register_param_count_ = 2; descriptor->register_params_ = registers; + descriptor->stack_parameter_count_ = NULL; descriptor->deoptimization_handler_ = FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); } @@ -76,6 +78,44 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor( } +static void InitializeArrayConstructorDescriptor(Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + // register state + // edi -- constructor function + // ebx -- type info cell with elements kind + // eax -- number of arguments to the constructor function + static Register registers[] = { edi, ebx }; + descriptor->register_param_count_ = 2; + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &eax; + descriptor->register_params_ = registers; + descriptor->extra_expression_stack_count_ = 1; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(ArrayConstructor_StubFailure); +} + + +void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + #define __ ACCESS_MASM(masm) void ToNumberStub::Generate(MacroAssembler* masm) { @@ -4787,12 +4827,13 @@ void InterruptStub::Generate(MacroAssembler* masm) { } -static void GenerateRecordCallTarget(MacroAssembler* masm) { +static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. // ebx : cache cell for call target // edi : the function to call + ASSERT(!FLAG_optimize_constructed_arrays); Isolate* isolate = masm->isolate(); Label initialize, done; @@ -4825,6 +4866,82 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { } +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a global property cell. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // ebx : cache cell for call target + // edi : the function to call + ASSERT(FLAG_optimize_constructed_arrays); + Isolate* isolate = masm->isolate(); + Label initialize, done, miss, megamorphic, not_array_function; + + // Load the cache state into ecx. 
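+ // The cell value is one of: the uninitialized sentinel (the hole), the + // megamorphic sentinel (undefined), a JSFunction for the monomorphic case, + // or a Smi encoding an ElementsKind when the call site is Array().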
+ __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. + __ cmp(ecx, edi); + __ j(equal, &done); + __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate))); + __ j(equal, &done); + + // Special handling of the Array() function, which caches not only the + // monomorphic Array function but the initial ElementsKind with special + // sentinels + Handle<Object> terminal_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(isolate, + LAST_FAST_ELEMENTS_KIND); + __ cmp(ecx, Immediate(terminal_kind_sentinel)); + __ j(above, &miss); + // Load the global or builtins object from the current context + __ LoadGlobalContext(ecx); + // Make sure the function is the Array() function + __ cmp(edi, Operand(ecx, + Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); + __ j(not_equal, &megamorphic); + __ jmp(&done); + + __ bind(&miss); + + // A monomorphic miss (i.e., here the cache is not uninitialized) goes + // megamorphic. + __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate))); + __ j(equal, &initialize); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. + __ bind(&megamorphic); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), + Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate))); + __ jmp(&done, Label::kNear); + + // An uninitialized cache is patched with the function or sentinel to + // indicate the ElementsKind if function is the Array constructor. + __ bind(&initialize); + __ LoadGlobalContext(ecx); + // Make sure the function is the Array() function + __ cmp(edi, Operand(ecx, + Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); + __ j(not_equal, &not_array_function); + + // The target function is the Array constructor, install a sentinel value in + // the constructor's type info cell that will track the initial ElementsKind + // that should be used for the array when it's constructed. + Handle<Object> initial_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(isolate, + GetInitialFastElementsKind()); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), + Immediate(initial_kind_sentinel)); + __ jmp(&done); + + __ bind(&not_array_function); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi); + // No need for a write barrier here - cells are rescanned. + + __ bind(&done); +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { // ebx : cache cell for call target // edi : the function to call @@ -4856,7 +4973,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ j(not_equal, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Fast-case: Just invoke the function. @@ -4929,14 +5050,20 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ j(not_equal, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Jump to the function-specific construct stub. 
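+ // With FLAG_optimize_constructed_arrays, ebx still carries the type info + // cell into the construct stub, so a different scratch register is used.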
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); - __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset)); - __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize)); - __ jmp(ebx); + Register jmp_reg = FLAG_optimize_constructed_arrays ? ecx : ebx; + __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(jmp_reg, FieldOperand(jmp_reg, + SharedFunctionInfo::kConstructStubOffset)); + __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize)); + __ jmp(jmp_reg); // edi: called object // eax: number of arguments diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc index 20352cf44..d4b5d93c1 100644 --- a/src/ia32/lithium-codegen-ia32.cc +++ b/src/ia32/lithium-codegen-ia32.cc @@ -4146,12 +4146,32 @@ void LCodeGen::DoCallNew(LCallNew* instr) { ASSERT(ToRegister(instr->constructor()).is(edi)); ASSERT(ToRegister(instr->result()).is(eax)); + if (FLAG_optimize_constructed_arrays) { + // No cell in ebx for construct type feedback in optimized code + Handle<Object> undefined_value(isolate()->heap()->undefined_value(), + isolate()); + __ mov(ebx, Immediate(undefined_value)); + } CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); __ Set(eax, Immediate(instr->arity())); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); } +void LCodeGen::DoCallNewArray(LCallNewArray* instr) { + ASSERT(ToRegister(instr->context()).is(esi)); + ASSERT(ToRegister(instr->constructor()).is(edi)); + ASSERT(ToRegister(instr->result()).is(eax)); + ASSERT(FLAG_optimize_constructed_arrays); + + __ mov(ebx, instr->hydrogen()->property_cell()); + Handle<Code> array_construct_code = + isolate()->builtins()->ArrayConstructCode(); + __ Set(eax, Immediate(instr->arity())); + CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); +} + + void LCodeGen::DoCallRuntime(LCallRuntime* instr) { CallRuntime(instr->function(), instr->arity(), instr); } diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc index d5cf125eb..74580f314 100644 --- a/src/ia32/lithium-ia32.cc +++ b/src/ia32/lithium-ia32.cc @@ -367,6 +367,19 @@ void LCallNew::PrintDataTo(StringStream* stream) { } +void LCallNewArray::PrintDataTo(StringStream* stream) { + stream->Add("= "); + context()->PrintTo(stream); + stream->Add(" "); + constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); + ASSERT(hydrogen()->property_cell()->value()->IsSmi()); + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(hydrogen()->property_cell()->value())->value()); + stream->Add(" (%s) ", ElementsKindToString(kind)); +} + + void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintTo(stream); @@ -1221,6 +1234,16 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { } +LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { + ASSERT(FLAG_optimize_constructed_arrays); + LOperand* context = UseFixed(instr->context(), esi); + LOperand* constructor = UseFixed(instr->constructor(), edi); + argument_count_ -= instr->argument_count(); + LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { LOperand* context = UseFixed(instr->context(), esi); LOperand* function = UseFixed(instr->function(), edi); diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h index ff4c477bc..cae42f37e 100644 --- a/src/ia32/lithium-ia32.h +++ b/src/ia32/lithium-ia32.h @@ -62,6 +62,7 @@ class LCodeGen; V(CallKnownGlobal) \ 
V(CallNamed) \ V(CallNew) \ + V(CallNewArray) \ V(CallRuntime) \ V(CallStub) \ V(CheckFunction) \ @@ -1847,6 +1848,25 @@ class LCallNew: public LTemplateInstruction<1, 2, 0> { }; +class LCallNewArray: public LTemplateInstruction<1, 2, 0> { + public: + LCallNewArray(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; + } + + LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") + DECLARE_HYDROGEN_ACCESSOR(CallNewArray) + + virtual void PrintDataTo(StringStream* stream); + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + class LCallRuntime: public LTemplateInstruction<1, 1, 0> { public: explicit LCallRuntime(LOperand* context) { diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc index 5e4f21129..587699f66 100644 --- a/src/ia32/macro-assembler-ia32.cc +++ b/src/ia32/macro-assembler-ia32.cc @@ -2409,12 +2409,23 @@ void MacroAssembler::LoadInitialArrayMap( } +void MacroAssembler::LoadGlobalContext(Register global_context) { + // Load the global or builtins object from the current context. + mov(global_context, + Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + // Load the native context from the global or builtins object. + mov(global_context, + FieldOperand(global_context, GlobalObject::kNativeContextOffset)); +} + + void MacroAssembler::LoadGlobalFunction(int index, Register function) { // Load the global or builtins object from the current context. mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); // Load the native context from the global or builtins object. - mov(function, FieldOperand(function, GlobalObject::kNativeContextOffset)); + mov(function, + FieldOperand(function, GlobalObject::kNativeContextOffset)); // Load the function from the native context. mov(function, Operand(function, Context::SlotOffset(index))); } diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h index 8c893dcda..65899cedb 100644 --- a/src/ia32/macro-assembler-ia32.h +++ b/src/ia32/macro-assembler-ia32.h @@ -249,6 +249,8 @@ class MacroAssembler: public Assembler { Register map_out, bool can_have_holes); + void LoadGlobalContext(Register global_context); + // Load the global function with the given index. 
void LoadGlobalFunction(int index, Register function); diff --git a/src/objects-inl.h b/src/objects-inl.h index 7c7facef4..d1bc520ac 100644 --- a/src/objects-inl.h +++ b/src/objects-inl.h @@ -5747,6 +5747,12 @@ Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) { } +Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate, + ElementsKind elements_kind) { + return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate); +} + + Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) { return heap->the_hole_value(); } diff --git a/src/objects-printer.cc b/src/objects-printer.cc index 9c3a35d2e..32940c487 100644 --- a/src/objects-printer.cc +++ b/src/objects-printer.cc @@ -1039,7 +1039,18 @@ void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) { void AllocationSiteInfo::AllocationSiteInfoPrint(FILE* out) { HeapObject::PrintHeader(out, "AllocationSiteInfo"); PrintF(out, " - payload: "); - if (payload()->IsJSArray()) { + if (payload()->IsJSGlobalPropertyCell()) { + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload()); + Object* cell_contents = cell->value(); + if (cell_contents->IsSmi()) { + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(cell_contents)->value()); + PrintF(out, "Array allocation with ElementsKind "); + PrintElementsKind(out, kind); + PrintF(out, "\n"); + return; + } + } else if (payload()->IsJSArray()) { PrintF(out, "Array literal "); payload()->ShortPrint(out); PrintF(out, "\n"); diff --git a/src/objects.cc b/src/objects.cc index df72d609b..f001d3c6f 100644 --- a/src/objects.cc +++ b/src/objects.cc @@ -1,4 +1,4 @@ -// Copyright 2012 the V8 project authors. All rights reserved. +// Copyright 2013 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -7530,6 +7530,21 @@ AllocationSiteInfo* AllocationSiteInfo::FindForJSObject(JSObject* object) { } +bool AllocationSiteInfo::GetElementsKindPayload(ElementsKind* kind) { + ASSERT(kind != NULL); + if (payload()->IsJSGlobalPropertyCell()) { + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload()); + Object* cell_contents = cell->value(); + if (cell_contents->IsSmi()) { + *kind = static_cast<ElementsKind>( + Smi::cast(cell_contents)->value()); + return true; + } + } + return false; +} + + // Heuristic: We only need to create allocation site info if the boilerplate // elements kind is the initial elements kind. AllocationSiteMode AllocationSiteInfo::GetMode( @@ -9353,19 +9368,10 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength( } -MaybeObject* JSArray::Initialize(int capacity) { - Heap* heap = GetHeap(); +MaybeObject* JSArray::Initialize(int capacity, int length) { ASSERT(capacity >= 0); - set_length(Smi::FromInt(0)); - FixedArray* new_elements; - if (capacity == 0) { - new_elements = heap->empty_fixed_array(); - } else { - MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity); - if (!maybe_obj->To(&new_elements)) return maybe_obj; - } - set_elements(new_elements); - return this; + return GetHeap()->AllocateJSArrayStorage(this, length, capacity, + INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); } @@ -10054,8 +10060,8 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, ? 
FAST_HOLEY_DOUBLE_ELEMENTS : FAST_DOUBLE_ELEMENTS; - MaybeObject* trans = PossiblyTransitionArrayBoilerplate(to_kind); - if (trans->IsFailure()) return trans; + MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind); + if (maybe_failure->IsFailure()) return maybe_failure; MaybeObject* maybe = SetFastDoubleElementsCapacityAndLength(new_capacity, array_length); @@ -10071,8 +10077,8 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; - MaybeObject* trans = PossiblyTransitionArrayBoilerplate(kind); - if (trans->IsFailure()) return trans; + MaybeObject* maybe_failure = UpdateAllocationSiteInfo(kind); + if (maybe_failure->IsFailure()) return maybe_failure; MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(), kind); @@ -10619,38 +10625,53 @@ Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object, } -MaybeObject* JSObject::PossiblyTransitionArrayBoilerplate( - ElementsKind to_kind) { - MaybeObject* ret = NULL; +MaybeObject* JSObject::UpdateAllocationSiteInfo(ElementsKind to_kind) { if (!FLAG_track_allocation_sites || !IsJSArray()) { - return ret; + return this; } AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(this); if (info == NULL) { - return ret; + return this; } - ASSERT(info->payload()->IsJSArray()); - JSArray* payload = JSArray::cast(info->payload()); - ElementsKind kind = payload->GetElementsKind(); - if (IsMoreGeneralElementsKindTransition(kind, to_kind)) { - // If the array is huge, it's not likely to be defined in a local - // function, so we shouldn't make new instances of it very often. - uint32_t length = 0; - CHECK(payload->length()->ToArrayIndex(&length)); - if (length <= 8 * 1024) { - ret = payload->TransitionElementsKind(to_kind); - if (FLAG_trace_track_allocation_sites) { - PrintF( - "AllocationSiteInfo: JSArray %p boilerplate updated %s->%s\n", - reinterpret_cast(this), - ElementsKindToString(kind), - ElementsKindToString(to_kind)); + if (info->payload()->IsJSArray()) { + JSArray* payload = JSArray::cast(info->payload()); + ElementsKind kind = payload->GetElementsKind(); + if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) { + // If the array is huge, it's not likely to be defined in a local + // function, so we shouldn't make new instances of it very often. 
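+ // Only boilerplates up to kMaximumArrayBytesToPretransition are updated.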
+ uint32_t length = 0; + CHECK(payload->length()->ToArrayIndex(&length)); + if (length <= AllocationSiteInfo::kMaximumArrayBytesToPretransition) { + if (FLAG_trace_track_allocation_sites) { + PrintF( + "AllocationSiteInfo: JSArray %p boilerplate updated %s->%s\n", + reinterpret_cast<void*>(this), + ElementsKindToString(kind), + ElementsKindToString(to_kind)); + } + return payload->TransitionElementsKind(to_kind); + } + } + } else if (info->payload()->IsJSGlobalPropertyCell()) { + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(info->payload()); + Object* cell_contents = cell->value(); + if (cell_contents->IsSmi()) { + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(cell_contents)->value()); + if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) { + if (FLAG_trace_track_allocation_sites) { + PrintF("AllocationSiteInfo: JSArray %p info updated %s->%s\n", + reinterpret_cast<void*>(this), + ElementsKindToString(kind), + ElementsKindToString(to_kind)); + } + cell->set_value(Smi::FromInt(to_kind)); + } + } + } - return ret; + return this; } @@ -10664,8 +10685,8 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) { if (from_kind == to_kind) return this; - MaybeObject* trans = PossiblyTransitionArrayBoilerplate(to_kind); - if (trans->IsFailure()) return trans; + MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind); + if (maybe_failure->IsFailure()) return maybe_failure; Isolate* isolate = GetIsolate(); if (elements() == isolate->heap()->empty_fixed_array() || diff --git a/src/objects.h b/src/objects.h index 53f4cfad4..79e2a5ee9 100644 --- a/src/objects.h +++ b/src/objects.h @@ -2051,7 +2051,7 @@ class JSObject: public JSReceiver { ElementsKind to_kind); MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind); - MUST_USE_RESULT MaybeObject* PossiblyTransitionArrayBoilerplate( + MUST_USE_RESULT MaybeObject* UpdateAllocationSiteInfo( ElementsKind to_kind); // Replaces an existing transition with a transition to a map with a FIELD. @@ -4203,6 +4203,11 @@ class TypeFeedbackCells: public FixedArray { // The object that indicates a megamorphic state. static inline Handle<Object> MegamorphicSentinel(Isolate* isolate); + // The object that indicates a monomorphic state of Array with + // ElementsKind + static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate, + ElementsKind elements_kind); + // A raw version of the uninitialized sentinel that's safe to read during // garbage collection (e.g., for patching the cache). static inline Object* RawUninitializedSentinel(Heap* heap); @@ -7083,7 +7088,9 @@ class AllocationSiteInfo: public Struct { static const int kPayloadOffset = HeapObject::kHeaderSize; static const int kSize = kPayloadOffset + kPointerSize; + static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024; + bool GetElementsKindPayload(ElementsKind* kind); private: DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSiteInfo); }; @@ -8405,7 +8412,7 @@ class JSArray: public JSObject { // Initialize the array with the given capacity. The function may // fail due to out-of-memory situations, but only if the requested // capacity is non-zero. - MUST_USE_RESULT MaybeObject* Initialize(int capacity); + MUST_USE_RESULT MaybeObject* Initialize(int capacity, int length = 0); // Initializes the array to a certain length. 
inline bool AllowsSetElementsLength(); diff --git a/src/runtime.cc b/src/runtime.cc index 8aabcb585..2215eadcf 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -80,9 +80,6 @@ namespace internal { RUNTIME_ASSERT(args[index]->Is##Type()); \ Handle<Type> name = args.at<Type>(index); -#define CONVERT_ARG_STUB_CALLER_ARGS(name) \ - Arguments* name = reinterpret_cast<Arguments*>(args[0]); - // Cast the given object to a boolean and store it in a variable with // the given name. If the object is not a boolean call IllegalOperation // and return. @@ -677,7 +674,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) { JSObject* boilerplate_object = JSObject::cast(*boilerplate); AllocationSiteMode mode = AllocationSiteInfo::GetMode( boilerplate_object->GetElementsKind()); - return isolate->heap()->CopyJSObject(boilerplate_object, mode); + if (mode == TRACK_ALLOCATION_SITE) { + return isolate->heap()->CopyJSObjectWithAllocationSite(boilerplate_object); + } + + return isolate->heap()->CopyJSObject(boilerplate_object); } diff --git a/src/type-info.cc b/src/type-info.cc index ac8572a10..fc88c3c55 100644 --- a/src/type-info.cc +++ b/src/type-info.cc @@ -163,8 +163,13 @@ bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) { bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) { - Handle<Object> value = GetInfo(expr->CallNewFeedbackId()); - return value->IsJSFunction(); + Handle<Object> info = GetInfo(expr->CallNewFeedbackId()); + if (info->IsSmi()) { + ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <= + LAST_FAST_ELEMENTS_KIND); + return Isolate::Current()->global_context()->array_function(); + } + return info->IsJSFunction(); } @@ -288,10 +293,33 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) { Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) { - return Handle<JSFunction>::cast(GetInfo(expr->CallNewFeedbackId())); + Handle<Object> info = GetInfo(expr->CallNewFeedbackId()); + if (info->IsSmi()) { + ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <= + LAST_FAST_ELEMENTS_KIND); + return Handle<JSFunction>(Isolate::Current()->global_context()-> + array_function()); + } else { + return Handle<JSFunction>::cast(info); + } } +ElementsKind TypeFeedbackOracle::GetCallNewElementsKind(CallNew* expr) { + Handle<Object> info = GetInfo(expr->CallNewFeedbackId()); + if (info->IsSmi()) { + return static_cast<ElementsKind>(Smi::cast(*info)->value()); + } else { + // TODO(mvstanton): avoided calling GetInitialFastElementsKind() for perf + // reasons. Is there a better fix? + if (FLAG_packed_arrays) { + return FAST_SMI_ELEMENTS; + } else { + return FAST_HOLEY_SMI_ELEMENTS; + } + } +} + Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap( ObjectLiteral::Property* prop) { ASSERT(ObjectLiteralStoreIsMonomorphic(prop)); diff --git a/src/type-info.h b/src/type-info.h index bcb6ca7d0..a962803da 100644 --- a/src/type-info.h +++ b/src/type-info.h @@ -277,6 +277,7 @@ class TypeFeedbackOracle: public ZoneObject { Handle<JSFunction> GetCallTarget(Call* expr); Handle<JSFunction> GetCallNewTarget(CallNew* expr); + ElementsKind GetCallNewElementsKind(CallNew* expr); Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop); @@ -323,8 +324,12 @@ class TypeFeedbackOracle: public ZoneObject { // Returns an element from the backing store. Returns undefined if // there is no information. + public: + // TODO(mvstanton): how to get this information without making the method + // public? 
Handle<Object> GetInfo(TypeFeedbackId ast_id); + private: Handle<Context> native_context_; Isolate* isolate_; Handle<UnseededNumberDictionary> dictionary_; diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc index f2f31aa8f..c3d757a1a 100644 --- a/src/x64/builtins-x64.cc +++ b/src/x64/builtins-x64.cc @@ -527,6 +527,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Invoke the code. if (is_construct) { + // No type feedback cell is available + Handle<Object> undefined_sentinel( + masm->isolate()->factory()->undefined_value()); + __ Move(rbx, undefined_sentinel); // Expects rdi to hold function pointer. CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); __ CallStub(&stub); @@ -1507,30 +1511,60 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { // -- rsp[0] : return address // -- rsp[8] : last argument // ----------------------------------- - Label generic_constructor; - if (FLAG_debug_code) { // The array construct code is only set for the builtin and internal // Array functions which always have a map. + // Initial map for the builtin Array function should be a map. - __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); + __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); // Will both indicate a NULL and a Smi. STATIC_ASSERT(kSmiTag == 0); - Condition not_smi = NegateCondition(masm->CheckSmi(rbx)); + Condition not_smi = NegateCondition(masm->CheckSmi(rcx)); __ Check(not_smi, "Unexpected initial map for Array function"); - __ CmpObjectType(rbx, MAP_TYPE, rcx); + __ CmpObjectType(rcx, MAP_TYPE, rcx); __ Check(equal, "Unexpected initial map for Array function"); - } - // Run the native code for the Array function called as constructor. - ArrayNativeCode(masm, &generic_constructor); + // We should either have undefined in rbx or a valid jsglobalpropertycell + Label okay_here; + Handle<Object> undefined_sentinel( + masm->isolate()->factory()->undefined_value()); + Handle<Map> global_property_cell_map( + masm->isolate()->heap()->global_property_cell_map()); + __ Cmp(rbx, undefined_sentinel); + __ j(equal, &okay_here); + __ Cmp(FieldOperand(rbx, 0), global_property_cell_map); + __ Assert(equal, "Expected property cell in register rbx"); + __ bind(&okay_here); + } - // Jump to the generic construct code in case the specialized code cannot - // handle the construction. - __ bind(&generic_constructor); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); - __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + if (FLAG_optimize_constructed_arrays) { + Label not_zero_case, not_one_case; + __ testq(rax, rax); + __ j(not_zero, &not_zero_case); + ArrayNoArgumentConstructorStub no_argument_stub; + __ TailCallStub(&no_argument_stub); + + __ bind(&not_zero_case); + __ cmpq(rax, Immediate(1)); + __ j(greater, &not_one_case); + ArraySingleArgumentConstructorStub single_argument_stub; + __ TailCallStub(&single_argument_stub); + + __ bind(&not_one_case); + ArrayNArgumentsConstructorStub n_argument_stub; + __ TailCallStub(&n_argument_stub); + } else { + Label generic_constructor; + // Run the native code for the Array function called as constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot - // handle the construction. 
+ __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + } } diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc index 638e23dbd..49db9fe16 100644 --- a/src/x64/code-stubs-x64.cc +++ b/src/x64/code-stubs-x64.cc @@ -73,6 +73,44 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor( } +static void InitializeArrayConstructorDescriptor(Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + // register state + // rdi -- constructor function + // rbx -- type info cell with elements kind + // rax -- number of arguments to the constructor function + static Register registers[] = { rdi, rbx }; + descriptor->register_param_count_ = 2; + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &rax; + descriptor->register_params_ = registers; + descriptor->extra_expression_stack_count_ = 1; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(ArrayConstructor_StubFailure); +} + + +void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + #define __ ACCESS_MASM(masm) void ToNumberStub::Generate(MacroAssembler* masm) { @@ -3852,12 +3890,13 @@ void InterruptStub::Generate(MacroAssembler* masm) { } -static void GenerateRecordCallTarget(MacroAssembler* masm) { +static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. // rbx : cache cell for call target // rdi : the function to call + ASSERT(!FLAG_optimize_constructed_arrays); Isolate* isolate = masm->isolate(); Label initialize, done; @@ -3890,6 +3929,79 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { } +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a global property cell. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // rbx : cache cell for call target + // rdi : the function to call + ASSERT(FLAG_optimize_constructed_arrays); + Isolate* isolate = masm->isolate(); + Label initialize, done, miss, megamorphic, not_array_function; + + // Load the cache state into rcx. + __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. 
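+ // As on ia32, the cell holds a JSFunction, one of the uninitialized or + // megamorphic sentinels, or a Smi encoding an ElementsKind for Array() sites.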
+ __ cmpq(rcx, rdi); + __ j(equal, &done); + __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate)); + __ j(equal, &done); + + // Special handling of the Array() function, which caches not only the + // monomorphic Array function but the initial ElementsKind with special + // sentinels + Handle<Object> terminal_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(isolate, + LAST_FAST_ELEMENTS_KIND); + __ Cmp(rcx, terminal_kind_sentinel); + __ j(not_equal, &miss); + // Make sure the function is the Array() function + __ LoadArrayFunction(rcx); + __ cmpq(rdi, rcx); + __ j(not_equal, &megamorphic); + __ jmp(&done); + + __ bind(&miss); + + // A monomorphic miss (i.e., here the cache is not uninitialized) goes + // megamorphic. + __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate)); + __ j(equal, &initialize); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. + __ bind(&megamorphic); + __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), + TypeFeedbackCells::MegamorphicSentinel(isolate)); + __ jmp(&done, Label::kNear); + + // An uninitialized cache is patched with the function or sentinel to + // indicate the ElementsKind if function is the Array constructor. + __ bind(&initialize); + // Make sure the function is the Array() function + __ LoadArrayFunction(rcx); + __ cmpq(rdi, rcx); + __ j(not_equal, &not_array_function); + + // The target function is the Array constructor, install a sentinel value in + // the constructor's type info cell that will track the initial ElementsKind + // that should be used for the array when it's constructed. + Handle<Object> initial_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(isolate, + GetInitialFastElementsKind()); + __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), + initial_kind_sentinel); + __ jmp(&done); + + __ bind(&not_array_function); + __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi); + // No need for a write barrier here - cells are rescanned. + + __ bind(&done); +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { // rbx : cache cell for call target // rdi : the function to call @@ -3921,7 +4033,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ j(not_equal, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Fast-case: Just invoke the function. @@ -3996,14 +4112,20 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ j(not_equal, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Jump to the function-specific construct stub. - __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset)); - __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize)); - __ jmp(rbx); + Register jmp_reg = FLAG_optimize_constructed_arrays ? 
rcx : rbx; + __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ movq(jmp_reg, FieldOperand(jmp_reg, + SharedFunctionInfo::kConstructStubOffset)); + __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize)); + __ jmp(jmp_reg); // rdi: called object // rax: number of arguments diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc index fd873b3d8..73e278905 100644 --- a/src/x64/lithium-codegen-x64.cc +++ b/src/x64/lithium-codegen-x64.cc @@ -3925,12 +3925,30 @@ void LCodeGen::DoCallNew(LCallNew* instr) { ASSERT(ToRegister(instr->constructor()).is(rdi)); ASSERT(ToRegister(instr->result()).is(rax)); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); __ Set(rax, instr->arity()); + if (FLAG_optimize_constructed_arrays) { + // No cell in ebx for construct type feedback in optimized code + Handle<Object> undefined_value(isolate()->factory()->undefined_value()); + __ Move(rbx, undefined_value); + } + CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); } +void LCodeGen::DoCallNewArray(LCallNewArray* instr) { + ASSERT(ToRegister(instr->constructor()).is(rdi)); + ASSERT(ToRegister(instr->result()).is(rax)); + ASSERT(FLAG_optimize_constructed_arrays); + + __ Set(rax, instr->arity()); + __ Move(rbx, instr->hydrogen()->property_cell()); + Handle<Code> array_construct_code = + isolate()->builtins()->ArrayConstructCode(); + CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); +} + + void LCodeGen::DoCallRuntime(LCallRuntime* instr) { CallRuntime(instr->function(), instr->arity(), instr); } diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc index 7d680ae1d..1ce7d2e69 100644 --- a/src/x64/lithium-x64.cc +++ b/src/x64/lithium-x64.cc @@ -357,6 +357,17 @@ void LCallNew::PrintDataTo(StringStream* stream) { } +void LCallNewArray::PrintDataTo(StringStream* stream) { + stream->Add("= "); + constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); + ASSERT(hydrogen()->property_cell()->value()->IsSmi()); + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(hydrogen()->property_cell()->value())->value()); + stream->Add(" (%s) ", ElementsKindToString(kind)); +} + + void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintTo(stream); @@ -1149,6 +1160,15 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { } +LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { + ASSERT(FLAG_optimize_constructed_arrays); + LOperand* constructor = UseFixed(instr->constructor(), rdi); + argument_count_ -= instr->argument_count(); + LCallNewArray* result = new(zone()) LCallNewArray(constructor); + return MarkAsCall(DefineFixed(result, rax), instr); +} + + LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { LOperand* function = UseFixed(instr->function(), rdi); argument_count_ -= instr->argument_count(); diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h index 0744ed5bf..2ee766820 100644 --- a/src/x64/lithium-x64.h +++ b/src/x64/lithium-x64.h @@ -68,6 +68,7 @@ class LCodeGen; V(CallKnownGlobal) \ V(CallNamed) \ V(CallNew) \ + V(CallNewArray) \ V(CallRuntime) \ V(CallStub) \ V(CheckFunction) \ @@ -1748,6 +1749,23 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> { }; +class LCallNewArray: public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallNewArray(LOperand* constructor) { + inputs_[0] = constructor; + } + + LOperand* constructor() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") + 
DECLARE_HYDROGEN_ACCESSOR(CallNewArray) + + virtual void PrintDataTo(StringStream* stream); + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + class LCallRuntime: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index 807e90dbe..5f467e3ce 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -4269,6 +4269,15 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) { } +void MacroAssembler::LoadArrayFunction(Register function) { + movq(function, + Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset)); + movq(function, + Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); +} + + void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, Register map) { // Load the initial map. The global functions all have initial maps. diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h index 35cc3f513..675372423 100644 --- a/src/x64/macro-assembler-x64.h +++ b/src/x64/macro-assembler-x64.h @@ -1154,6 +1154,7 @@ class MacroAssembler: public Assembler { // Load the global function with the given index. void LoadGlobalFunction(int index, Register function); + void LoadArrayFunction(Register function); // Load the initial map from the global function. The registers // function and map can be the same. diff --git a/test/mjsunit/allocation-site-info.js b/test/mjsunit/allocation-site-info.js index 3c8238710..d57fd321e 100644 --- a/test/mjsunit/allocation-site-info.js +++ b/test/mjsunit/allocation-site-info.js @@ -26,7 +26,11 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Flags: --allow-natives-syntax --smi-only-arrays --expose-gc -// Flags: --track-allocation-sites +// Flags: --track-allocation-sites --nooptimize-constructed-arrays + +// TODO(mvstanton): remove --nooptimize-constructed-arrays and enable +// the constructed array code below when the feature is turned on +// by default. // Test element kind of objects. // Since --smi-only-arrays affects builtins, its default setting at compile @@ -36,6 +40,7 @@ // enabled, this test takes the appropriate code path to check smi-only arrays. 
support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8)); +optimize_constructed_arrays = false; if (support_smi_only_arrays) { print("Tests include smi-only arrays."); @@ -43,6 +48,12 @@ if (support_smi_only_arrays) { print("Tests do NOT include smi-only arrays."); } +if (optimize_constructed_arrays) { + print("Tests include constructed array optimizations."); +} else { + print("Tests do NOT include constructed array optimizations."); +} + var elements_kind = { fast_smi_only : 'fast smi only elements', fast : 'fast elements', @@ -66,6 +77,11 @@ function getKind(obj) { if (%HasDictionaryElements(obj)) return elements_kind.dictionary; } +function isHoley(obj) { + if (%HasFastHoleyElements(obj)) return true; + return false; +} + function assertKind(expected, obj, name_opt) { if (!support_smi_only_arrays && expected == elements_kind.fast_smi_only) { @@ -74,9 +90,45 @@ function assertKind(expected, obj, name_opt) { assertEquals(expected, getKind(obj), name_opt); } +function assertHoley(obj, name_opt) { + assertEquals(true, isHoley(obj), name_opt); +} + +function assertNotHoley(obj, name_opt) { + assertEquals(false, isHoley(obj), name_opt); +} + if (support_smi_only_arrays) { + + obj = []; + assertNotHoley(obj); + assertKind(elements_kind.fast_smi_only, obj); + + obj = [1, 2, 3]; + assertNotHoley(obj); + assertKind(elements_kind.fast_smi_only, obj); + + obj = new Array(); + assertNotHoley(obj); + assertKind(elements_kind.fast_smi_only, obj); + + obj = new Array(0); + assertNotHoley(obj); + assertKind(elements_kind.fast_smi_only, obj); + + obj = new Array(2); + assertHoley(obj); + assertKind(elements_kind.fast_smi_only, obj); + + obj = new Array(1,2,3); + assertNotHoley(obj); + assertKind(elements_kind.fast_smi_only, obj); + + obj = new Array(1, "hi", 2, undefined); + assertNotHoley(obj); + assertKind(elements_kind.fast, obj); + function fastliteralcase(literal, value) { - // var literal = [1, 2, 3]; literal[0] = value; return literal; } @@ -104,7 +156,6 @@ if (support_smi_only_arrays) { // Verify that we will not pretransition the double->fast path. obj = fastliteralcase(get_standard_literal(), "elliot"); assertKind(elements_kind.fast, obj); - // This fails until we turn off optimistic transitions to the // most general elements kind seen on keyed stores. It's a goal // to turn it off, but for now we need it. 
@@ -123,4 +174,99 @@ if (support_smi_only_arrays) { assertKind(elements_kind.fast, obj); obj = fastliteralcase_smifast(2); assertKind(elements_kind.fast, obj); + + if (optimize_constructed_arrays) { + function newarraycase_smidouble(value) { + var a = new Array(); + a[0] = value; + return a; + } + + // Case: new Array() as allocation site, smi->double + obj = newarraycase_smidouble(1); + assertKind(elements_kind.fast_smi_only, obj); + obj = newarraycase_smidouble(1.5); + assertKind(elements_kind.fast_double, obj); + obj = newarraycase_smidouble(2); + assertKind(elements_kind.fast_double, obj); + + function newarraycase_smiobj(value) { + var a = new Array(); + a[0] = value; + return a; + } + + // Case: new Array() as allocation site, smi->fast + obj = newarraycase_smiobj(1); + assertKind(elements_kind.fast_smi_only, obj); + obj = newarraycase_smiobj("gloria"); + assertKind(elements_kind.fast, obj); + obj = newarraycase_smiobj(2); + assertKind(elements_kind.fast, obj); + + function newarraycase_length_smidouble(value) { + var a = new Array(3); + a[0] = value; + return a; + } + + // Case: new Array(length) as allocation site + obj = newarraycase_length_smidouble(1); + assertKind(elements_kind.fast_smi_only, obj); + obj = newarraycase_length_smidouble(1.5); + assertKind(elements_kind.fast_double, obj); + obj = newarraycase_length_smidouble(2); + assertKind(elements_kind.fast_double, obj); + + // Try to continue the transition to fast object, but + // we will not pretransition from double->fast, because + // it may hurt performance ("poisoning"). + obj = newarraycase_length_smidouble("coates"); + assertKind(elements_kind.fast, obj); + obj = newarraycase_length_smidouble(2.5); + // However, because of optimistic transitions, we will + // transition to the most general kind of elements kind found, + // therefore I can't count on this assert yet. + // assertKind(elements_kind.fast_double, obj); + + function newarraycase_length_smiobj(value) { + var a = new Array(3); + a[0] = value; + return a; + } + + // Case: new Array(length) as allocation site, smi->fast + obj = newarraycase_length_smiobj(1); + assertKind(elements_kind.fast_smi_only, obj); + obj = newarraycase_length_smiobj("gloria"); + assertKind(elements_kind.fast, obj); + obj = newarraycase_length_smiobj(2); + assertKind(elements_kind.fast, obj); + + function newarraycase_list_smidouble(value) { + var a = new Array(1, 2, 3); + a[0] = value; + return a; + } + + obj = newarraycase_list_smidouble(1); + assertKind(elements_kind.fast_smi_only, obj); + obj = newarraycase_list_smidouble(1.5); + assertKind(elements_kind.fast_double, obj); + obj = newarraycase_list_smidouble(2); + assertKind(elements_kind.fast_double, obj); + + function newarraycase_list_smiobj(value) { + var a = new Array(4, 5, 6); + a[0] = value; + return a; + } + + obj = newarraycase_list_smiobj(1); + assertKind(elements_kind.fast_smi_only, obj); + obj = newarraycase_list_smiobj("coates"); + assertKind(elements_kind.fast, obj); + obj = newarraycase_list_smiobj(2); + assertKind(elements_kind.fast, obj); + } } diff --git a/test/mjsunit/array-natives-elements.js b/test/mjsunit/array-natives-elements.js index f74f8be0d..b3a714109 100644 --- a/test/mjsunit/array-natives-elements.js +++ b/test/mjsunit/array-natives-elements.js @@ -27,6 +27,7 @@ // Flags: --allow-natives-syntax --smi-only-arrays // Flags: --noparallel-recompilation +// Flags: --notrack-allocation-sites // Test element kind of objects. 
// Since --smi-only-arrays affects builtins, its default setting at compile time diff --git a/test/mjsunit/elements-transition.js b/test/mjsunit/elements-transition.js index 0dffd3723..e28f3c3d6 100644 --- a/test/mjsunit/elements-transition.js +++ b/test/mjsunit/elements-transition.js @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// Flags: --allow-natives-syntax --smi-only-arrays +// Flags: --allow-natives-syntax --smi-only-arrays --notrack-allocation-sites support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8)); -- 2.34.1
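A minimal sketch of the feedback this patch records, assuming a d8 shell built with the flags above (makeArray and b are illustrative names, not part of the patch):

// Run with: d8 --allow-natives-syntax --track-allocation-sites --optimize-constructed-arrays
function makeArray(value) {
  var a = new Array();  // one 'new Array()' call site with one type info cell
  a[0] = value;
  return a;
}
makeArray(1.5);        // storing a double records a double ElementsKind in the cell
var b = makeArray(2);  // later arrays from this site start with the recorded kind
// %HasFastDoubleElements(b) is expected to be true here.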