// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- r2 : type info cell
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- Label generic_constructor;
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r1, r3, r4, MAP_TYPE);
__ Assert(eq, "Unexpected initial map for Array function");
- }
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
+ // We should either have undefined in r2 or a valid JSGlobalPropertyCell.
+ Label okay_here;
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ Handle<Map> global_property_cell_map(
+ masm->isolate()->heap()->global_property_cell_map());
+ __ cmp(r2, Operand(undefined_sentinel));
+ __ b(eq, &okay_here);
+ __ ldr(r3, FieldMemOperand(r2, 0));
+ __ cmp(r3, Operand(global_property_cell_map));
+ __ Assert(eq, "Expected property cell in register r2");
+ __ bind(&okay_here);
+ }
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
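+ // Dispatch on the argument count in r0 to the matching specialized stub.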
+ Label not_zero_case, not_one_case;
+ __ tst(r0, r0);
+ __ b(ne, &not_zero_case);
+ ArrayNoArgumentConstructorStub no_argument_stub;
+ __ TailCallStub(&no_argument_stub);
+
+ __ bind(&not_zero_case);
+ __ cmp(r0, Operand(1));
+ __ b(gt, &not_one_case);
+ ArraySingleArgumentConstructorStub single_argument_stub;
+ __ TailCallStub(&single_argument_stub);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub n_argument_stub;
+ __ TailCallStub(&n_argument_stub);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
}
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
+ // No type feedback cell is available
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ mov(r2, Operand(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
}
+static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // register state
+ // r1 -- constructor function
+ // r2 -- type info cell with elements kind
+ // r0 -- number of arguments to the constructor function
+ static Register registers[] = { r1, r2 };
+ descriptor->register_param_count_ = 2;
+ // stack param count needs (constructor pointer, and single argument)
+ descriptor->stack_parameter_count_ = &r0;
+ descriptor->register_params_ = registers;
+ descriptor->extra_expression_stack_count_ = 1;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ArrayConstructor_StubFailure);
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r1 : the function to call
// r2 : cache cell for call target
+ ASSERT(!FLAG_optimize_constructed_arrays);
Label done;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // r1 : the function to call
+ // r2 : cache cell for call target
+ ASSERT(FLAG_optimize_constructed_arrays);
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->the_hole_value());
+
+ // Load the cache state into r3.
+ __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(r3, r1);
+ __ b(eq, &done);
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &done);
+
+ // Special handling of the Array() function, which caches not only the
+ // monomorphic Array function but the initial ElementsKind with special
+ // sentinels
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ LAST_FAST_ELEMENTS_KIND);
+ __ cmp(r3, Operand(terminal_kind_sentinel));
+ __ b(ne, &miss);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(r3);
+ __ cmp(r1, r3);
+ __ b(ne, &megamorphic);
+ __ jmp(&done);
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ b(eq, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if the function is the Array constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(r3);
+ __ cmp(r1, r3);
+ __ b(ne, &not_array_function);
+
+ // The target function is the Array constructor; install a sentinel value in
+ // the constructor's type info cell that will track the initial ElementsKind
+ // that should be used for the array when it is constructed.
+ Handle<Object> initial_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ GetInitialFastElementsKind());
+ __ mov(r3, Operand(initial_kind_sentinel));
+ __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ b(&done);
+
+ __ bind(&not_array_function);
+ __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ // No need for a write barrier here - cells are rescanned.
+
+ __ bind(&done);
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
// r2 : cache cell for call target
__ b(ne, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Fast-case: Invoke the function now.
__ b(ne, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Jump to the function-specific construct stub.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
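+ // When FLAG_optimize_constructed_arrays is on, r2 still holds the type info
+ // cell needed by the array construct stub, so jump through r3 instead.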
+ Register jmp_reg = FLAG_optimize_constructed_arrays ? r3 : r2;
+ __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
// r0: number of arguments
// r1: called object
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ASSERT(hydrogen()->property_cell()->value()->IsSmi());
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(hydrogen()->property_cell()->value())->value());
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* constructor = UseFixed(instr->constructor(), r1);
+ argument_count_ -= instr->argument_count();
+ LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
};
+class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallNewArray(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ LOperand* constructor() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream);
+
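+ // argument_count() also counts the constructor pushed as the first
+ // argument, hence the -1.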
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ mov(r0, Operand(instr->arity()));
+ if (FLAG_optimize_constructed_arrays) {
+ // No cell in r2 for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
+ isolate());
+ __ mov(r2, Operand(undefined_value));
+ }
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->constructor()).is(r1));
+ ASSERT(ToRegister(instr->result()).is(r0));
+ ASSERT(FLAG_optimize_constructed_arrays);
+
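+ // The array construct builtin expects the argument count in r0, the
+ // constructor in r1, and the type info cell in r2.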
+ __ mov(r0, Operand(instr->arity()));
+ __ mov(r2, Operand(instr->hydrogen()->property_cell()));
+ Handle<Code> array_construct_code =
+ isolate()->builtins()->ArrayConstructCode();
+
+ CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
}
}
+void MacroAssembler::LoadArrayFunction(Register function) {
+ // Load the global or builtins object from the current context.
+ ldr(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the global context from the global or builtins object.
+ ldr(function,
+ FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the array function from the native context.
+ ldr(function,
+ MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
+ void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
+ elements_kind_ = oracle->GetCallNewElementsKind(this);
}
}
Handle<JSFunction> target() { return target_; }
BailoutId ReturnId() const { return return_id_; }
+ ElementsKind elements_kind() const { return elements_kind_; }
protected:
CallNew(Isolate* isolate,
arguments_(arguments),
pos_(pos),
is_monomorphic_(false),
- return_id_(GetNextId(isolate)) { }
+ return_id_(GetNextId(isolate)),
+ elements_kind_(GetInitialFastElementsKind()) { }
private:
Expression* expression_;
Handle<JSFunction> target_;
const BailoutId return_id_;
+ ElementsKind elements_kind_;
};
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(array_function, prototype);
+ // TODO(mvstanton): For performance reasons, this code would have to
+ // be changed to successfully run with FLAG_optimize_constructed_arrays.
+ // The next checkin to enable FLAG_optimize_constructed_arrays by
+ // default will address this.
+ CHECK(!FLAG_optimize_constructed_arrays);
array_function->shared()->set_construct_stub(
isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
+
array_function->shared()->DontAdaptArguments();
MaybeObject* maybe_map = array_function->initial_map()->Copy();
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
-
// InternalArrays should not use Smi-Only array optimizations. There are too
// many places in the C++ runtime code (e.g. RegEx) that assume that
// elements in InternalArrays can be set to non-Smi values without going
}
+#define CONVERT_ARG_STUB_CALLER_ARGS(name) \
+ Arguments* name = reinterpret_cast<Arguments*>(args[0]);
+
+
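+// Deoptimization handler for the specialized array constructor stubs: it
+// receives the caller's Arguments, the constructor function, and the type
+// info cell, and builds the array in the runtime.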
+RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
+ CONVERT_ARG_STUB_CALLER_ARGS(caller_args);
+ // ASSERT(args.length() == 3);
+ Handle<JSFunction> function = args.at<JSFunction>(1);
+ Handle<Object> type_info = args.at<Object>(2);
+
+ JSArray* array = NULL;
+ bool holey = false;
+ if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) {
+ int value = Smi::cast((*caller_args)[0])->value();
+ holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray);
+ }
+
+ ASSERT(function->has_initial_map());
+ ElementsKind kind = function->initial_map()->elements_kind();
+ if (holey) {
+ kind = GetHoleyElementsKind(kind);
+ }
+
+ MaybeObject* maybe_array;
+ if (*type_info != isolate->heap()->undefined_value()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
+ if (cell->value()->IsSmi()) {
+ Smi* smi = Smi::cast(cell->value());
+ ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ if (holey && !IsFastHoleyElementsKind(to_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ // Update the allocation site info to reflect the advice alteration.
+ cell->set_value(Smi::FromInt(to_kind));
+ }
+
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(to_kind);
+ if (mode == TRACK_ALLOCATION_SITE) {
+ maybe_array = isolate->heap()->AllocateEmptyJSArrayWithAllocationSite(
+ kind, type_info);
+ } else {
+ maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
+ }
+ if (!maybe_array->To(&array)) return maybe_array;
+ }
+ }
+
+ if (array == NULL) {
+ maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
+ if (!maybe_array->To(&array)) return maybe_array;
+ }
+
+ maybe_array = ArrayConstructInitializeElements(array, caller_args);
+ if (maybe_array->IsFailure()) return maybe_array;
+ return array;
+}
+
+
static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
Isolate* isolate,
JSFunction* constructor) {
+ ASSERT(args->length() >= 1);
Heap* heap = isolate->heap();
isolate->counters()->array_function_runtime()->Increment();
array = JSArray::cast((*args)[0]);
// Initialize elements and length in case later allocations fail so that the
// array object is initialized in a valid state.
- array->set_length(Smi::FromInt(0));
- array->set_elements(heap->empty_fixed_array());
+ MaybeObject* maybe_array = array->Initialize(0);
+ if (maybe_array->IsFailure()) return maybe_array;
+
+ if (FLAG_optimize_constructed_arrays) {
+ AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
+ ElementsKind to_kind = array->GetElementsKind();
+ if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
+ if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
+ to_kind)) {
+ // We have advice that we should change the elements kind
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
+ reinterpret_cast<void*>(array),
+ ElementsKindToString(array->GetElementsKind()),
+ ElementsKindToString(to_kind));
+ }
+
+ maybe_array = array->TransitionElementsKind(to_kind);
+ if (maybe_array->IsFailure()) return maybe_array;
+ }
+ }
+ }
+
if (!FLAG_smi_only_arrays) {
Context* native_context = isolate->context()->native_context();
if (array->GetElementsKind() == GetInitialFastElementsKind() &&
if (!maybe_obj->To(&array)) return maybe_obj;
}
- // Optimize the case where there is one argument and the argument is a
- // small smi.
- if (args->length() == 2) {
- Object* obj = (*args)[1];
- if (obj->IsSmi()) {
- int len = Smi::cast(obj)->value();
- if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
- Object* fixed_array;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
- if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
- }
- ElementsKind elements_kind = array->GetElementsKind();
- if (!IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe_array =
- array->TransitionElementsKind(elements_kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
- // We do not use SetContent to skip the unnecessary elements type check.
- array->set_elements(FixedArray::cast(fixed_array));
- array->set_length(Smi::cast(obj));
- return array;
- }
- }
- // Take the argument as the length.
- { MaybeObject* maybe_obj = array->Initialize(0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return array->SetElementsLength((*args)[1]);
- }
-
- // Optimize the case where there are no parameters passed.
- if (args->length() == 1) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
- }
-
- // Set length and elements on the array.
- int number_of_elements = args->length() - 1;
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(args, 1, number_of_elements,
- ALLOW_CONVERTED_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
-
- // Allocate an appropriately typed elements array.
- MaybeObject* maybe_elms;
- ElementsKind elements_kind = array->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
- maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
- number_of_elements);
- } else {
- maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
- }
- FixedArrayBase* elms;
- if (!maybe_elms->To(&elms)) return maybe_elms;
-
- // Fill in the content
- switch (array->GetElementsKind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
- FixedArray* smi_elms = FixedArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
- }
- break;
- }
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS: {
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- FixedArray* object_elms = FixedArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- object_elms->set(index, (*args)[index+1], mode);
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- double_elms->set(index, (*args)[index+1]->Number());
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- array->set_elements(elms);
- array->set_length(Smi::FromInt(number_of_elements));
- return array;
+ Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1);
+ ASSERT(adjusted_arguments.length() < 1 ||
+ adjusted_arguments[0] == (*args)[1]);
+ return ArrayConstructInitializeElements(array, &adjusted_arguments);
}
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
+MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
class BuiltinFunctionTable;
class ObjectVisitor;
}
+template <>
+void CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
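+ // Placeholder body: unconditionally soft-deoptimize so the stub bails out
+ // to the runtime handler (ArrayConstructor_StubFailure).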
+ HInstruction* deopt = new(zone()) HSoftDeoptimize();
+ AddInstruction(deopt);
+ current_block()->MarkAsDeoptimizing();
+ HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
+ CodeStubGraphBuilder<ArrayNoArgumentConstructorStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
+void CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::BuildCodeStub() {
+ HInstruction* deopt = new(zone()) HSoftDeoptimize();
+ AddInstruction(deopt);
+ current_block()->MarkAsDeoptimizing();
+ HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
+ current_block()->Finish(ret);
+}
+
+
Handle<Code> TransitionElementsKindStub::GenerateCode() {
CodeStubGraphBuilder<TransitionElementsKindStub> builder(this);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
}
+Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
+ CodeStubGraphBuilder<ArraySingleArgumentConstructorStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
+void CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
+ HInstruction* deopt = new(zone()) HSoftDeoptimize();
+ AddInstruction(deopt);
+ current_block()->MarkAsDeoptimizing();
+ HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
+ CodeStubGraphBuilder<ArrayNArgumentsConstructorStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
} } // namespace v8::internal
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(ArrayNArgumentsConstructor) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
V(StringDictionaryLookup) \
};
+class ArrayNoArgumentConstructorStub : public HydrogenCodeStub {
+ public:
+ ArrayNoArgumentConstructorStub() {
+ }
+
+ Major MajorKey() { return ArrayNoArgumentConstructor; }
+ int MinorKey() { return 0; }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
+};
+
+
+class ArraySingleArgumentConstructorStub : public HydrogenCodeStub {
+ public:
+ ArraySingleArgumentConstructorStub() {
+ }
+
+ Major MajorKey() { return ArraySingleArgumentConstructor; }
+ int MinorKey() { return 0; }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
+};
+
+
+class ArrayNArgumentsConstructorStub : public HydrogenCodeStub {
+ public:
+ ArrayNArgumentsConstructorStub() {
+ }
+
+ Major MajorKey() { return ArrayNArgumentsConstructor; }
+ int MinorKey() { return 0; }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
+};
+
+
class KeyedStoreElementStub : public PlatformCodeStub {
public:
KeyedStoreElementStub(bool is_js_array,
#include "v8.h"
+#include "arguments.h"
#include "objects.h"
#include "elements.h"
#include "utils.h"
}
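+// Shared helper used by the generic Array constructor and the stub-failure
+// handler: initializes |array|'s elements and length from the constructor
+// arguments (no arguments, a single length, or an explicit element list).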
+MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
+ JSArray* array, Arguments* args) {
+ Heap* heap = array->GetIsolate()->heap();
+
+ // Optimize the case where there is one argument and the argument is a
+ // small smi.
+ if (args->length() == 1) {
+ Object* obj = (*args)[0];
+ if (obj->IsSmi()) {
+ int len = Smi::cast(obj)->value();
+ if (len > 0 && len < JSObject::kInitialMaxFastElementArray) {
+ ElementsKind elements_kind = array->GetElementsKind();
+ MaybeObject* maybe_array = array->Initialize(len, len);
+ if (maybe_array->IsFailure()) return maybe_array;
+
+ if (!IsFastHoleyElementsKind(elements_kind)) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ maybe_array = array->TransitionElementsKind(elements_kind);
+ if (maybe_array->IsFailure()) return maybe_array;
+ }
+
+ return array;
+ } else if (len == 0) {
+ return array->Initialize(JSArray::kPreallocatedArrayElements);
+ }
+ }
+
+ // Take the argument as the length.
+ MaybeObject* maybe_obj = array->Initialize(0);
+ if (!maybe_obj->To(&obj)) return maybe_obj;
+
+ return array->SetElementsLength((*args)[0]);
+ }
+
+ // Optimize the case where there are no parameters passed.
+ if (args->length() == 0) {
+ return array->Initialize(JSArray::kPreallocatedArrayElements);
+ }
+
+ // Set length and elements on the array.
+ int number_of_elements = args->length();
+ MaybeObject* maybe_object =
+ array->EnsureCanContainElements(args, 0, number_of_elements,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS);
+ if (maybe_object->IsFailure()) return maybe_object;
+
+ // Allocate an appropriately typed elements array.
+ MaybeObject* maybe_elms;
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
+ number_of_elements);
+ } else {
+ maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+ }
+ FixedArrayBase* elms;
+ if (!maybe_elms->To(&elms)) return maybe_elms;
+
+ // Fill in the content
+ switch (array->GetElementsKind()) {
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
+ FixedArray* smi_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER);
+ }
+ break;
+ }
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_ELEMENTS: {
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ FixedArray* object_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ object_elms->set(index, (*args)[index], mode);
+ }
+ break;
+ }
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ double_elms->set(index, (*args)[index]->Number());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ array->set_elements(elms);
+ array->set_length(Smi::FromInt(number_of_elements));
+ return array;
+}
+
} } // namespace v8::internal
void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
bool allow_appending = false);
+MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
+ JSArray* array, Arguments* args);
+
} } // namespace v8::internal
#endif // V8_ELEMENTS_H_
"eliminate unreachable code (hidden behind soft deopts)")
DEFINE_bool(track_allocation_sites, true,
"Use allocation site info to reduce transitions")
+DEFINE_bool(optimize_constructed_arrays, false,
+ "Use allocation site info on constructed arrays")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
}
+MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite(
+ ElementsKind elements_kind,
+ Handle<Object> allocation_site_payload) {
+ return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0,
+ allocation_site_payload,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
+}
+
+
bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
const char* collector_reason = NULL;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
}
+MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
+ Handle<Object> allocation_site_info_payload) {
+ ASSERT(gc_state_ == NOT_IN_GC);
+ ASSERT(map->instance_type() != MAP_TYPE);
+ // If allocation failures are disallowed, we may allocate in a different
+ // space when new space is full and the object is not a large object.
+ AllocationSpace retry_space =
+ (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
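+ // Reserve extra room so an AllocationSiteInfo can be written directly
+ // after the object.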
+ int size = map->instance_size() + AllocationSiteInfo::kSize;
+ Object* result;
+ MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ // No need for write barrier since object is white and map is in old space.
+ HeapObject::cast(result)->set_map_no_write_barrier(map);
+ AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
+ reinterpret_cast<Address>(result) + map->instance_size());
+ alloc_info->set_map_no_write_barrier(allocation_site_info_map());
+ alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
+ return result;
+}
+
+
MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
// space when new space is full and the object is not a large object.
AllocationSpace retry_space =
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+ int size = map->instance_size();
Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(map->instance_size(), space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
// No need for write barrier since object is white and map is in old space.
HeapObject::cast(result)->set_map_no_write_barrier(map);
return result;
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
Object* obj;
- { MaybeObject* maybe_obj = Allocate(map, space);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ MaybeObject* maybe_obj = Allocate(map, space);
+ if (!maybe_obj->To(&obj)) return maybe_obj;
+
+ // Initialize the JSObject.
+ InitializeJSObjectFromMap(JSObject::cast(obj),
+ FixedArray::cast(properties),
+ map);
+ ASSERT(JSObject::cast(obj)->HasFastElements());
+ return obj;
+}
+
+
+MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
+ Handle<Object> allocation_site_info_payload) {
+ // JSFunctions should be allocated using AllocateFunction to be
+ // properly initialized.
+ ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
+
+ // Both types of global objects should be allocated using
+ // AllocateGlobalObject to be properly initialized.
+ ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+ ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
+
+ // Allocate the backing storage for the properties.
+ int prop_size =
+ map->pre_allocated_property_fields() +
+ map->unused_property_fields() -
+ map->inobject_properties();
+ ASSERT(prop_size >= 0);
+ Object* properties;
+ { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
+ if (!maybe_properties->ToObject(&properties)) return maybe_properties;
}
+ // Allocate the JSObject.
+ AllocationSpace space = NEW_SPACE;
+ if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ Object* obj;
+ MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
+ allocation_site_info_payload);
+ if (!maybe_obj->To(&obj)) return maybe_obj;
+
// Initialize the JSObject.
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
}
+MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
+ Handle<Object> allocation_site_info_payload) {
+ // Allocate the initial map if absent.
+ if (!constructor->has_initial_map()) {
+ Object* initial_map;
+ { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
+ if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
+ }
+ constructor->set_initial_map(Map::cast(initial_map));
+ Map::cast(initial_map)->set_constructor(constructor);
+ }
+ // Allocate the object based on the constructor's initial map, or the
+ // advice in the allocation site payload.
+ Map* initial_map = constructor->initial_map();
+
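+ // The allocation site payload is a property cell holding a Smi-encoded
+ // ElementsKind.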
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+ *allocation_site_info_payload);
+ Smi* smi = Smi::cast(cell->value());
+ ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
+ if (to_kind != initial_map->elements_kind()) {
+ MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap(
+ isolate(), to_kind);
+ if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
+ // Possibly alter the mode, since we found an updated elements kind
+ // in the type info cell.
+ mode = AllocationSiteInfo::GetMode(to_kind);
+ }
+
+ MaybeObject* result;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
+ allocation_site_info_payload);
+ } else {
+ result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
+ }
+#ifdef DEBUG
+ // Make sure result is NOT a global object if valid.
+ Object* non_failure;
+ ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
+#endif
+ return result;
+}
+
+
MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
int capacity,
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
- ASSERT(capacity >= length);
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
+ // TODO(mvstanton): this body of code is duplicated in AllocateJSArrayStorage
+ // for performance reasons.
+ ASSERT(capacity >= length);
+
if (capacity == 0) {
array->set_length(Smi::FromInt(0));
array->set_elements(empty_fixed_array());
}
+MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
+ ElementsKind elements_kind,
+ int length,
+ int capacity,
+ Handle<Object> allocation_site_payload,
+ ArrayStorageAllocationMode mode) {
+ MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
+ allocation_site_payload);
+ JSArray* array;
+ if (!maybe_array->To(&array)) return maybe_array;
+ return AllocateJSArrayStorage(array, length, capacity, mode);
+}
+
+
+MaybeObject* Heap::AllocateJSArrayStorage(
+ JSArray* array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode) {
+ ASSERT(capacity >= length);
+
+ if (capacity == 0) {
+ array->set_length(Smi::FromInt(0));
+ array->set_elements(empty_fixed_array());
+ return array;
+ }
+
+ FixedArrayBase* elms;
+ MaybeObject* maybe_elms = NULL;
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
+ } else {
+ ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
+ }
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ maybe_elms = AllocateUninitializedFixedArray(capacity);
+ } else {
+ ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ maybe_elms = AllocateFixedArrayWithHoles(capacity);
+ }
+ }
+ if (!maybe_elms->To(&elms)) return maybe_elms;
+
+ array->set_elements(elms);
+ array->set_length(Smi::FromInt(length));
+ return array;
+}
+
+
MaybeObject* Heap::AllocateJSArrayWithElements(
FixedArrayBase* elements,
ElementsKind elements_kind,
}
-MaybeObject* Heap::CopyJSObject(JSObject* source,
- AllocationSiteMode mode) {
+MaybeObject* Heap::CopyJSObject(JSObject* source) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
SLOW_ASSERT(!source->IsJSFunction());
int object_size = map->instance_size();
Object* clone;
- bool track_origin = mode == TRACK_ALLOCATION_SITE &&
- map->CanTrackAllocationSite();
+ WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
+ // If we're forced to always allocate, we use the general allocation
+ // functions which may leave us with an object in old space.
+ if (always_allocate()) {
+ { MaybeObject* maybe_clone =
+ AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+ if (!maybe_clone->ToObject(&clone)) return maybe_clone;
+ }
+ Address clone_address = HeapObject::cast(clone)->address();
+ CopyBlock(clone_address,
+ source->address(),
+ object_size);
+ // Update write barrier for all fields that lie beyond the header.
+ RecordWrites(clone_address,
+ JSObject::kHeaderSize,
+ (object_size - JSObject::kHeaderSize) / kPointerSize);
+ } else {
+ wb_mode = SKIP_WRITE_BARRIER;
+
+ { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
+ if (!maybe_clone->ToObject(&clone)) return maybe_clone;
+ }
+ SLOW_ASSERT(InNewSpace(clone));
+ // Since we know the clone is allocated in new space, we can copy
+ // the contents without worrying about updating the write barrier.
+ CopyBlock(HeapObject::cast(clone)->address(),
+ source->address(),
+ object_size);
+ }
+
+ SLOW_ASSERT(
+ JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
+ FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
+ FixedArray* properties = FixedArray::cast(source->properties());
+ // Update elements if necessary.
+ if (elements->length() > 0) {
+ Object* elem;
+ { MaybeObject* maybe_elem;
+ if (elements->map() == fixed_cow_array_map()) {
+ maybe_elem = FixedArray::cast(elements);
+ } else if (source->HasFastDoubleElements()) {
+ maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
+ } else {
+ maybe_elem = CopyFixedArray(FixedArray::cast(elements));
+ }
+ if (!maybe_elem->ToObject(&elem)) return maybe_elem;
+ }
+ JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
+ }
+ // Update properties if necessary.
+ if (properties->length() > 0) {
+ Object* prop;
+ { MaybeObject* maybe_prop = CopyFixedArray(properties);
+ if (!maybe_prop->ToObject(&prop)) return maybe_prop;
+ }
+ JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
+ }
+ // Return the new clone.
+ return clone;
+}
+
+
+MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
+ // Never used to copy functions. If functions need to be copied we
+ // have to be careful to clear the literals array.
+ SLOW_ASSERT(!source->IsJSFunction());
+
+ // Make the clone.
+ Map* map = source->map();
+ int object_size = map->instance_size();
+ Object* clone;
+ ASSERT(map->CanTrackAllocationSite());
+ ASSERT(map->instance_type() == JS_ARRAY_TYPE);
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
// If we're forced to always allocate, we use the general allocation
int adjusted_object_size = object_size;
if (always_allocate()) {
// We'll only track origin if we are certain to allocate in new space
- if (track_origin) {
- const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
- if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
- adjusted_object_size += AllocationSiteInfo::kSize;
- }
+ const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
+ if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
+ adjusted_object_size += AllocationSiteInfo::kSize;
}
{ MaybeObject* maybe_clone =
source->address(),
object_size);
// Update write barrier for all fields that lie beyond the header.
- RecordWrites(clone_address,
- JSObject::kHeaderSize,
- (object_size - JSObject::kHeaderSize) / kPointerSize);
+ int write_barrier_offset = adjusted_object_size > object_size
+ ? JSArray::kSize + AllocationSiteInfo::kSize
+ : JSObject::kHeaderSize;
+ if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
+ RecordWrites(clone_address,
+ write_barrier_offset,
+ (object_size - write_barrier_offset) / kPointerSize);
+ }
+
+ // Track allocation site information, if we failed to allocate it inline.
+ if (InNewSpace(clone) &&
+ adjusted_object_size == object_size) {
+ MaybeObject* maybe_alloc_info =
+ AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
+ AllocationSiteInfo* alloc_info;
+ if (maybe_alloc_info->To(&alloc_info)) {
+ alloc_info->set_map_no_write_barrier(allocation_site_info_map());
+ alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
+ }
+ }
} else {
wb_mode = SKIP_WRITE_BARRIER;
- if (track_origin) {
- adjusted_object_size += AllocationSiteInfo::kSize;
- }
+ adjusted_object_size += AllocationSiteInfo::kSize;
{ MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
if (adjusted_object_size > object_size) {
AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
reinterpret_cast<Address>(clone) + object_size);
- alloc_info->set_map(allocation_site_info_map());
- alloc_info->set_payload(source);
+ alloc_info->set_map_no_write_barrier(allocation_site_info_map());
+ alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
}
SLOW_ASSERT(
}
+MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
+ ElementsKind elements_kind,
+ Handle<Object> allocation_site_info_payload) {
+ Context* native_context = isolate()->context()->native_context();
+ JSFunction* array_function = native_context->array_function();
+ Map* map = array_function->initial_map();
+ Object* maybe_map_array = native_context->js_array_maps();
+ if (!maybe_map_array->IsUndefined()) {
+ Object* maybe_transitioned_map =
+ FixedArray::cast(maybe_map_array)->get(elements_kind);
+ if (!maybe_transitioned_map->IsUndefined()) {
+ map = Map::cast(maybe_transitioned_map);
+ }
+ }
+ return AllocateJSObjectFromMapWithAllocationSite(map,
+ allocation_site_info_payload);
+}
+
+
MaybeObject* Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
Object* result;
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObject(
- JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
+ JSFunction* constructor,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite(
+ JSFunction* constructor,
+ Handle<Object> allocation_site_info_payload);
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
pretenure);
}
+ inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite(
+ ElementsKind elements_kind,
+ Handle<Object> allocation_site_payload);
+
// Allocate a JSArray with a specified length but elements that are left
// uninitialized.
MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorageWithAllocationSite(
+ ElementsKind elements_kind,
+ int length,
+ int capacity,
+ Handle<Object> allocation_site_payload,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
+ MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
+ JSArray* array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
FixedArrayBase* array_base,
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
- MUST_USE_RESULT MaybeObject* CopyJSObject(
- JSObject* source,
- AllocationSiteMode mode = DONT_TRACK_ALLOCATION_SITE);
+ MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
+
+ MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(JSObject* source);
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
Map* map, PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
+ Map* map, Handle<Object> allocation_site_info_payload);
+
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
+ MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map,
+ AllocationSpace space, Handle<Object> allocation_site_info_payload);
+
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite(
+ ElementsKind elements_kind,
+ Handle<Object> allocation_site_info_payload);
+
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(Change) \
};
+class HCallNewArray: public HCallNew {
+ public:
+ HCallNewArray(HValue* context, HValue* constructor, int argument_count,
+ Handle<JSGlobalPropertyCell> type_cell)
+ : HCallNew(context, constructor, argument_count),
+ type_cell_(type_cell) {
+ }
+
+ Handle<JSGlobalPropertyCell> property_cell() const {
+ return type_cell_;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
+
+ private:
+ Handle<JSGlobalPropertyCell> type_cell_;
+};
+
+
class HCallRuntime: public HCall<1> {
public:
HCallRuntime(HValue* context,
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* constructor = HPushArgument::cast(Top())->argument();
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HInstruction* call =
- new(zone()) HCallNew(context, constructor, argument_count);
+ HCallNew* call;
+ if (FLAG_optimize_constructed_arrays &&
+ !(expr->target().is_null()) &&
+ *(expr->target()) == isolate()->global_context()->array_function()) {
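+    // Wrap the Smi ElementsKind feedback in a fresh property cell so the
+    // optimized call site can read (and later update) it.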
+ Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
+ ASSERT(feedback->IsSmi());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(feedback);
+ AddInstruction(new(zone()) HCheckFunction(constructor,
+ Handle<JSFunction>(isolate()->global_context()->array_function())));
+ call = new(zone()) HCallNewArray(context, constructor, argument_count,
+ cell);
+ } else {
+ call = new(zone()) HCallNew(context, constructor, argument_count);
+ }
Drop(argument_count);
call->set_position(expr->position());
return ast_context()->ReturnInstruction(call, expr->id());
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
+ // -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
- Label generic_constructor;
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, "Unexpected initial map for Array function");
- }
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, true, &generic_constructor);
+ // We should either have undefined in ebx or a valid JSGlobalPropertyCell.
+ Label okay_here;
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ Handle<Map> global_property_cell_map(
+ masm->isolate()->heap()->global_property_cell_map());
+ __ cmp(ebx, Immediate(undefined_sentinel));
+ __ j(equal, &okay_here);
+ __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map));
+ __ Assert(equal, "Expected property cell in register ebx");
+ __ bind(&okay_here);
+ }
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
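+ // Dispatch on the argument count in eax to the matching specialized stub.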
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ ArrayNoArgumentConstructorStub no_argument_stub;
+ __ TailCallStub(&no_argument_stub);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ ArraySingleArgumentConstructorStub single_argument_stub;
+ __ TailCallStub(&single_argument_stub);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub n_argument_stub;
+ __ TailCallStub(&n_argument_stub);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, true, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
}
#include "runtime.h"
#include "stub-cache.h"
#include "codegen.h"
+#include "runtime.h"
namespace v8 {
namespace internal {
static Register registers[] = { edx, ecx };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}
}
+static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // register state
+ // edi -- constructor function
+ // ebx -- type info cell with elements kind
+ // eax -- number of arguments to the constructor function
+ static Register registers[] = { edi, ebx };
+ descriptor->register_param_count_ = 2;
+ // stack param count needs (constructor pointer, and single argument)
+ descriptor->stack_parameter_count_ = &eax;
+ descriptor->register_params_ = registers;
+ descriptor->extra_expression_stack_count_ = 1;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ArrayConstructor_StubFailure);
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// ebx : cache cell for call target
// edi : the function to call
+ ASSERT(!FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done;
}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // ebx : cache cell for call target
+ // edi : the function to call
+ ASSERT(FLAG_optimize_constructed_arrays);
+ Isolate* isolate = masm->isolate();
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ // Load the cache state into ecx.
+ __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(ecx, edi);
+ __ j(equal, &done);
+ __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ __ j(equal, &done);
+
+ // Special handling of the Array() function, which caches not only the
+ // monomorphic Array function but the initial ElementsKind with special
+ // sentinels
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(isolate,
+ LAST_FAST_ELEMENTS_KIND);
+ __ cmp(ecx, Immediate(terminal_kind_sentinel));
+ __ j(above, &miss);
+ // Load the native context into ecx.
+ __ LoadGlobalContext(ecx);
+ // Make sure the function is the Array() function
+ __ cmp(edi, Operand(ecx,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
+ __ j(equal, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kNear);
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if the function is the Array constructor.
+ __ bind(&initialize);
+ __ LoadGlobalContext(ecx);
+ // Make sure the function is the Array() function
+ __ cmp(edi, Operand(ecx,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor; install a sentinel value in
+ // the constructor's type info cell that will track the initial ElementsKind
+ // that should be used for the array when it is constructed.
+ Handle<Object> initial_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(isolate,
+ GetInitialFastElementsKind());
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(initial_kind_sentinel));
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
+ // No need for a write barrier here - cells are rescanned.
+
+ __ bind(&done);
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
// ebx : cache cell for call target
// edi : the function to call
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Fast-case: Just invoke the function.
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Jump to the function-specific construct stub.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
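+ // When FLAG_optimize_constructed_arrays is on, ebx still holds the type
+ // info cell needed by the array construct stub, so jump through ecx instead.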
+ Register jmp_reg = FLAG_optimize_constructed_arrays ? ecx : ebx;
+ __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(jmp_reg, FieldOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ jmp(jmp_reg);
// edi: called object
// eax: number of arguments
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
+ if (FLAG_optimize_constructed_arrays) {
+ // No cell in ebx for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
+ isolate());
+ __ mov(ebx, Immediate(undefined_value));
+ }
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(eax, Immediate(instr->arity()));
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->constructor()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+ ASSERT(FLAG_optimize_constructed_arrays);
+
+ __ mov(ebx, instr->hydrogen()->property_cell());
+ Handle<Code> array_construct_code =
+ isolate()->builtins()->ArrayConstructCode();
+ __ Set(eax, Immediate(instr->arity()));
+ CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
}
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ASSERT(hydrogen()->property_cell()->value()->IsSmi());
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(hydrogen()->property_cell()->value())->value());
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ ASSERT(FLAG_optimize_constructed_arrays);
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* constructor = UseFixed(instr->constructor(), edi);
+ argument_count_ -= instr->argument_count();
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
};
+class LCallNewArray: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
}
+void MacroAssembler::LoadGlobalContext(Register global_context) {
+ // Load the global or builtins object from the current context.
+ mov(global_context,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ mov(global_context,
+ FieldOperand(global_context, GlobalObject::kNativeContextOffset));
+}
+
+
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
mov(function,
Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- mov(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ mov(function,
+ FieldOperand(function, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
mov(function, Operand(function, Context::SlotOffset(index)));
}
Register map_out,
bool can_have_holes);
+ void LoadGlobalContext(Register global_context);
+
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
}
+Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind) {
+ return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
+}
+
+
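The sentinel returned here is simply the ElementsKind boxed as a Smi, which is why the stubs above can separate it from a cached JSFunction with a single comparison against the LAST_FAST_ELEMENTS_KIND sentinel. A compilable sketch of the encoding, with plain ints standing in for Smis (the enumerators are placeholders and not guaranteed to match V8's values):

#include <cassert>

enum ElementsKindModel {
  PACKED_SMI_KIND = 0, HOLEY_SMI_KIND, PACKED_KIND, HOLEY_KIND,
  PACKED_DOUBLE_KIND, HOLEY_DOUBLE_KIND,
  LAST_FAST_KIND = HOLEY_DOUBLE_KIND
};

// MonomorphicArraySentinel: the kind itself is the cell payload.
int MakeArraySentinel(ElementsKindModel kind) { return static_cast<int>(kind); }

// Anything at or below the terminal sentinel is an Array-kind sentinel;
// other payloads are functions or the uninitialized/megamorphic sentinels.
bool IsArrayKindSentinel(int payload) {
  return payload >= 0 && payload <= LAST_FAST_KIND;
}

int main() {
  assert(IsArrayKindSentinel(MakeArraySentinel(PACKED_DOUBLE_KIND)));
  return 0;
}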
Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
return heap->the_hole_value();
}
void AllocationSiteInfo::AllocationSiteInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "AllocationSiteInfo");
PrintF(out, " - payload: ");
- if (payload()->IsJSArray()) {
+ if (payload()->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload());
+ Object* cell_contents = cell->value();
+ if (cell_contents->IsSmi()) {
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(cell_contents)->value());
+ PrintF(out, "Array allocation with ElementsKind ");
+ PrintElementsKind(out, kind);
+ PrintF(out, "\n");
+ return;
+ }
+ } else if (payload()->IsJSArray()) {
PrintF(out, "Array literal ");
payload()->ShortPrint(out);
PrintF(out, "\n");
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
}
+bool AllocationSiteInfo::GetElementsKindPayload(ElementsKind* kind) {
+ ASSERT(kind != NULL);
+ if (payload()->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload());
+ Object* cell_contents = cell->value();
+ if (cell_contents->IsSmi()) {
+ *kind = static_cast<ElementsKind>(
+ Smi::cast(cell_contents)->value());
+ return true;
+ }
+ }
+ return false;
+}
+
+
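GetElementsKindPayload is the read side of the kind-as-Smi convention: when the allocation site's payload is a property cell holding a Smi, the recorded ElementsKind can be recovered; otherwise (e.g. a boilerplate array payload) the caller falls back to other information. A hedged round-trip sketch with an int standing in for the cell contents (hypothetical helper names, not the actual V8 call sites):

#include <cassert>

// Stand-in for the JSGlobalPropertyCell payload: a kind encoded as a small
// non-negative integer, or "not a Smi" signalled by -1.
struct CellModel { int value = -1; };

void WriteKind(CellModel* cell, int kind) { cell->value = kind; }

// Mirrors GetElementsKindPayload: succeeds only for a Smi payload.
bool ReadKind(const CellModel& cell, int* kind) {
  if (cell.value < 0) return false;
  *kind = cell.value;
  return true;
}

int main() {
  CellModel cell;
  int kind = 0;
  assert(!ReadKind(cell, &kind));  // non-Smi payload: no kind available
  WriteKind(&cell, 4);             // e.g. a double elements kind
  assert(ReadKind(cell, &kind) && kind == 4);
  return 0;
}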
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSiteInfo::GetMode(
}
-MaybeObject* JSArray::Initialize(int capacity) {
- Heap* heap = GetHeap();
+MaybeObject* JSArray::Initialize(int capacity, int length) {
ASSERT(capacity >= 0);
- set_length(Smi::FromInt(0));
- FixedArray* new_elements;
- if (capacity == 0) {
- new_elements = heap->empty_fixed_array();
- } else {
- MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
- if (!maybe_obj->To(&new_elements)) return maybe_obj;
- }
- set_elements(new_elements);
- return this;
+ return GetHeap()->AllocateJSArrayStorage(this, length, capacity,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}
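For readers of the new signature: capacity controls how many backing-store slots are allocated (and filled with holes), while length is what the array reports as .length; the old code always produced length 0 and allocated the storage by hand. A plain C++ model of the capacity/length distinction, not the actual heap allocation path:

#include <cassert>
#include <vector>

// Illustrative only: a per-slot hole flag stands in for the holes written
// by AllocateJSArrayStorage with INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE.
struct ArrayModel {
  std::vector<bool> slot_is_hole;  // size == capacity
  int length = 0;                  // what JavaScript sees as array.length
};

ArrayModel Initialize(int capacity, int length = 0) {
  assert(capacity >= 0 && length >= 0);
  ArrayModel a;
  a.slot_is_hole.assign(static_cast<size_t>(capacity), true);
  a.length = length;
  return a;
}

int main() {
  ArrayModel a = Initialize(/*capacity=*/16, /*length=*/3);
  assert(a.slot_is_hole.size() == 16 && a.length == 3);
  return 0;
}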
? FAST_HOLEY_DOUBLE_ELEMENTS
: FAST_DOUBLE_ELEMENTS;
- MaybeObject* trans = PossiblyTransitionArrayBoilerplate(to_kind);
- if (trans->IsFailure()) return trans;
+ MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
MaybeObject* maybe =
SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
? FAST_HOLEY_ELEMENTS
: FAST_ELEMENTS;
- MaybeObject* trans = PossiblyTransitionArrayBoilerplate(kind);
- if (trans->IsFailure()) return trans;
+ MaybeObject* maybe_failure = UpdateAllocationSiteInfo(kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
kind);
}
-MaybeObject* JSObject::PossiblyTransitionArrayBoilerplate(
- ElementsKind to_kind) {
- MaybeObject* ret = NULL;
+MaybeObject* JSObject::UpdateAllocationSiteInfo(ElementsKind to_kind) {
if (!FLAG_track_allocation_sites || !IsJSArray()) {
- return ret;
+ return this;
}
AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(this);
if (info == NULL) {
- return ret;
+ return this;
}
- ASSERT(info->payload()->IsJSArray());
- JSArray* payload = JSArray::cast(info->payload());
- ElementsKind kind = payload->GetElementsKind();
- if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
- // If the array is huge, it's not likely to be defined in a local
- // function, so we shouldn't make new instances of it very often.
- uint32_t length = 0;
- CHECK(payload->length()->ToArrayIndex(&length));
- if (length <= 8 * 1024) {
- ret = payload->TransitionElementsKind(to_kind);
- if (FLAG_trace_track_allocation_sites) {
- PrintF(
- "AllocationSiteInfo: JSArray %p boilerplate updated %s->%s\n",
- reinterpret_cast<void*>(this),
- ElementsKindToString(kind),
- ElementsKindToString(to_kind));
+ if (info->payload()->IsJSArray()) {
+ JSArray* payload = JSArray::cast(info->payload());
+ ElementsKind kind = payload->GetElementsKind();
+ if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ // If the array is huge, it's not likely to be defined in a local
+ // function, so we shouldn't make new instances of it very often.
+ uint32_t length = 0;
+ CHECK(payload->length()->ToArrayIndex(&length));
+ if (length <= AllocationSiteInfo::kMaximumArrayBytesToPretransition) {
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF(
+ "AllocationSiteInfo: JSArray %p boilerplate updated %s->%s\n",
+ reinterpret_cast<void*>(this),
+ ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
+ }
+ return payload->TransitionElementsKind(to_kind);
+ }
+ }
+ } else if (info->payload()->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(info->payload());
+ Object* cell_contents = cell->value();
+ if (cell_contents->IsSmi()) {
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(cell_contents)->value());
+ if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSiteInfo: JSArray %p info updated %s->%s\n",
+ reinterpret_cast<void*>(this),
+ ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
+ }
+ cell->set_value(Smi::FromInt(to_kind));
}
}
}
- return ret;
+ return this;
}
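The rewritten UpdateAllocationSiteInfo applies one policy to both payload shapes: act only when GetMode reports TRACK_ALLOCATION_SITE for the transition, and, for boilerplate JSArray payloads, additionally skip very large arrays (kMaximumArrayBytesToPretransition). A compact restatement of that decision logic in self-contained C++ (the types and the ShouldTrack heuristic are simplified stand-ins):

#include <cstdint>

// Stand-in for the two payload shapes an AllocationSiteInfo can carry.
struct SitePayloadModel {
  bool is_boilerplate_array;  // JSArray payload (array literals)
  bool is_property_cell;      // JSGlobalPropertyCell payload (new Array())
  int kind;                   // ElementsKind currently recorded at the site
  uint32_t array_length;      // only meaningful for the boilerplate case
};

const uint32_t kMaxBytesToPretransition = 8 * 1024;

// Simplified stand-in for AllocationSiteInfo::GetMode: track only
// transitions that generalize the recorded kind.
bool ShouldTrack(int from_kind, int to_kind) { return to_kind > from_kind; }

// Returns true when the site should be updated to to_kind.
bool ShouldUpdateSite(const SitePayloadModel& payload, int to_kind) {
  if (!ShouldTrack(payload.kind, to_kind)) return false;
  if (payload.is_boilerplate_array) {
    // Huge boilerplates are unlikely to be instantiated often; skip them.
    return payload.array_length <= kMaxBytesToPretransition;
  }
  return payload.is_property_cell;  // cells just get their Smi rewritten
}

int main() {
  SitePayloadModel cell_site{false, true, /*kind=*/0, /*array_length=*/0};
  return ShouldUpdateSite(cell_site, /*to_kind=*/4) ? 0 : 1;
}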
if (from_kind == to_kind) return this;
- MaybeObject* trans = PossiblyTransitionArrayBoilerplate(to_kind);
- if (trans->IsFailure()) return trans;
+ MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
Isolate* isolate = GetIsolate();
if (elements() == isolate->heap()->empty_fixed_array() ||
ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* PossiblyTransitionArrayBoilerplate(
+ MUST_USE_RESULT MaybeObject* UpdateAllocationSiteInfo(
ElementsKind to_kind);
// Replaces an existing transition with a transition to a map with a FIELD.
// The object that indicates a megamorphic state.
static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+  // The object that indicates a monomorphic state of an Array call, encoding
+  // the ElementsKind to be used.
+ static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind);
+
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
static inline Object* RawUninitializedSentinel(Heap* heap);
static const int kPayloadOffset = HeapObject::kHeaderSize;
static const int kSize = kPayloadOffset + kPointerSize;
+ static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
+ bool GetElementsKindPayload(ElementsKind* kind);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSiteInfo);
};
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
// capacity is non-zero.
- MUST_USE_RESULT MaybeObject* Initialize(int capacity);
+ MUST_USE_RESULT MaybeObject* Initialize(int capacity, int length = 0);
// Initializes the array to a certain length.
inline bool AllowsSetElementsLength();
RUNTIME_ASSERT(args[index]->Is##Type()); \
Handle<Type> name = args.at<Type>(index);
-#define CONVERT_ARG_STUB_CALLER_ARGS(name) \
- Arguments* name = reinterpret_cast<Arguments*>(args[0]);
-
// Cast the given object to a boolean and store it in a variable with
// the given name. If the object is not a boolean call IllegalOperation
// and return.
JSObject* boilerplate_object = JSObject::cast(*boilerplate);
AllocationSiteMode mode = AllocationSiteInfo::GetMode(
boilerplate_object->GetElementsKind());
- return isolate->heap()->CopyJSObject(boilerplate_object, mode);
+ if (mode == TRACK_ALLOCATION_SITE) {
+ return isolate->heap()->CopyJSObjectWithAllocationSite(boilerplate_object);
+ }
+
+ return isolate->heap()->CopyJSObject(boilerplate_object);
}
bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
- Handle<Object> value = GetInfo(expr->CallNewFeedbackId());
- return value->IsJSFunction();
+ Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+ if (info->IsSmi()) {
+ ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
+ LAST_FAST_ELEMENTS_KIND);
+    // A Smi payload can only mean the Array function was recorded here.
+    return true;
+ }
+ return info->IsJSFunction();
}
Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->CallNewFeedbackId()));
+ Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+ if (info->IsSmi()) {
+ ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
+ LAST_FAST_ELEMENTS_KIND);
+ return Handle<JSFunction>(Isolate::Current()->global_context()->
+ array_function());
+ } else {
+ return Handle<JSFunction>::cast(info);
+ }
}
+ElementsKind TypeFeedbackOracle::GetCallNewElementsKind(CallNew* expr) {
+ Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+ if (info->IsSmi()) {
+ return static_cast<ElementsKind>(Smi::cast(*info)->value());
+ } else {
+    // TODO(mvstanton): we avoided calling GetInitialFastElementsKind() for
+    // performance reasons. Is there a better fix?
+ if (FLAG_packed_arrays) {
+ return FAST_SMI_ELEMENTS;
+ } else {
+ return FAST_HOLEY_SMI_ELEMENTS;
+ }
+ }
+}
+
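In other words, the oracle treats a Smi in the feedback slot as "the Array function was called here, and this is the kind to start with"; a JSFunction is an ordinary monomorphic target, and with no kind feedback the compiler falls back to the initial fast kind, packed or holey depending on --packed-arrays. A rough model of the mapping with placeholder types rather than the real oracle classes:

// Placeholder feedback value: a small-integer kind, a function, or neither.
struct FeedbackInfoModel {
  bool is_smi;
  int smi_value;    // ElementsKind when is_smi is true
  bool is_function;
};

enum { PACKED_SMI_KIND = 0, HOLEY_SMI_KIND = 1 };

// Mirrors GetCallNewElementsKind: use the recorded kind if present,
// otherwise pick the default initial kind.
int CallNewElementsKind(const FeedbackInfoModel& info, bool packed_arrays) {
  if (info.is_smi) return info.smi_value;
  return packed_arrays ? PACKED_SMI_KIND : HOLEY_SMI_KIND;
}

// Mirrors CallNewIsMonomorphic: a kind sentinel implies the Array function.
bool CallNewIsMonomorphic(const FeedbackInfoModel& info) {
  return info.is_smi || info.is_function;
}

int main() {
  FeedbackInfoModel info{true, /*smi_value=*/4, false};
  return CallNewElementsKind(info, /*packed_arrays=*/true) == 4 ? 0 : 1;
}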
Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
ObjectLiteral::Property* prop) {
ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
Handle<JSFunction> GetCallTarget(Call* expr);
Handle<JSFunction> GetCallNewTarget(CallNew* expr);
+ ElementsKind GetCallNewElementsKind(CallNew* expr);
Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop);
// Returns an element from the backing store. Returns undefined if
// there is no information.
+ public:
+ // TODO(mvstanton): how to get this information without making the method
+ // public?
Handle<Object> GetInfo(TypeFeedbackId ast_id);
+ private:
Handle<Context> native_context_;
Isolate* isolate_;
Handle<UnseededNumberDictionary> dictionary_;
// Invoke the code.
if (is_construct) {
+ // No type feedback cell is available
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->factory()->undefined_value());
+ __ Move(rbx, undefined_sentinel);
// Expects rdi to hold function pointer.
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
- Label generic_constructor;
-
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
+
// Initial map for the builtin Array function should be a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+ Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
__ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, "Unexpected initial map for Array function");
- }
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
+    // We should either have undefined in rbx or a valid JSGlobalPropertyCell.
+ Label okay_here;
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->factory()->undefined_value());
+ Handle<Map> global_property_cell_map(
+ masm->isolate()->heap()->global_property_cell_map());
+ __ Cmp(rbx, undefined_sentinel);
+ __ j(equal, &okay_here);
+ __ Cmp(FieldOperand(rbx, 0), global_property_cell_map);
+ __ Assert(equal, "Expected property cell in register rbx");
+ __ bind(&okay_here);
+ }
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+ Label not_zero_case, not_one_case;
+ __ testq(rax, rax);
+    __ j(not_zero, &not_zero_case);
+ ArrayNoArgumentConstructorStub no_argument_stub;
+ __ TailCallStub(&no_argument_stub);
+
+    __ bind(&not_zero_case);
+ __ cmpq(rax, Immediate(1));
+    __ j(greater, &not_one_case);
+ ArraySingleArgumentConstructorStub single_argument_stub;
+ __ TailCallStub(&single_argument_stub);
+
+    __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub n_argument_stub;
+ __ TailCallStub(&n_argument_stub);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
}
}
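With --optimize-constructed-arrays enabled, the construct entry point above no longer funnels everything through ArrayNativeCode; it chooses one of three specialized stubs purely from the argument count in rax. Restated as ordinary C++ (the enum names are placeholders for the stub classes used above):

enum ArrayStubChoice { kNoArgumentStub, kSingleArgumentStub, kNArgumentsStub };

// argc is the value the builtin receives in rax.
ArrayStubChoice SelectArrayConstructorStub(int argc) {
  if (argc == 0) return kNoArgumentStub;      // testq rax, rax path
  if (argc == 1) return kSingleArgumentStub;  // cmpq rax, 1 path
  return kNArgumentsStub;
}

int main() { return SelectArrayConstructorStub(2) == kNArgumentsStub ? 0 : 1; }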
+static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // register state
+ // rdi -- constructor function
+ // rbx -- type info cell with elements kind
+ // rax -- number of arguments to the constructor function
+ static Register registers[] = { rdi, rbx };
+ descriptor->register_param_count_ = 2;
+ // stack param count needs (constructor pointer, and single argument)
+ descriptor->stack_parameter_count_ = &rax;
+ descriptor->register_params_ = registers;
+ descriptor->extra_expression_stack_count_ = 1;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ArrayConstructor_StubFailure);
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rbx : cache cell for call target
// rdi : the function to call
+ ASSERT(!FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done;
}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // rbx : cache cell for call target
+ // rdi : the function to call
+ ASSERT(FLAG_optimize_constructed_arrays);
+ Isolate* isolate = masm->isolate();
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ // Load the cache state into rcx.
+ __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmpq(rcx, rdi);
+ __ j(equal, &done);
+ __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ j(equal, &done);
+
+  // Special handling of the Array() function, which caches not only the
+  // monomorphic Array function but also the initial ElementsKind with
+  // special sentinels.
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(isolate,
+ LAST_FAST_ELEMENTS_KIND);
+ __ Cmp(rcx, terminal_kind_sentinel);
+ __ j(not_equal, &miss);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(rcx);
+ __ cmpq(rdi, rcx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+
+ __ bind(&miss);
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+  // megamorphic.
+ __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+ __ j(equal, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ jmp(&done, Label::kNear);
+
+  // An uninitialized cache is patched with the function, or with a sentinel
+  // indicating the ElementsKind if the function is the Array constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(rcx);
+ __ cmpq(rdi, rcx);
+  __ j(not_equal, &not_array_function);
+
+  // The target function is the Array constructor; install a sentinel value in
+  // the constructor's type info cell that will track the initial ElementsKind
+  // that should be used for the array when it is constructed.
+ Handle<Object> initial_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(isolate,
+ GetInitialFastElementsKind());
+ __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ initial_kind_sentinel);
+ __ jmp(&done);
+
+  __ bind(&not_array_function);
+ __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
+ // No need for a write barrier here - cells are rescanned.
+
+ __ bind(&done);
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
// rbx : cache cell for call target
// rdi : the function to call
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Fast-case: Just invoke the function.
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Jump to the function-specific construct stub.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
- __ jmp(rbx);
+ Register jmp_reg = FLAG_optimize_constructed_arrays ? rcx : rbx;
+ __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(jmp_reg, FieldOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ jmp(jmp_reg);
// rdi: called object
// rax: number of arguments
ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(rax, instr->arity());
+ if (FLAG_optimize_constructed_arrays) {
+    // No cell in rbx for construct type feedback in optimized code.
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ Move(rbx, undefined_value);
+ }
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->constructor()).is(rdi));
+ ASSERT(ToRegister(instr->result()).is(rax));
+ ASSERT(FLAG_optimize_constructed_arrays);
+
+ __ Set(rax, instr->arity());
+ __ Move(rbx, instr->hydrogen()->property_cell());
+ Handle<Code> array_construct_code =
+ isolate()->builtins()->ArrayConstructCode();
+ CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
}
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ASSERT(hydrogen()->property_cell()->value()->IsSmi());
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(hydrogen()->property_cell()->value())->value());
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ ASSERT(FLAG_optimize_constructed_arrays);
+ LOperand* constructor = UseFixed(instr->constructor(), rdi);
+ argument_count_ -= instr->argument_count();
+ LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), rdi);
argument_count_ -= instr->argument_count();
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
};
+class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallNewArray(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ LOperand* constructor() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
}
+void MacroAssembler::LoadArrayFunction(Register function) {
+ movq(function,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+ movq(function,
+ Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
+ void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same.
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-// Flags: --track-allocation-sites
+// Flags: --track-allocation-sites --nooptimize-constructed-arrays
+
+// TODO(mvstanton): remove --nooptimize-constructed-arrays and enable
+// the constructed array code below when the feature is turned on
+// by default.
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
// enabled, this test takes the appropriate code path to check smi-only arrays.
support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
+optimize_constructed_arrays = false;
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
print("Tests do NOT include smi-only arrays.");
}
+if (optimize_constructed_arrays) {
+ print("Tests include constructed array optimizations.");
+} else {
+ print("Tests do NOT include constructed array optimizations.");
+}
+
var elements_kind = {
fast_smi_only : 'fast smi only elements',
fast : 'fast elements',
if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
}
+function isHoley(obj) {
+ if (%HasFastHoleyElements(obj)) return true;
+ return false;
+}
+
function assertKind(expected, obj, name_opt) {
if (!support_smi_only_arrays &&
expected == elements_kind.fast_smi_only) {
assertEquals(expected, getKind(obj), name_opt);
}
+function assertHoley(obj, name_opt) {
+ assertEquals(true, isHoley(obj), name_opt);
+}
+
+function assertNotHoley(obj, name_opt) {
+ assertEquals(false, isHoley(obj), name_opt);
+}
+
if (support_smi_only_arrays) {
+
+ obj = [];
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = [1, 2, 3];
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array();
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array(0);
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array(2);
+ assertHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array(1,2,3);
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array(1, "hi", 2, undefined);
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast, obj);
+
function fastliteralcase(literal, value) {
- // var literal = [1, 2, 3];
literal[0] = value;
return literal;
}
// Verify that we will not pretransition the double->fast path.
obj = fastliteralcase(get_standard_literal(), "elliot");
assertKind(elements_kind.fast, obj);
-
// This fails until we turn off optimistic transitions to the
// most general elements kind seen on keyed stores. It's a goal
// to turn it off, but for now we need it.
assertKind(elements_kind.fast, obj);
obj = fastliteralcase_smifast(2);
assertKind(elements_kind.fast, obj);
+
+ if (optimize_constructed_arrays) {
+ function newarraycase_smidouble(value) {
+ var a = new Array();
+ a[0] = value;
+ return a;
+ }
+
+ // Case: new Array() as allocation site, smi->double
+ obj = newarraycase_smidouble(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_smidouble(1.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = newarraycase_smidouble(2);
+ assertKind(elements_kind.fast_double, obj);
+
+ function newarraycase_smiobj(value) {
+ var a = new Array();
+ a[0] = value;
+ return a;
+ }
+
+ // Case: new Array() as allocation site, smi->fast
+ obj = newarraycase_smiobj(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_smiobj("gloria");
+ assertKind(elements_kind.fast, obj);
+ obj = newarraycase_smiobj(2);
+ assertKind(elements_kind.fast, obj);
+
+ function newarraycase_length_smidouble(value) {
+ var a = new Array(3);
+ a[0] = value;
+ return a;
+ }
+
+ // Case: new Array(length) as allocation site
+ obj = newarraycase_length_smidouble(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_length_smidouble(1.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = newarraycase_length_smidouble(2);
+ assertKind(elements_kind.fast_double, obj);
+
+ // Try to continue the transition to fast object, but
+ // we will not pretransition from double->fast, because
+ // it may hurt performance ("poisoning").
+ obj = newarraycase_length_smidouble("coates");
+ assertKind(elements_kind.fast, obj);
+ obj = newarraycase_length_smidouble(2.5);
+  // However, because of optimistic transitions, we will
+  // transition to the most general elements kind found, so we
+  // can't count on this assert yet.
+ // assertKind(elements_kind.fast_double, obj);
+
+ function newarraycase_length_smiobj(value) {
+ var a = new Array(3);
+ a[0] = value;
+ return a;
+ }
+
+ // Case: new Array(<length>) as allocation site, smi->fast
+ obj = newarraycase_length_smiobj(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_length_smiobj("gloria");
+ assertKind(elements_kind.fast, obj);
+ obj = newarraycase_length_smiobj(2);
+ assertKind(elements_kind.fast, obj);
+
+ function newarraycase_list_smidouble(value) {
+ var a = new Array(1, 2, 3);
+ a[0] = value;
+ return a;
+ }
+
+ obj = newarraycase_list_smidouble(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_list_smidouble(1.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = newarraycase_list_smidouble(2);
+ assertKind(elements_kind.fast_double, obj);
+
+ function newarraycase_list_smiobj(value) {
+ var a = new Array(4, 5, 6);
+ a[0] = value;
+ return a;
+ }
+
+ obj = newarraycase_list_smiobj(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_list_smiobj("coates");
+ assertKind(elements_kind.fast, obj);
+ obj = newarraycase_list_smiobj(2);
+ assertKind(elements_kind.fast, obj);
+ }
}
// Flags: --allow-natives-syntax --smi-only-arrays
// Flags: --noparallel-recompilation
+// Flags: --notrack-allocation-sites
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile time
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --allow-natives-syntax --smi-only-arrays --notrack-allocation-sites
support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));