// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: function info
- static Register registers[] = { x2 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: function
- static Register registers[] = { x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x3: array literals array
- // x2: array literal index
- // x1: constant elements
- static Register registers[] = { x3, x2, x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(
- Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x3: object literals array
- // x2: object literal index
- // x1: constant properties
- // x0: object literal flags
- static Register registers[] = { x3, x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: feedback vector
- // x3: call feedback slot
- static Register registers[] = { x2, x3 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- // x0: key
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- // x0: key
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: length
- // x1: index (of last match)
- // x0: string
- static Register registers[] = { x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: receiver
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- static Register registers[] = { x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void StringLengthStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { x0, x2 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStringLengthStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: receiver
- // x1: key
- // x0: value
- static Register registers[] = { x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value (js_array)
- // x1: to_map
- static Register registers[] = { x0, x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value to compare
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(CompareNilIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
-}
-
-
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
+ // cp: context
// x1: function
// x2: allocation site with elements kind
// x0: number of arguments to the constructor function
- static Register registers_variable_args[] = { x1, x2, x0 };
- static Register registers_no_args[] = { x1, x2 };
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- descriptor->register_param_count_ =
- sizeof(registers_no_args) / sizeof(registers_no_args[0]);
- descriptor->register_params_ = registers_no_args;
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->stack_parameter_count_ = x0;
- descriptor->register_param_count_ =
- sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
- descriptor->register_params_ = registers_variable_args;
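+    // In the variable-argument case, x0 holds the actual argument count and
+    // is handed to the descriptor as the dynamic stack parameter count
+    // register (hence PASS_ARGUMENTS), as the removed fields did explicitly.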
+ descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
-
- descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // x1: constructor function
- // x0: number of arguments to the constructor function
- static Register registers_variable_args[] = { x1, x0 };
- static Register registers_no_args[] = { x1 };
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
- descriptor->register_param_count_ =
- sizeof(registers_no_args) / sizeof(registers_no_args[0]);
- descriptor->register_params_ = registers_no_args;
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
} else {
- // stack param count needs (constructor pointer, and single argument)
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->stack_parameter_count_ = x0;
- descriptor->register_param_count_ =
- sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
- descriptor->register_params_ = registers_variable_args;
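+    // As above, x0 passes the dynamic argument count (PASS_ARGUMENTS).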
+ descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
-
- descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
-}
-
-
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
-}
-
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
-}
-
-
-void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- // x2: key (unused)
- // x0: value
- static Register registers[] = { x1, x2, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(StoreIC_MissFromStubFailure);
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value
- // x3: target map
- // x1: key
- // x2: receiver
- static Register registers[] = { x0, x3, x1, x2 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: left operand
- // x0: right operand
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: allocation site
- // x1: left operand
- // x0: right operand
- static Register registers[] = { x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void StringAddStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: left operand
- // x0: right operand
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- static PlatformCallInterfaceDescriptor default_descriptor =
- PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- static PlatformCallInterfaceDescriptor noInlineDescriptor =
- PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- static Register registers[] = { x1, // JSFunction
- cp, // context
- x0, // actual number of arguments
- x2, // expected number of arguments
- };
- static Representation representations[] = {
- Representation::Tagged(), // JSFunction
- Representation::Tagged(), // context
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &default_descriptor;
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- static Register registers[] = { cp, // context
- x2, // key
- };
- static Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- static Register registers[] = { cp, // context
- x2, // name
- };
- static Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- static Register registers[] = { cp, // context
- x0, // receiver
- };
- static Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &default_descriptor;
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- static Register registers[] = { x0, // callee
- x4, // call_data
- x2, // holder
- x1, // api_function_address
- cp, // context
- };
- static Representation representations[] = {
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Tagged(), // context
- };
- descriptor->register_param_count_ = 5;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &default_descriptor;
- }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
#define __ ACCESS_MASM(masm)
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
- int param_count = descriptor->register_param_count_;
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
- ASSERT((descriptor->register_param_count_ == 0) ||
- x0.Is(descriptor->register_params_[param_count - 1]));
+ DCHECK((param_count == 0) ||
+ x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) {
- queue.Queue(descriptor->register_params_[i]);
+ queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
}
queue.PushQueued();
- ExternalReference miss = descriptor->miss_handler();
- __ CallExternalReference(miss, descriptor->register_param_count_);
+ __ CallExternalReference(miss, param_count);
}
__ Ret();
Label done;
Register input = source();
Register result = destination();
- ASSERT(is_truncating());
+ DCHECK(is_truncating());
- ASSERT(result.Is64Bits());
- ASSERT(jssp.Is(masm->StackPointer()));
+ DCHECK(result.Is64Bits());
+ DCHECK(jssp.Is(masm->StackPointer()));
int double_offset = offset();
FPRegister double_scratch,
Label* slow,
Condition cond) {
- ASSERT(!AreAliased(left, right, scratch));
+ DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
if ((cond == lt) || (cond == gt)) {
__ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
ge);
+ } else if (cond == eq) {
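+    // For equality, identical operands can only compare non-equal if they
+    // are a heap number holding NaN, so a heap-number check suffices here.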
+ __ JumpIfHeapNumber(right, &heap_number);
} else {
Register right_type = scratch;
__ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
&heap_number);
// Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
- __ B(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if ((cond == le) || (cond == ge)) {
- __ Cmp(right_type, ODDBALL_TYPE);
- __ B(ne, &return_equal);
- __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ Mov(result, GREATER);
- } else {
- // undefined >= undefined should fail.
- __ Mov(result, LESS);
- }
- __ Ret();
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if ((cond == le) || (cond == ge)) {
+ __ Cmp(right_type, ODDBALL_TYPE);
+ __ B(ne, &return_equal);
+ __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ Mov(result, GREATER);
+ } else {
+ // undefined >= undefined should fail.
+ __ Mov(result, LESS);
}
+ __ Ret();
}
}
// it is handled in the parser (see Parser::ParseBinaryExpression). We are
// only concerned with cases ge, le and eq here.
if ((cond != lt) && (cond != gt)) {
- ASSERT((cond == ge) || (cond == le) || (cond == eq));
+ DCHECK((cond == ge) || (cond == le) || (cond == eq));
__ Bind(&heap_number);
// Left and right are identical pointers to a heap number object. Return
// non-equal if the heap number is a NaN, and equal otherwise. Comparing
Register left_type,
Register right_type,
Register scratch) {
- ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
+ DCHECK(!AreAliased(left, right, left_type, right_type, scratch));
if (masm->emit_debug_code()) {
// We assume that the arguments are not identical.
__ B(lt, &right_non_object);
// Return non-zero - x0 already contains a non-zero pointer.
- ASSERT(left.is(x0) || right.is(x0));
+ DCHECK(left.is(x0) || right.is(x0));
Label return_not_equal;
__ Bind(&return_not_equal);
__ Ret();
Register right,
FPRegister left_d,
FPRegister right_d,
- Register scratch,
Label* slow,
bool strict) {
- ASSERT(!AreAliased(left, right, scratch));
- ASSERT(!AreAliased(left_d, right_d));
- ASSERT((left.is(x0) && right.is(x1)) ||
+ DCHECK(!AreAliased(left_d, right_d));
+ DCHECK((left.is(x0) && right.is(x1)) ||
(right.is(x0) && left.is(x1)));
Register result = x0;
// If right is not a number and left is a smi, then strict equality cannot
// succeed. Return non-equal.
Label is_heap_number;
- __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
- &is_heap_number);
+ __ JumpIfHeapNumber(right, &is_heap_number);
// Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
if (!right.is(result)) {
__ Mov(result, NOT_EQUAL);
} else {
// Smi compared non-strictly with a non-smi, non-heap-number. Call the
// runtime.
- __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ __ JumpIfNotHeapNumber(right, slow);
}
// Left is the smi. Right is a heap number. Load right value into right_d, and
// If left is not a number and right is a smi then strict equality cannot
// succeed. Return non-equal.
Label is_heap_number;
- __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
- &is_heap_number);
+ __ JumpIfHeapNumber(left, &is_heap_number);
// Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
if (!left.is(result)) {
__ Mov(result, NOT_EQUAL);
} else {
// Smi compared non-strictly with a non-smi, non-heap-number. Call the
// runtime.
- __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ __ JumpIfNotHeapNumber(left, slow);
}
// Right is the smi. Left is a heap number. Load left value into left_d, and
Register right_type,
Label* possible_strings,
Label* not_both_strings) {
- ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
+ DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
Register result = x0;
Label object_test;
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+ CompareICState::State expected,
Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(input, fail);
}
// We could be strict about internalized/non-internalized here, but as long as
// hydrogen doesn't care, the stub doesn't have to care either.
}
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Register lhs = x1;
Register rhs = x0;
Register result = x0;
Condition cond = GetCondition();
Label miss;
- ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
+ CompareICStub_CheckInputType(masm, lhs, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, right(), &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
// rhs_d, left into lhs_d.
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
- EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
+ EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
__ Bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in rhs_d and
// Left and/or right is a NaN. Load the result register with whatever makes
// the comparison fail, since comparisons with NaN always fail (except ne,
// which is filtered out at a higher level.)
- ASSERT(cond != ne);
+ DCHECK(cond != ne);
if ((cond == lt) || (cond == le)) {
__ Mov(result, GREATER);
} else {
&flat_string_check, &slow);
}
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
__ Bind(&flat_string_check);
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
- x15, &slow);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
+ x15, &slow);
- Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10,
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
x11);
if (cond == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
- x10, x11, x12);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
+ x12);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
- x10, x11, x12, x13);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
+ x12, x13);
}
// Never fall through to here.
if ((cond == lt) || (cond == le)) {
ncr = GREATER;
} else {
- ASSERT((cond == gt) || (cond == ge)); // remaining cases
+ DCHECK((cond == gt) || (cond == ge)); // remaining cases
ncr = LESS;
}
__ Mov(x10, Smi::FromInt(ncr));
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9,
- // ip0 and ip1 are corrupted by the call into C.
CPURegList saved_regs = kCallerSaved;
- saved_regs.Remove(ip0);
- saved_regs.Remove(ip1);
- saved_regs.Remove(x8);
- saved_regs.Remove(x9);
+ CPURegList saved_fp_regs = kCallerSavedFP;
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
+
+ // We don't care if MacroAssembler scratch registers are corrupted.
+ saved_regs.Remove(*(masm->TmpList()));
+ saved_fp_regs.Remove(*(masm->FPTmpList()));
+
__ PushCPURegList(saved_regs);
- if (save_doubles_ == kSaveFPRegs) {
- __ PushCPURegList(kCallerSavedFP);
+ if (save_doubles()) {
+ __ PushCPURegList(saved_fp_regs);
}
AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(x0, ExternalReference::isolate_address(masm->isolate()));
+ __ Mov(x0, ExternalReference::isolate_address(isolate()));
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- 1, 0);
+ ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
- if (save_doubles_ == kSaveFPRegs) {
- __ PopCPURegList(kCallerSavedFP);
+ if (save_doubles()) {
+ __ PopCPURegList(saved_fp_regs);
}
__ PopCPURegList(saved_regs);
__ Ret();
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
// Restore lr with the value it had before the call to this stub (the value
// which must be pushed).
__ Mov(lr, saved_lr);
- if (save_doubles_ == kSaveFPRegs) {
- __ PushSafepointRegistersAndDoubles();
- } else {
- __ PushSafepointRegisters();
- }
+ __ PushSafepointRegisters();
__ Ret(return_address);
}
Register return_address = temps.AcquireX();
// Preserve the return address (lr will be clobbered by the pop).
__ Mov(return_address, lr);
- if (save_doubles_ == kSaveFPRegs) {
- __ PopSafepointRegistersAndDoubles();
- } else {
- __ PopSafepointRegisters();
- }
+ __ PopSafepointRegisters();
__ Ret(return_address);
}
Register result_tagged = x0;
Register base_tagged = x10;
- Register exponent_tagged = x11;
- Register exponent_integer = x12;
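+  // The exponent registers are now taken from the MathPow call interface
+  // descriptors; the DCHECKs document the expected fixed assignments.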
+ Register exponent_tagged = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent_tagged.is(x11));
+ Register exponent_integer = MathPowIntegerDescriptor::exponent();
+ DCHECK(exponent_integer.is(x12));
Register scratch1 = x14;
Register scratch0 = x15;
Register saved_lr = x19;
Label done;
// Unpack the inputs.
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi;
Label unpack_exponent;
// exponent_tagged is a heap number, so load its double value.
__ Ldr(exponent_double,
FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
__ JumpIfSmi(exponent_tagged, &exponent_is_smi);
__ Ldr(exponent_double,
FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
}
// Handle double (heap number) exponents.
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
// Detect integer exponents stored as doubles and handle those in the
// integer fast-path.
- __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
- scratch0_double, &exponent_is_integer);
+ __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
+ scratch0_double, &exponent_is_integer);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
FPRegister half_double = d3;
FPRegister minus_half_double = d4;
// Detect square root case. Crankshaft detects constant +/-0.5 at compile
AllowExternalCallThatCantCauseGC scope(masm);
__ Mov(saved_lr, lr);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
__ Mov(lr, saved_lr);
__ B(&done);
__ Fcmp(result_double, 0.0);
__ B(&done, ne);
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Bail out to runtime code.
__ Bind(&call_runtime);
// Put the arguments back on the stack.
__ Push(base_tagged, exponent_tagged);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
// Return.
__ Bind(&done);
- __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1);
- __ Str(result_double,
- FieldMemOperand(result_tagged, HeapNumber::kValueOffset));
- ASSERT(result_tagged.is(x0));
+ __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
+ result_double);
+ DCHECK(result_tagged.is(x0));
__ IncrementCounter(
- masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ isolate()->counters()->math_pow(), 1, scratch0, scratch1);
__ Ret();
} else {
AllowExternalCallThatCantCauseGC scope(masm);
__ Fmov(base_double, base_double_copy);
__ Scvtf(exponent_double, exponent_integer);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
__ Mov(lr, saved_lr);
__ Bind(&done);
__ IncrementCounter(
- masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ isolate()->counters()->math_pow(), 1, scratch0, scratch1);
__ Ret();
}
}
void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- StoreRegistersStateStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
- StoreRegistersStateStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
+ StoreRegistersStateStub stub(isolate);
+ stub.GetCode();
}
void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
- RestoreRegistersStateStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
- RestoreRegistersStateStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
+ RestoreRegistersStateStub stub(isolate);
+ stub.GetCode();
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate);
- CEntryStub stub_fp(1, kSaveFPRegs);
- stub_fp.GetCode(isolate);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
+ CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
+ stub_fp.GetCode();
}
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal,
- Label* throw_termination,
- bool do_gc,
- bool always_allocate) {
- // x0 : Result parameter for PerformGC, if do_gc is true.
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // The Abort mechanism relies on CallRuntime, which in turn relies on
+ // CEntryStub, so until this stub has been generated, we have to use a
+ // fall-back Abort mechanism.
+ //
+ // Note that this stub must be generated before any use of Abort.
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+
+ ASM_LOCATION("CEntryStub::Generate entry");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Register parameters:
+ // x0: argc (including receiver, untagged)
+ // x1: target
+ //
+ // The stack on entry holds the arguments and the receiver, with the receiver
+ // at the highest address:
+ //
+  //  jssp[argc-1]: receiver
+ // jssp[argc-2]: arg[argc-2]
+ // ... ...
+ // jssp[1]: arg[1]
+ // jssp[0]: arg[0]
+ //
+ // The arguments are in reverse order, so that arg[argc-2] is actually the
+ // first argument to the target function and arg[0] is the last.
+ DCHECK(jssp.Is(__ StackPointer()));
+ const Register& argc_input = x0;
+ const Register& target_input = x1;
+
+ // Calculate argv, argc and the target address, and store them in
+ // callee-saved registers so we can retry the call without having to reload
+ // these arguments.
+ // TODO(jbramley): If the first call attempt succeeds in the common case (as
+ // it should), then we might be better off putting these parameters directly
+ // into their argument registers, rather than using callee-saved registers and
+ // preserving them on the stack.
+ const Register& argv = x21;
+ const Register& argc = x22;
+ const Register& target = x23;
+
+ // Derive argv from the stack pointer so that it points to the first argument
+ // (arg[argc-2]), or just below the receiver in case there are no arguments.
+ // - Adjust for the arg[] array.
+ Register temp_argv = x11;
+ __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ // - Adjust for the receiver.
+ __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+
+ // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
+ // registers.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles(), x10, 3);
+ DCHECK(csp.Is(__ StackPointer()));
+
+ // Poke callee-saved registers into reserved space.
+ __ Poke(argv, 1 * kPointerSize);
+ __ Poke(argc, 2 * kPointerSize);
+ __ Poke(target, 3 * kPointerSize);
+
+ // We normally only keep tagged values in callee-saved registers, as they
+ // could be pushed onto the stack by called stubs and functions, and on the
+ // stack they can confuse the GC. However, we're only calling C functions
+ // which can push arbitrary data onto the stack anyway, and so the GC won't
+ // examine that part of the stack.
+ __ Mov(argc, argc_input);
+ __ Mov(target, target_input);
+ __ Mov(argv, temp_argv);
+
// x21 : argv
// x22 : argc
- // x23 : target
+ // x23 : call target
//
// The stack (on entry) holds the arguments and the receiver, with the
// receiver at the highest address:
//
// After an unsuccessful call, the exit frame and suchlike are left
// untouched, and the stub either throws an exception by jumping to one of
- // the provided throw_ labels, or it falls through. The failure details are
- // passed through in x0.
- ASSERT(csp.Is(__ StackPointer()));
-
- Isolate* isolate = masm->isolate();
-
- const Register& argv = x21;
- const Register& argc = x22;
- const Register& target = x23;
-
- if (do_gc) {
- // Call Runtime::PerformGC, passing x0 (the result parameter for
- // PerformGC) and x1 (the isolate).
- __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
- __ CallCFunction(
- ExternalReference::perform_gc_function(isolate), 2, 0);
- }
+ // the exception_returned label.
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ Mov(x10, Operand(scope_depth));
- __ Ldr(x11, MemOperand(x10));
- __ Add(x11, x11, 1);
- __ Str(x11, MemOperand(x10));
- }
+ DCHECK(csp.Is(__ StackPointer()));
// Prepare AAPCS64 arguments to pass to the builtin.
__ Mov(x0, argc);
__ Mov(x1, argv);
- __ Mov(x2, ExternalReference::isolate_address(isolate));
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
- // Store the return address on the stack, in the space previously allocated
- // by EnterExitFrame. The return address is queried by
- // ExitFrame::GetStateForFramePointer.
Label return_location;
__ Adr(x12, &return_location);
__ Poke(x12, 0);
+
if (__ emit_debug_code()) {
// Verify that the slot below fp[kSPOffset]-8 points to the return location
// (currently in x12).
// Call the builtin.
__ Blr(target);
__ Bind(&return_location);
- const Register& result = x0;
-
- if (always_allocate) {
- __ Mov(x10, Operand(scope_depth));
- __ Ldr(x11, MemOperand(x10));
- __ Sub(x11, x11, 1);
- __ Str(x11, MemOperand(x10));
- }
// x0 result The return code from the call.
// x21 argv
// x22 argc
// x23 target
- //
- // If all of the result bits matching kFailureTagMask are '1', the result is
- // a failure. Otherwise, it's an ordinary tagged object and the call was a
- // success.
- Label failure;
- __ And(x10, result, kFailureTagMask);
- __ Cmp(x10, kFailureTagMask);
- __ B(&failure, eq);
+ const Register& result = x0;
+
+ // Check result for exception sentinel.
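+  // (Runtime failures are now signalled by returning the exception root
+  // rather than a tagged Failure value, so a single root comparison
+  // replaces the old kFailureTagMask test.)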
+ Label exception_returned;
+ __ CompareRoot(result, Heap::kExceptionRootIndex);
+ __ B(eq, &exception_returned);
// The call succeeded, so unwind the stack and return.
__ Peek(argc, 2 * kPointerSize);
__ Peek(target, 3 * kPointerSize);
- __ LeaveExitFrame(save_doubles_, x10, true);
- ASSERT(jssp.Is(__ StackPointer()));
+ __ LeaveExitFrame(save_doubles(), x10, true);
+ DCHECK(jssp.Is(__ StackPointer()));
// Pop or drop the remaining stack slots and return from the stub.
// jssp[24]: Arguments array (of size argc), including receiver.
// jssp[16]: Preserved x23 (used for target).
// jssp[8]: Preserved x22 (used for argc).
// jssp[0]: Preserved x21 (used for argv).
__ Drop(x11);
+ __ AssertFPCRState();
__ Ret();
// The stack pointer is still csp if we aren't returning, and the frame
// hasn't changed (except for the return address).
__ SetStackPointer(csp);
- __ Bind(&failure);
- // The call failed, so check if we need to throw an exception, and fall
- // through (to retry) otherwise.
-
- Label retry;
- // x0 result The return code from the call, including the failure
- // code and details.
- // x21 argv
- // x22 argc
- // x23 target
- // Refer to the Failure class for details of the bit layout.
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ Tst(result, kFailureTypeTagMask << kFailureTagSize);
- __ B(eq, &retry); // RETRY_AFTER_GC
+ // Handling of exception.
+ __ Bind(&exception_returned);
// Retrieve the pending exception.
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
const Register& exception = result;
const Register& exception_address = x11;
- __ Mov(exception_address,
- Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ __ Mov(exception_address, Operand(pending_exception_address));
__ Ldr(exception, MemOperand(exception_address));
// Clear the pending exception.
- __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
__ Str(x10, MemOperand(exception_address));
// x0 exception The exception descriptor.
// Special handling of termination exceptions, which are uncatchable by
// JavaScript code.
- __ Cmp(exception, Operand(isolate->factory()->termination_exception()));
- __ B(eq, throw_termination);
-
- // Handle normal exception.
- __ B(throw_normal);
-
- __ Bind(&retry);
- // The result (x0) is passed through as the next PerformGC parameter.
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // The Abort mechanism relies on CallRuntime, which in turn relies on
- // CEntryStub, so until this stub has been generated, we have to use a
- // fall-back Abort mechanism.
- //
- // Note that this stub must be generated before any use of Abort.
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
-
- ASM_LOCATION("CEntryStub::Generate entry");
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Register parameters:
- // x0: argc (including receiver, untagged)
- // x1: target
- //
- // The stack on entry holds the arguments and the receiver, with the receiver
- // at the highest address:
- //
- // jssp]argc-1]: receiver
- // jssp[argc-2]: arg[argc-2]
- // ... ...
- // jssp[1]: arg[1]
- // jssp[0]: arg[0]
- //
- // The arguments are in reverse order, so that arg[argc-2] is actually the
- // first argument to the target function and arg[0] is the last.
- ASSERT(jssp.Is(__ StackPointer()));
- const Register& argc_input = x0;
- const Register& target_input = x1;
-
- // Calculate argv, argc and the target address, and store them in
- // callee-saved registers so we can retry the call without having to reload
- // these arguments.
- // TODO(jbramley): If the first call attempt succeeds in the common case (as
- // it should), then we might be better off putting these parameters directly
- // into their argument registers, rather than using callee-saved registers and
- // preserving them on the stack.
- const Register& argv = x21;
- const Register& argc = x22;
- const Register& target = x23;
-
- // Derive argv from the stack pointer so that it points to the first argument
- // (arg[argc-2]), or just below the receiver in case there are no arguments.
- // - Adjust for the arg[] array.
- Register temp_argv = x11;
- __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
- // - Adjust for the receiver.
- __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
-
- // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
- // registers.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_, x10, 3);
- ASSERT(csp.Is(__ StackPointer()));
-
- // Poke callee-saved registers into reserved space.
- __ Poke(argv, 1 * kPointerSize);
- __ Poke(argc, 2 * kPointerSize);
- __ Poke(target, 3 * kPointerSize);
-
- // We normally only keep tagged values in callee-saved registers, as they
- // could be pushed onto the stack by called stubs and functions, and on the
- // stack they can confuse the GC. However, we're only calling C functions
- // which can push arbitrary data onto the stack anyway, and so the GC won't
- // examine that part of the stack.
- __ Mov(argc, argc_input);
- __ Mov(target, target_input);
- __ Mov(argv, temp_argv);
-
- Label throw_normal;
- Label throw_termination;
-
- // Call the runtime function.
- GenerateCore(masm,
- &throw_normal,
- &throw_termination,
- false,
- false);
-
- // If successful, the previous GenerateCore will have returned to the
- // calling code. Otherwise, we fall through into the following.
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal,
- &throw_termination,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError()));
- GenerateCore(masm,
- &throw_normal,
- &throw_termination,
- true,
- true);
+ Label throw_termination_exception;
+ __ Cmp(exception, Operand(isolate()->factory()->termination_exception()));
+ __ B(eq, &throw_termination_exception);
// We didn't execute a return case, so the stack frame hasn't been updated
// (except for the return address slot). However, we don't need to initialize
// jssp because the throw method will immediately overwrite it when it
// unwinds the stack.
- if (__ emit_debug_code()) {
- __ Mov(jssp, kDebugZapValue);
- }
__ SetStackPointer(jssp);
- // Throw exceptions.
- // If we throw an exception, we can end up re-entering CEntryStub before we
- // pop the exit frame, so need to ensure that x21-x23 contain GC-safe values
- // here.
-
- __ Bind(&throw_termination);
- ASM_LOCATION("Throw termination");
+ ASM_LOCATION("Throw normal");
__ Mov(argv, 0);
__ Mov(argc, 0);
__ Mov(target, 0);
- __ ThrowUncatchable(x0, x10, x11, x12, x13);
+ __ Throw(x0, x10, x11, x12, x13);
- __ Bind(&throw_normal);
- ASM_LOCATION("Throw normal");
+ __ Bind(&throw_termination_exception);
+ ASM_LOCATION("Throw termination");
__ Mov(argv, 0);
__ Mov(argc, 0);
__ Mov(target, 0);
- __ Throw(x0, x10, x11, x12, x13);
+ __ ThrowUncatchable(x0, x10, x11, x12, x13);
}
// x4: argv.
// Output:
// x0: result.
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- ASSERT(jssp.Is(__ StackPointer()));
+void JSEntryStub::Generate(MacroAssembler* masm) {
+ DCHECK(jssp.Is(__ StackPointer()));
Register code_entry = x0;
// Enable instruction instrumentation. This only works on the simulator, and
__ Mov(jssp, csp);
__ SetStackPointer(jssp);
+ // Configure the FPCR. We don't restore it, so this is technically not allowed
+ // according to AAPCS64. However, we only set default-NaN mode and this will
+ // be harmless for most C code. Also, it works for ARM.
+ __ ConfigureFPCR();
+
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Set up the reserved register for 0.0.
__ Fmov(fp_zero, 0.0);
// Build an entry frame (see layout below).
- Isolate* isolate = masm->isolate();
-
- // Build an entry frame.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
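+  // type() distinguishes StackFrame::ENTRY from StackFrame::ENTRY_CONSTRUCT,
+  // replacing the explicit is_construct parameter.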
+ int marker = type();
int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
__ Mov(x13, bad_frame_pointer);
__ Mov(x12, Smi::FromInt(marker));
- __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate));
+ __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
__ Push(x13, xzr, x12, x10);
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
Label non_outermost_js, done;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
__ Mov(x10, ExternalReference(js_entry_sp));
__ Ldr(x11, MemOperand(x10));
__ Cbnz(x11, &non_outermost_js);
__ B(&done);
__ Bind(&non_outermost_js);
// We spare one instruction by pushing xzr since the marker is 0.
- ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+ DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
__ Push(xzr);
__ Bind(&done);
// fp will be invalid because the PushTryHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
}
__ Str(code_entry, MemOperand(x10));
- __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception())));
+ __ LoadRoot(x0, Heap::kExceptionRootIndex);
__ B(&exit);
// Invoke: Link this frame into the handler chain. There's only one
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
__ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
__ Str(x10, MemOperand(x11));
// Invoke the function by calling through the JS entry trampoline builtin.
// x2: receiver.
// x3: argc.
// x4: argv.
- ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
- : Builtins::kJSEntryTrampoline,
- isolate);
+ ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
+ ? Builtins::kJSConstructEntryTrampoline
+ : Builtins::kJSEntryTrampoline,
+ isolate());
__ Mov(x10, entry);
// Call the JSEntryTrampoline.
// Restore the top frame descriptors from the stack.
__ Pop(x10);
- __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate));
+ __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
__ Str(x10, MemOperand(x11));
// Reset the stack to the callee saved registers.
__ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
// Restore the callee-saved registers and return.
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Mov(csp, jssp);
__ SetStackPointer(csp);
__ PopCalleeSavedRegisters();
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x1 : receiver
- // -- x0 : key
- // -----------------------------------
- Register key = x0;
- receiver = x1;
- __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string()));
- __ B(ne, &miss);
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x2 : name
- // -- x0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = x0;
- }
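+  // The receiver register now comes from the shared load IC calling
+  // convention instead of being chosen per IC kind.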
+ Register receiver = LoadDescriptor::ReceiverRegister();
+
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
+ x11, &miss);
+
+ __ Bind(&miss);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
+}
- StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
+
+void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label miss;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
+ Register result = x0;
+ Register scratch = x3;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX,
+ RECEIVER_IS_STRING);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
__ Bind(&miss);
- StubCompiler::TailCallBuiltin(masm,
- BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
// If there is a call site cache, don't look in the global cache, but do the
// real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
+ if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
Label miss;
__ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
__ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
}
Label return_true, return_result;
+ Register smi_value = scratch1;
{
// Loop through the prototype chain looking for the function prototype.
Register chain_map = x1;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
// Speculatively set a result.
__ Mov(result, res_false);
+ if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
+ // Value to store in the cache cannot be an object.
+ __ Mov(smi_value, Smi::FromInt(1));
+ }
__ Bind(&loop);
// We cannot fall through to here.
__ Bind(&return_true);
__ Mov(result, res_true);
+ if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
+ // Value to store in the cache cannot be an object.
+ __ Mov(smi_value, Smi::FromInt(0));
+ }
__ Bind(&return_result);
if (HasCallSiteInlineCheck()) {
- ASSERT(ReturnTrueFalseObject());
+ DCHECK(ReturnTrueFalseObject());
__ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
__ GetRelocatedValueLocation(map_check_site, scratch2);
__ Str(result, MemOperand(scratch2));
} else {
- __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ Register cached_value = ReturnTrueFalseObject() ? smi_value : result;
+ __ StoreRoot(cached_value, Heap::kInstanceofCacheAnswerRootIndex);
}
__ Ret();
__ Mov(result, res_false);
// Null is not instance of anything.
- __ Cmp(object_type, Operand(masm->isolate()->factory()->null_value()));
+ __ Cmp(object, Operand(isolate()->factory()->null_value()));
__ B(ne, &object_not_null);
__ Ret();
}
-Register InstanceofStub::left() {
- // Object to check (instanceof lhs).
- return x11;
-}
-
-
-Register InstanceofStub::right() {
- // Constructor function (instanceof rhs).
- return x10;
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- Register arg_count = x0;
- Register key = x1;
+ Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
+ Register key = ArgumentsAccessReadDescriptor::index();
+ DCHECK(arg_count.is(x0));
+ DCHECK(key.is(x1));
// The displacement is the offset of the last parameter (if any) relative
// to the frame pointer.
Register caller_fp = x10;
__ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Load and untag the context.
- STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
- __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
- (kSmiShift / kBitsPerByte)));
+ __ Ldr(w11, UntagSmiMemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
__ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
__ B(ne, &runtime);
__ Poke(x10, 1 * kXRegSize);
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
  // Get the arguments map from the current (global) context.
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
- // x7 param_count number of function parameters
- // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+  // x11 sloppy_args_map   args (or aliased args) map (uninit)
+ // x14 recv_arg pointer to receiver arguments
Register global_object = x10;
Register global_ctx = x10;
- Register args_offset = x11;
- Register aliased_args_offset = x10;
+ Register sloppy_args_map = x11;
+ Register aliased_args_map = x10;
__ Ldr(global_object, GlobalObjectMemOperand());
__ Ldr(global_ctx, FieldMemOperand(global_object,
GlobalObject::kNativeContextOffset));
- __ Ldr(args_offset,
- ContextMemOperand(global_ctx,
- Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
- __ Ldr(aliased_args_offset,
- ContextMemOperand(global_ctx,
- Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Ldr(sloppy_args_map,
+ ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Ldr(aliased_args_map,
+ ContextMemOperand(global_ctx, Context::ALIASED_ARGUMENTS_MAP_INDEX));
__ Cmp(mapped_params, 0);
- __ CmovX(args_offset, aliased_args_offset, ne);
+ __ CmovX(sloppy_args_map, aliased_args_map, ne);
// Copy the JS object part.
- __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
- JSObject::kHeaderSize / kPointerSize);
+ __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
+ __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
+ __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
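+  // These three stores write the minimal JSObject header by hand (the map,
+  // then the empty fixed array for both properties and elements), where the
+  // removed code copied a boilerplate object with CopyFields; only the map
+  // still comes from the native context.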
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
const int kCalleeOffset = JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ AssertNotSmi(function);
__ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
// Use the length and set that as an in-object property.
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, recv_arg, arg_count_smi);
- __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+  // Check that the key is an array index, that is, a Uint32.
+ __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
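+  // How the mask check reads: a smi has tag bit 0 clear, so a set
+  // kSmiTagMask bit means "not a smi", and a set kSmiSignMask bit means a
+  // negative smi; either one disqualifies the key as a Uint32 array index.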
+
+  // Everything is fine; call the runtime.
+ __ Push(receiver, key);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ Bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
  // Get the arguments map from the current (native) context.
Register global_object = x10;
Register global_ctx = x10;
- Register args_offset = x4;
+ Register strict_args_map = x4;
__ Ldr(global_object, GlobalObjectMemOperand());
__ Ldr(global_ctx, FieldMemOperand(global_object,
GlobalObject::kNativeContextOffset));
- __ Ldr(args_offset,
- ContextMemOperand(global_ctx,
- Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Ldr(strict_args_map,
+ ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
// x0 alloc_obj pointer to allocated objects: parameter array and
// arguments object
// x1 param_count_smi number of parameters passed to function (smi)
// x2 params pointer to parameters
// x3 function function pointer
- // x4 args_offset offset to arguments boilerplate
+  // x4   strict_args_map  strict arguments map
// x13 param_count number of parameters passed to function
-
- // Copy the JS object part.
- __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
- JSObject::kHeaderSize / kPointerSize);
+ __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
+ __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
+ __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
// Set the smi-tagged length as an in-object property.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, params, param_count_smi);
- __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
// w0 string_type type of subject string
// x2 jsstring_length subject string length
// x3 jsregexp_object JSRegExp object
- // w4 string_encoding ASCII or UC16
+ // w4 string_encoding Latin1 or UC16
// w5 sliced_string_offset if the string is a SlicedString
// offset to the underlying string
// w6 string_representation groups attributes of the string:
const int kJSRegExpOffset = 7 * kPointerSize;
// Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ Mov(x10, address_of_regexp_stack_memory_size);
__ Ldr(x10, MemOperand(x10));
__ Cbz(x10, &runtime);
// Check that the first argument is a JSRegExp object.
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Peek(jsregexp_object, kJSRegExpOffset);
__ JumpIfSmi(jsregexp_object, &runtime);
__ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
// Initialize offset for possibly sliced string.
__ Mov(sliced_string_offset, 0);
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Peek(subject, kSubjectOffset);
__ JumpIfSmi(subject, &runtime);
__ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
// The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ TestAndBranchIfAnySet(string_type.X(),
kStringRepresentationMask,
&external_string); // Go to (7).
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Peek(x10, kPreviousIndexOffset);
__ JumpIfNotSmi(x10, &runtime);
__ Cmp(jsstring_length, x10);
STATIC_ASSERT(kStringEncodingMask == 0x04);
// Find the code object based on the assumptions above.
- // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
+  // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
// of kPointerSize to reach the latter.
- ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+ DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
- // We will need the encoding later: ASCII = 0x04
- // UC16 = 0x00
+ // We will need the encoding later: Latin1 = 0x04
+ // UC16 = 0x00
__ Ands(string_encoding, string_type, kStringEncodingMask);
__ CzeroX(x10, ne);
__ Add(x10, regexp_data, x10);
- __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
+ __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
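+  // Worked through: for Latin1 the Ands above sets the flags to ne, CzeroX
+  // clears x10 and the load reads kDataOneByteCodeOffset itself; for UC16
+  // the flags are eq, x10 keeps kPointerSize and the load lands on the
+  // adjacent kDataUC16CodeOffset.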
// (E) Carry on. String handling is done.
__ JumpIfSmi(code_object, &runtime);
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1,
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
x10,
x11);
// Isolates: note we add an additional parameter here (isolate pointer).
__ EnterExitFrame(false, x10, 1);
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
  // We have 9 arguments to pass to the regexp code, so we have to pass
  // one on the stack and the rest in registers.
// csp[0]: Space for the return address placed by DirectCEntryStub.
// csp[8]: Argument 9, the current isolate address.
- __ Mov(x10, ExternalReference::isolate_address(isolate));
+ __ Mov(x10, ExternalReference::isolate_address(isolate()));
__ Poke(x10, kPointerSize);
Register length = w11;
__ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
// Handle UC16 encoding, two bytes make one character.
- // string_encoding: if ASCII: 0x04
- // if UC16: 0x00
+ // string_encoding: if Latin1: 0x04
+ // if UC16: 0x00
STATIC_ASSERT(kStringEncodingMask == 0x04);
__ Ubfx(string_encoding, string_encoding, 2, 1);
__ Eor(string_encoding, string_encoding, 1);
- // string_encoding: if ASCII: 0
- // if UC16: 1
+ // string_encoding: if Latin1: 0
+ // if UC16: 1
// Convert string positions from characters to bytes.
// Previous index is in x1.
__ Add(x3, x2, Operand(w10, UXTW));
// Argument 5 (x4): static offsets vector buffer.
- __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate));
+ __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
// Argument 6 (x5): Set the number of capture registers to zero to force
// global regexps to behave as non-global. This stub is not used for global
// Locate the code entry and call it.
__ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, code_object);
__ LeaveExitFrame(false, x10, true);
__ Add(number_of_capture_registers, x10, 2);
// Check that the fourth object is a JSArray object.
- ASSERT(jssp.Is(__ StackPointer()));
+ DCHECK(jssp.Is(__ StackPointer()));
__ Peek(x10, kLastMatchInfoOffset);
__ JumpIfSmi(x10, &runtime);
__ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
// Get the static offsets vector filled by the native regexp code
// and fill the last match info.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
+ ExternalReference::address_of_static_offsets_vector(isolate());
__ Mov(offsets_vector_index, address_of_static_offsets_vector);
Label next_capture, done;
// Store the smi values in the last match info.
__ SmiTag(x10, current_offset);
// Clearing the 32 bottom bits gives us a Smi.
- STATIC_ASSERT(kSmiShift == 32);
- __ And(x11, current_offset, ~kWRegMask);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Bic(x11, current_offset, kSmiShiftMask);
__ Stp(x10,
x11,
MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
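+  // What the pair of stores does, assuming 32-bit smis kept in the upper
+  // word (kSmiShift == 32): current_offset holds two consecutive 32-bit
+  // offsets, so SmiTag shifts the low word into smi position for x10, and
+  // Bic keeps only the high word, which is already a valid smi, for x11.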
  // A stack overflow (on the backtrack stack) may have occurred
// in the RegExp code but no exception has been created yet.
// If there is no pending exception, handle that in the runtime system.
- __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
__ Mov(x11,
Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
__ Ldr(exception_value, MemOperand(x11));
__ Cmp(x10, exception_value);
__ B(eq, &runtime);
__ ThrowUncatchable(exception_value, x10, x11, x12, x13);
__ Bind(&failure);
- __ Mov(x0, Operand(masm->isolate()->factory()->null_value()));
+ __ Mov(x0, Operand(isolate()->factory()->null_value()));
__ PopCPURegList(used_callee_saved_registers);
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
__ Bind(&runtime);
__ PopCPURegList(used_callee_saved_registers);
- __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
Register scratch1,
Register scratch2) {
ASM_LOCATION("GenerateRecordCallTarget");
- ASSERT(!AreAliased(scratch1, scratch2,
+ DCHECK(!AreAliased(scratch1, scratch2,
argc, function, feedback_vector, index));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
- ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state.
  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
- __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
+ __ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
  // MegamorphicSentinel is an immortal immovable object (the megamorphic
  // symbol), so no write barrier is needed.
__ Bind(&megamorphic);
__ Add(scratch1, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
+ __ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
__ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
__ B(&done);
// slot.
{
FrameScope scope(masm, StackFrame::INTERNAL);
- CreateAllocationSiteStub create_stub;
+ CreateAllocationSiteStub create_stub(masm->isolate());
// Arguments register must be smi-tagged to call out.
__ SmiTag(argc);
// CreateAllocationSiteStub expect the feedback vector in x2 and the slot
// index in x3.
- ASSERT(feedback_vector.Is(x2) && index.Is(x3));
+ DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(&create_stub);
__ Pop(index, feedback_vector, function, argc);
}
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("CallFunctionStub::Generate");
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont);
+
+  // Do not transform the receiver for native functions.
+  // (The compiler hints are already loaded into w4 above.)
+ __ Tbnz(w4, SharedFunctionInfo::kNative, cont);
+}
+
+
+static void EmitSlowCase(MacroAssembler* masm,
+ int argc,
+ Register function,
+ Register type,
+ Label* non_function) {
+ // Check for function proxy.
+ // x10 : function type.
+ __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, non_function);
+  __ Push(function);  // Put the proxy as an additional argument.
+ __ Mov(x0, argc + 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ Bind(non_function);
+ __ Poke(function, argc * kXRegSize);
+ __ Mov(x0, argc); // Set up the number of arguments.
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Pop(x1);
+ }
+ __ Poke(x0, argc * kPointerSize);
+ __ B(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
// x1 function the function to call
- // x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
Register function = x1;
- Register cache_cell = x2;
- Register slot = x3;
Register type = x4;
Label slow, non_function, wrap, cont;
// TODO(jbramley): This function has a lot of unnamed registers. Name them,
// and tidy things up a bit.
- if (NeedsChecks()) {
+ if (needs_checks) {
// Check that the function is really a JavaScript function.
__ JumpIfSmi(function, &non_function);
// Goto slow case if we do not have a function.
__ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
-
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, x0, function, cache_cell, slot, x4, x5);
- // Type information was updated. Because we may call Array, which
- // expects either undefined or an AllocationSite in ebx we need
- // to set ebx to undefined.
- __ LoadRoot(cache_cell, Heap::kUndefinedValueRootIndex);
- }
}
// Fast-case: Invoke the function now.
// x1 function pushed function
- ParameterCount actual(argc_);
+ ParameterCount actual(argc);
- if (CallAsMethod()) {
- if (NeedsChecks()) {
- // Do not transform the receiver for strict mode functions.
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
- __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont);
-
- // Do not transform the receiver for native (Compilerhints already in x3).
- __ Tbnz(w4, SharedFunctionInfo::kNative, &cont);
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
}
// Compute the receiver in sloppy mode.
- __ Peek(x3, argc_ * kPointerSize);
+ __ Peek(x3, argc * kPointerSize);
- if (NeedsChecks()) {
+ if (needs_checks) {
__ JumpIfSmi(x3, &wrap);
__ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
} else {
__ Bind(&cont);
}
+
__ InvokeFunction(function,
actual,
JUMP_FUNCTION,
NullCallWrapper());
-
- if (NeedsChecks()) {
+ if (needs_checks) {
// Slow-case: Non-function called.
__ Bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable object
- // (megamorphic symbol) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->megamorphic_symbol());
- __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot,
- kPointerSizeLog2));
- __ LoadRoot(x11, Heap::kMegamorphicSymbolRootIndex);
- __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
- }
- // Check for function proxy.
- // x10 : function type.
- __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function);
- __ Push(function); // put proxy as additional argument
- __ Mov(x0, argc_ + 1);
- __ Mov(x2, 0);
- __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ Bind(&non_function);
- __ Poke(function, argc_ * kXRegSize);
- __ Mov(x0, argc_); // Set up the number of arguments.
- __ Mov(x2, 0);
- __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ EmitSlowCase(masm, argc, function, type, &non_function);
}
- if (CallAsMethod()) {
+ if (call_as_method) {
__ Bind(&wrap);
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Pop(x1);
- }
- __ Poke(x0, argc_ * kPointerSize);
- __ B(&cont);
+ EmitWrapCase(masm, argc, &cont);
}
}
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallFunctionStub::Generate");
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
+}
+
+
void CallConstructStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallConstructStub::Generate");
// x0 : number of arguments
__ Bind(&do_call);
// Set expected number of arguments to zero (not changing x0).
__ Mov(x2, 0);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(vector, FieldMemOperand(vector,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(vector, FieldMemOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
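+// (The load chain above: the JSFunction of the current frame, then its
+// SharedFunctionInfo, then the feedback vector hanging off the shared info,
+// so the register ends up holding the TypeFeedbackVector.)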
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // x1 - function
+ // x3 - slot id
+ Label miss;
+ Register function = x1;
+ Register feedback_vector = x2;
+ Register index = x3;
+ Register scratch = x4;
+
+ EmitLoadTypeFeedbackVector(masm, feedback_vector);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
+ __ Cmp(function, scratch);
+ __ B(ne, &miss);
+
+ __ Mov(x0, Operand(arg_count()));
+
+ __ Add(scratch, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+
+  // Verify that scratch contains an AllocationSite.
+ Register map = x5;
+ __ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
+
+ Register allocation_site = feedback_vector;
+ __ Mov(allocation_site, scratch);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+  __ Bind(&miss);
+ GenerateMiss(masm);
+
+  // The slow case: we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ __ Unreachable();
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallICStub");
+
+ // x1 - function
+ // x3 - slot id (Smi)
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = arg_count();
+ ParameterCount actual(argc);
+
+ Register function = x1;
+ Register feedback_vector = x2;
+ Register index = x3;
+ Register type = x4;
+
+ EmitLoadTypeFeedbackVector(masm, feedback_vector);
+
+ // The checks. First, does x1 match the recorded monomorphic target?
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
+
+ __ Cmp(x4, function);
+ __ B(ne, &extra_checks_or_miss);
+
+  __ Bind(&have_js_function);
+ if (CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+
+ // Compute the receiver in sloppy mode.
+ __ Peek(x3, argc * kPointerSize);
+
+ __ JumpIfSmi(x3, &wrap);
+ __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
+
+ __ Bind(&cont);
+ }
+
+ __ InvokeFunction(function,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper());
+
+  __ Bind(&slow);
+ EmitSlowCase(masm, argc, function, type, &non_function);
+
+ if (CallAsMethod()) {
+    __ Bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+  __ Bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
+ __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(x4);
+ __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
+ __ Add(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
+ __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
+ // We have to update statistics for runtime profiling.
+ const int with_types_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+ __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ __ Subs(x4, x4, Operand(Smi::FromInt(1)));
+ __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ const int generic_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
+ __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
+ __ Adds(x4, x4, Operand(Smi::FromInt(1)));
+ __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
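+    // Bookkeeping sketch: a site that goes megamorphic stops counting as a
+    // typed call site, so the with-types count is decremented while the
+    // generic count is incremented, keeping the profiler's ratios honest.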
+ __ B(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+  __ Bind(&miss);
+ GenerateMiss(masm);
+
+  // The slow case.
+  __ Bind(&slow_start);
+
+ // Check that the function is really a JavaScript function.
+ __ JumpIfSmi(function, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
+ __ B(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("CallICStub[Miss]");
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ Peek(x4, (arg_count() + 1) * kPointerSize);
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Push the receiver, the function, and the feedback info.
+ __ Push(x4, x1, x2, x3);
+
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+    // Move the result to x1 and exit the internal frame.
+ __ Mov(x1, x0);
+ }
+}
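+// (Calling convention sketch, as read from GenerateMiss above: the miss
+// handler receives the receiver, function, feedback vector and slot, and
+// returns the target function in x0, which is moved into x1 for the common
+// call path.)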
+
+
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  // If the receiver is a smi, trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
+ if (check_mode_ == RECEIVER_IS_UNKNOWN) {
+ __ JumpIfSmi(object_, receiver_not_string_);
- // Fetch the instance type of the receiver into result register.
- __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
+    // If the receiver is not a string, trigger the non-string case.
+ __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
+ }
  // If the index is not a smi, trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
__ Bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- result_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(index_, index_not_number_);
call_helper.BeforeCall(masm);
// Save object_ on the stack and pass index_ as argument for runtime call.
__ Push(object_, index_);
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
__ B(hi, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
- STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
- __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+  // The code register now contains the smi-tagged one-byte char code.
+ __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
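+  // That is, result_ += (code_ >> kSmiShift) << kPointerSizeLog2: the smi
+  // char code is untagged and scaled to a pointer-sized index into the
+  // single-character string cache in a single operand.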
__ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
__ Bind(&exit_);
}
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
// Inputs are in x0 (lhs) and x1 (rhs).
- ASSERT(state_ == CompareIC::SMI);
- ASM_LOCATION("ICCompareStub[Smis]");
+ DCHECK(state() == CompareICState::SMI);
+ ASM_LOCATION("CompareICStub[Smis]");
Label miss;
// Bail out (to 'miss') unless both x0 and x1 are smis.
__ JumpIfEitherNotSmi(x0, x1, &miss);
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
- ASM_LOCATION("ICCompareStub[HeapNumbers]");
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
+ ASM_LOCATION("CompareICStub[HeapNumbers]");
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss, handle_lhs, values_in_d_regs;
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(lhs, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(rhs, &miss);
}
// Load rhs if it's a heap number.
__ JumpIfSmi(rhs, &handle_lhs);
- __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
__ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Load lhs if it's a heap number.
__ Bind(&handle_lhs);
__ JumpIfSmi(lhs, &values_in_d_regs);
- __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
+ __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
__ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
__ Bind(&values_in_d_regs);
__ Ret();
__ Bind(&unordered);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ Bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
__ JumpIfSmi(lhs, &unordered);
- __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
+ __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
__ B(&unordered);
}
__ Bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
}
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
- ASM_LOCATION("ICCompareStub[InternalizedStrings]");
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+ ASM_LOCATION("CompareICStub[InternalizedStrings]");
Label miss;
Register result = x0;
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASM_LOCATION("ICCompareStub[UniqueNames]");
- ASSERT(GetCondition() == eq);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
+ ASM_LOCATION("CompareICStub[UniqueNames]");
+ DCHECK(GetCondition() == eq);
Label miss;
Register result = x0;
// To avoid a miss, each instance type should be either SYMBOL_TYPE or it
// should have kInternalizedTag set.
- __ JumpIfNotUniqueName(lhs_instance_type, &miss);
- __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+ __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
+ __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
// Unique names are compared by identity.
STATIC_ASSERT(EQUAL == 0);
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
- ASM_LOCATION("ICCompareStub[Strings]");
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
+ ASM_LOCATION("CompareICStub[Strings]");
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
Register result = x0;
Register rhs = x0;
// because we already know they are not identical. We know they are both
// strings.
if (equality) {
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
STATIC_ASSERT(kInternalizedTag == 0);
Label not_internalized_strings;
__ Orr(x12, lhs_type, rhs_type);
__ Bind(¬_internalized_strings);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- lhs_type, rhs_type, x12, x13, &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
+ x13, &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one-byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, lhs, rhs, x10, x11, x12);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
+ x12);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, lhs, rhs, x10, x11, x12, x13);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
+ x12, x13);
}
// Handle more complex cases in runtime.
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ Bind(&miss);
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
- ASM_LOCATION("ICCompareStub[Objects]");
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
+ ASM_LOCATION("CompareICStub[Objects]");
Label miss;
__ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
__ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
__ Sub(result, rhs, lhs);
__ Ret();
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- ASM_LOCATION("ICCompareStub[KnownObjects]");
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+ ASM_LOCATION("CompareICStub[KnownObjects]");
Label miss;
// This method handles the case where a compare stub had the wrong
// implementation. It calls a miss handler, which rewrites the stub. All other
-// ICCompareStub::Generate* methods should fall back into this one if their
+// CompareICStub::Generate* methods should fall back into this one if their
// operands were not the expected types.
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("ICCompareStub[Miss]");
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("CompareICStub[Miss]");
Register stub_entry = x11;
{
ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
Register op = x10;
// Preserve some caller-saved registers.
__ Push(x1, x0, lr);
// Push the arguments.
- __ Mov(op, Smi::FromInt(op_));
+ __ Mov(op, Smi::FromInt(this->op()));
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
}
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- ASSERT(!AreAliased(hash, character));
-
- // hash = character + (character << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ Add(hash, character, Operand(hash, LSR, kSmiShift));
-
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
-
- // hash += hash << 10;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
- // hash ^= hash >> 6;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- ASSERT(!AreAliased(hash, character));
-
- // hash += character;
- __ Add(hash, hash, character);
-
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
-
- // hash += hash << 10;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
- // hash ^= hash >> 6;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
- Register scratch_w = scratch.W();
- ASSERT(!AreAliased(hash_w, scratch_w));
-
- // hash += hash << 3;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
- // hash ^= hash >> 11;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
- // hash += hash << 15;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
-
- __ Ands(hash_w, hash_w, String::kHashBitMask);
-
- // if (hash == 0) hash = 27;
- __ Mov(scratch_w, StringHasher::kZeroHash);
- __ Csel(hash_w, scratch_w, hash_w, eq);
-}
-
-
void SubStringStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("SubStringStub::Generate");
Label runtime;
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
- __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
- &runtime);
+ __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
+ &runtime);
__ B(&set_slice_header);
__ Bind(&two_byte_slice);
SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ Bind(&allocate_result);
- // Sequential ASCII string. Allocate the result.
+ // Sequential one-byte string. Allocate the result.
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
+ // Allocate and copy the resulting one-byte string.
+ __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
// Locate first character of substring to copy.
__ Add(substring_char0, unpacked_char0, from);
__ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
__ Bind(&return_x0);
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
__ Drop(3);
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
  __ Bind(&single_char);
// x1: result_length
// x12: input_type
// x15: from (untagged)
__ SmiTag(from);
- StringCharAtGenerator generator(
- input_string, from, result_length, x0,
- &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ StringCharAtGenerator generator(input_string, from, result_length, x0,
+ &runtime, &runtime, &runtime,
+ STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ Drop(3);
__ Ret();
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
+void StringHelper::GenerateFlatOneByteStringEquals(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
+ DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
Register result = x0;
Register left_length = scratch1;
Register right_length = scratch2;
// Compare characters. Falls through if all characters are equal.
__ Bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
- scratch3, &strings_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
+ scratch3, &strings_not_equal);
// Characters in strings are equal.
__ Mov(result, Smi::FromInt(EQUAL));
}
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4) {
+ DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ Cbz(min_length, &compare_lengths);
// Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4,
- &result_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ scratch4, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ Bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
// Use length_delta as result if it's zero.
Register result = x0;
}
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal) {
- ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Label* chars_not_equal) {
+ DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
// Change index to run from -length to -1 by adding length to string
  // start. This means that the loop ends when index reaches zero, which
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
// Stack frame on entry.
// sp[0]: right string
__ Bind(¬_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfEitherIsNotSequentialOneByteStrings(left, right, x12, x13, &runtime);
- // Compare flat ASCII strings natively. Remove arguments from stack first,
+ // Compare flat one-byte strings natively. Remove arguments from stack first,
// as this function will generate a return.
__ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
- GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, x12, x13,
+ x14, x15);
__ Bind(&runtime);
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
- __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
-}
-
-
-void ArrayPushStub::Generate(MacroAssembler* masm) {
- Register receiver = x0;
-
- int argc = arguments_count();
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
- return;
- }
-
- Isolate* isolate = masm->isolate();
-
- if (argc != 1) {
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
- return;
- }
-
- Label call_builtin, attempt_to_grow_elements, with_write_barrier;
-
- Register elements_length = x8;
- Register length = x7;
- Register elements = x6;
- Register end_elements = x5;
- Register value = x4;
- // Get the elements array of the object.
- __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- if (IsFastSmiOrObjectElementsKind(elements_kind())) {
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- x10,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
- }
-
- // Get the array's length and calculate new length.
- __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ Add(length, length, Smi::FromInt(argc));
-
- // Check if we could survive without allocation.
- __ Ldr(elements_length,
- FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(length, elements_length);
-
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
-
- if (IsFastSmiOrObjectElementsKind(elements_kind())) {
- __ B(gt, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ Peek(value, (argc - 1) * kPointerSize);
- __ JumpIfNotSmi(value, &with_write_barrier);
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ Add(end_elements, elements,
- Operand::UntagSmiAndScale(length, kPointerSizeLog2));
- __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
- } else {
- __ B(gt, &call_builtin);
-
- __ Peek(value, (argc - 1) * kPointerSize);
- __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
- &call_builtin, argc * kDoubleSize);
- }
-
- // Save new length.
- __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Return length.
- __ Drop(argc + 1);
- __ Mov(x0, length);
- __ Ret();
-
- if (IsFastDoubleElementsKind(elements_kind())) {
- __ Bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
- return;
- }
-
- __ Bind(&with_write_barrier);
-
- if (IsFastSmiElementsKind(elements_kind())) {
- if (FLAG_trace_elements_transitions) {
- __ B(&call_builtin);
- }
-
- __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
- __ JumpIfHeapNumber(x10, &call_builtin);
-
- ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
- ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
- __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
- const int header_size = FixedArrayBase::kHeaderSize;
- // Verify that the object can be transitioned in place.
- const int origin_offset = header_size + elements_kind() * kPointerSize;
- __ ldr(x11, FieldMemOperand(receiver, origin_offset));
- __ ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
- __ cmp(x11, x12);
- __ B(ne, &call_builtin);
-
- const int target_offset = header_size + target_kind * kPointerSize;
- __ Ldr(x10, FieldMemOperand(x10, target_offset));
- __ Mov(x11, receiver);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, DONT_TRACK_ALLOCATION_SITE, NULL);
- }
-
- // Save new length.
- __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ Add(end_elements, elements,
- Operand::UntagSmiAndScale(length, kPointerSizeLog2));
- __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- __ RecordWrite(elements,
- end_elements,
- value,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ Mov(x0, length);
- __ Ret();
-
- __ Bind(&attempt_to_grow_elements);
-
- if (!FLAG_inline_new) {
- __ B(&call_builtin);
- }
-
- Register argument = x2;
- __ Peek(argument, (argc - 1) * kPointerSize);
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- if (IsFastSmiElementsKind(elements_kind())) {
- __ JumpIfNotSmi(argument, &call_builtin);
- }
-
- // We could be lucky and the elements array could be at the top of new-space.
- // In this case we can just grow it in place by moving the allocation pointer
- // up.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate);
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate);
-
- const int kAllocationDelta = 4;
- ASSERT(kAllocationDelta >= argc);
- Register allocation_top_addr = x5;
- Register allocation_top = x9;
- // Load top and check if it is the end of elements.
- __ Add(end_elements, elements,
- Operand::UntagSmiAndScale(length, kPointerSizeLog2));
- __ Add(end_elements, end_elements, kEndElementsOffset);
- __ Mov(allocation_top_addr, new_space_allocation_top);
- __ Ldr(allocation_top, MemOperand(allocation_top_addr));
- __ Cmp(end_elements, allocation_top);
- __ B(ne, &call_builtin);
-
- __ Mov(x10, new_space_allocation_limit);
- __ Ldr(x10, MemOperand(x10));
- __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
- __ Cmp(allocation_top, x10);
- __ B(hi, &call_builtin);
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ Str(allocation_top, MemOperand(allocation_top_addr));
- // Push the argument.
- __ Str(argument, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- ASSERT(kAllocationDelta == 4);
- __ Stp(x10, x10, MemOperand(end_elements, 1 * kPointerSize));
- __ Stp(x10, x10, MemOperand(end_elements, 3 * kPointerSize));
-
- // Update elements' and array's sizes.
- __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Add(elements_length, elements_length, Smi::FromInt(kAllocationDelta));
- __ Str(elements_length,
- FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Mov(x0, length);
- __ Ret();
-
- __ Bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
// -- x0 : right
// -- lr : return address
// -----------------------------------
- Isolate* isolate = masm->isolate();
// Load x2 with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ LoadObject(x2, handle(isolate->heap()->undefined_value()));
+ __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
// Tail call into the stub that handles binary operations with allocation
// sites.
- BinaryOpWithAllocationSiteStub stub(state_);
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
-bool CodeStub::CanUseFPRegisters() {
- // FP registers always available on ARM64.
- return true;
-}
-
-
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  // We need some extra registers for this stub; they have been allocated,
// but we need to save them before using them.
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- Register value = regs_.scratch0();
- __ Ldr(value, MemOperand(regs_.address()));
- __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
+ Register val = regs_.scratch0();
+ __ Ldr(val, MemOperand(regs_.address()));
+ __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
- __ CheckPageFlagSet(regs_.object(),
- value,
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
InformIncrementalMarker(masm);
regs_.Restore(masm); // Restore the extra scratch registers we used.
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
__ Bind(&dont_need_remembered_set);
}
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
Register address =
x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.Is(regs_.object()));
- ASSERT(!address.Is(x0));
+ DCHECK(!address.Is(regs_.object()));
+ DCHECK(!address.Is(x0));
__ Mov(address, regs_.address());
__ Mov(x0, regs_.object());
__ Mov(x1, address);
- __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
AllowExternalCallThatCantCauseGC scope(masm);
ExternalReference function =
ExternalReference::incremental_marking_record_write_function(
- masm->isolate());
+ isolate());
__ CallCFunction(function, 3, 0);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
regs_.Restore(masm); // Restore the extra scratch registers we used.
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ Bind(&on_black);
// Get the value from the slot.
- Register value = regs_.scratch0();
- __ Ldr(value, MemOperand(regs_.address()));
+ Register val = regs_.scratch0();
+ __ Ldr(val, MemOperand(regs_.address()));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
- __ CheckPageFlagClear(value,
- regs_.scratch1(),
+ __ CheckPageFlagClear(val, regs_.scratch1(),
MemoryChunk::kEvacuationCandidateMask,
&ensure_not_white);
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.address(), regs_.object());
- __ EnsureNotWhite(value,
+ __ EnsureNotWhite(val,
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
regs_.Restore(masm); // Restore the extra scratch registers we used.
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ adr(xzr, &skip_to_incremental_compacting);
}
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_, // scratch1
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(),
+ value(), // scratch1
+ save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
}
__ Ret();
__ JumpIfSmi(value, &smi_element);
// Jump to fast_elements if the array's ElementsKind is not a FAST*_SMI kind,
// i.e. it is FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
- __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::kElementsKindShift),
+ __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
&fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime to fix it.
__ Bind(&double_elements);
__ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
+ __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
&slow_elements);
__ Ret();
}
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ Ldr(x1, MemOperand(fp, parameter_count_offset));
- if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Add(x1, x1, 1);  // Include the receiver in the argument count.
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
}
-// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
-// a "Push lr" instruction, followed by a call.
-static const unsigned int kProfileEntryHookCallSize =
- Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
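// Both trampolines added above follow the same pattern: materialize the type
// feedback vector into VectorLoadICDescriptor::VectorRegister(), then tail
// jump to the full vector-based IC stub, so call sites only need to encode
// the trampoline.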
+
+
+static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
+ // The entry hook is a "BumpSystemStackPointer" instruction (sub),
+ // followed by a "Push lr" instruction, followed by a call.
+ unsigned int size =
+ Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+ if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ // If ALWAYS_ALIGN_CSP is set, there will be an extra bic instruction in
+ // "BumpSystemStackPointer".
+ size += kInstructionSize;
+ }
+ return size;
+}
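// A worked instance of the computation above, with illustrative numbers
// (A64 instructions are 4 bytes; the relocated call size is assumed here,
// not taken from Assembler):
//
//   unsigned ExampleEntryHookCallSize(bool always_align_csp) {
//     const unsigned kInstrSize = 4;
//     unsigned size = 2 * kInstrSize    // sub (bump csp) + push lr
//                   + 2 * kInstrSize;   // call with relocation (assumed)
//     if (always_align_csp) size += kInstrSize;  // extra bic in the bump
//     return size;  // 16 bytes, or 20 with ALWAYS_ALIGN_CSP.
//   }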
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- ProfileEntryHookStub stub;
+ ProfileEntryHookStub stub(masm->isolate());
Assembler::BlockConstPoolScope no_const_pools(masm);
+ DontEmitDebugCodeScope no_debug_code(masm);
Label entry_hook_call_start;
__ Bind(&entry_hook_call_start);
__ Push(lr);
__ CallStub(&stub);
- ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
- kProfileEntryHookCallSize);
+ DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
+ GetProfileEntryHookCallSize(masm));
__ Pop(lr);
}
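// The DCHECK above pins the exact byte length of the emitted hook sequence
// (hence the DontEmitDebugCodeScope): Generate() below recovers the
// instrumented function's entry point as
//   x0 = lr - GetProfileEntryHookCallSize(masm);
// so any extra instruction in the sequence would hand the hook a bogus
// address.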
// Save all caller-saved registers (including lr), since this code can be
// called from anywhere.
// TODO(jbramley): What about FP registers?
__ PushCPURegList(kCallerSaved);
- ASSERT(kCallerSaved.IncludesAliasOf(lr));
+ DCHECK(kCallerSaved.IncludesAliasOf(lr));
const int kNumSavedRegs = kCallerSaved.Count();
// Compute the function's address as the first argument.
- __ Sub(x0, lr, kProfileEntryHookCallSize);
+ __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
#if V8_HOST_ARCH_ARM64
uintptr_t entry_hook =
- reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
+ reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
__ Mov(x10, entry_hook);
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ Mov(x10, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
- masm->isolate())));
+ isolate())));
// It additionally takes an isolate as a third parameter.
- __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
#endif
// The caller's return address is above the saved temporaries.
__ Blr(x10);
// Return to calling code.
__ Peek(lr, 0);
+ __ AssertFPCRState();
__ Ret();
__ SetStackPointer(old_stack_pointer);
Register target) {
// Make sure the caller configured the stack pointer (see comment in
// DirectCEntryStub::Generate).
- ASSERT(csp.Is(__ StackPointer()));
+ DCHECK(csp.Is(__ StackPointer()));
intptr_t code =
- reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ reinterpret_cast<intptr_t>(GetCode().location());
__ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
__ Mov(x10, target);
// Branch to the stub.
Register name,
Register scratch1,
Register scratch2) {
- ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+ DCHECK(!AreAliased(elements, name, scratch1, scratch2));
// Assert that name contains a string.
__ AssertName(name);
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following And instruction.
- ASSERT(NameDictionary::GetProbeOffset(i) <
+ DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Add(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
__ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
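// The probe computation above as integer arithmetic (sketch; kHashShift
// strips the hash field's flag bits, and kEntrySize == 3 as asserted):
//
//   index = (hash + NameDictionary::GetProbeOffset(i)) & mask;  // probe i
//   index = index * 3;  // each entry spans three slots: key, value, details
//
// The probe offset is added pre-shifted so the add and the masking shift
// fold into the two instructions emitted above.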
// Check if the key is identical to the name.
__ PushCPURegList(spill_list);
if (name.is(x0)) {
- ASSERT(!elements.is(x1));
+ DCHECK(!elements.is(x1));
__ Mov(x1, name);
__ Mov(x0, elements);
} else {
}
Label not_found;
- NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
__ Cbz(x0, ¬_found);
__ Mov(scratch2, x2); // Move entry index into scratch2.
Register properties,
Handle<Name> name,
Register scratch0) {
- ASSERT(!AreAliased(receiver, properties, scratch0));
- ASSERT(name->IsUniqueName());
+ DCHECK(!AreAliased(receiver, properties, scratch0));
+ DCHECK(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property.
__ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
// Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
__ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ Ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ Bind(&good);
}
__ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ Mov(x1, Operand(name));
- NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
// Move stub return value to scratch0. Note that scratch0 is not included in
// spill_list and won't be clobbered by PopCPURegList.
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following And instruction.
- ASSERT(NameDictionary::GetProbeOffset(i) <
+ DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Add(index, hash,
NameDictionary::GetProbeOffset(i) << Name::kHashShift);
__ And(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
__ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
__ Cmp(entry_key, key);
__ B(eq, &in_dictionary);
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
__ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup, probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
+ if (mode() == POSITIVE_LOOKUP) {
__ Mov(result, 0);
__ Ret();
}
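// Summarizing the "maybe in dictionary" outcome (control falls through to
// in_dictionary when not returning here):
//   POSITIVE_LOOKUP: result = 0, treat as a miss and let the caller fall
//                    back to the runtime;
//   NEGATIVE_LOOKUP: fall through, result = 1, since absence was not proven.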
AllocationSiteOverrideMode mode) {
ASM_LOCATION("CreateArrayDispatch");
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(), mode);
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// TODO(jbramley): Is this the best way to handle this? Can we make the
// tail calls conditional, rather than hopping over each one?
__ CompareAndBranch(kind, candidate_kind, ne, &next);
- T stub(candidate_kind);
+ T stub(masm->isolate(), candidate_kind);
__ TailCallStub(&stub);
__ Bind(&next);
}
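// Shape of the code the DONT_OVERRIDE loop emits (sketch): a linear chain
// of guarded tail calls, one per candidate ElementsKind:
//
//   cmp kind, #candidate_0; b.ne next0; b Stub<candidate_0>
//   next0: cmp kind, #candidate_1; b.ne next1; b Stub<candidate_1>
//   ...
//
// This linear chain is exactly what the TODO above asks about flattening.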
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ Bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
Label next;
ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
__ CompareAndBranch(kind, candidate_kind, ne, &next);
- ArraySingleArgumentConstructorStub stub(candidate_kind);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
__ TailCallStub(&stub);
__ Bind(&next);
}
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate);
+ T stub(isolate, kind);
+ stub.GetCode();
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
- T stub1(kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate);
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
}
}
}
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate);
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
}
}
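// The loop above pre-generates the full internal-array dispatch matrix,
// {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS} x {no-arg, single-arg, N-arg}, so the
// constructor dispatch below can tail call any variant without compiling a
// stub lazily.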
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Register argc = x0;
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label zero_case, n_case;
__ Cbz(argc, &zero_case);
__ Cmp(argc, 1);
// N arguments.
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("ArrayConstructorStub::Generate");
// ----------- S t a t e -------------
- // -- x0 : argc (only if argument_count_ == ANY)
+ // -- x0 : argc (only if argument_count() == ANY)
// -- x1 : constructor
// -- x2 : AllocationSite or undefined
// -- sp[0] : return address
__ Cbz(x10, &packed_case);
InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey);
__ Bind(&packed_case);
}
- InternalArraySingleArgumentConstructorStub stub1(kind);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
__ Bind(&zero_case);
// No arguments.
- InternalArrayNoArgumentConstructorStub stub0(kind);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0);
__ Bind(&n_case);
// N arguments.
- InternalArrayNArgumentsConstructorStub stubN(kind);
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
__ TailCallStub(&stubN);
}
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
Register constructor = x1;
Register api_function_address = x1;
Register context = cp;
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- Isolate* isolate = masm->isolate();
-
// FunctionCallbackArguments: context, callee and call data.
__ Push(context, callee, call_data);
__ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
}
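// Background for the accessor change above: argc(), is_store() and
// call_data_undefined() decode the same packed minor key that the removed
// lines read via *Bits::decode(bit_field_). The idiom is V8's BitField
// pattern; a minimal self-contained analogue (field layout invented for
// illustration):
//
//   template <class T, int shift, int size>
//   struct MiniBitField {
//     static const uint32_t kMask = ((1u << size) - 1) << shift;
//     static uint32_t encode(T value) {
//       return static_cast<uint32_t>(value) << shift;
//     }
//     static T decode(uint32_t bits) {
//       return static_cast<T>((bits & kMask) >> shift);
//     }
//   };
//   typedef MiniBitField<int, 0, 8> ExampleArgumentBits;  // bits 0-7: argc
//   typedef MiniBitField<bool, 8, 1> ExampleIsStoreBits;  // bit 8: is_store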
Register isolate_reg = x5;
- __ Mov(isolate_reg, ExternalReference::isolate_address(isolate));
+ __ Mov(isolate_reg, ExternalReference::isolate_address(isolate()));
// FunctionCallbackArguments:
// return value, return value default, isolate, holder.
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
- ASSERT(!AreAliased(x0, api_function_address));
+ DCHECK(!AreAliased(x0, api_function_address));
// x0 = FunctionCallbackInfo&
// Arguments are just after the return address.
__ Add(x0, masm->StackPointer(), 1 * kPointerSize);
__ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- masm->isolate());
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
// -- x2 : api_function_address
// -----------------------------------
- Register api_function_address = x2;
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(x2));
__ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
__ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
- ExternalReference::Type thunk_type =
- ExternalReference::PROFILING_GETTER_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- masm->isolate());
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
const int spill_offset = 1 + kApiStackSpace;
__ CallApiFunctionAndReturn(api_function_address,