// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
+#include "src/base/bits.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a0 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a0 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a3, a2, a1 };
- descriptor->register_param_count_ = 3;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a3, a2, a1, a0 };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a1, a0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a0 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a1 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2, a1, a0 };
- descriptor->register_param_count_ = 3;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a0, a1 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a0 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(CompareNilIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
-}
-
-
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // a0 -- number of arguments
- // a1 -- function
- // a2 -- type info cell with elements kind
- static Register registers[] = { a1, a2 };
- descriptor->register_param_count_ = 2;
- if (constant_stack_parameter_count != 0) {
- // stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = a0;
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kArrayConstructor)->entry;
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
- descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
- descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
}
static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor,
+ Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
- // register state
- // a0 -- number of arguments
- // a1 -- constructor function
- static Register registers[] = { a1 };
- descriptor->register_param_count_ = 1;
-
- if (constant_stack_parameter_count != 0) {
- // Stack param count needs (constructor pointer, and single argument).
- descriptor->stack_parameter_count_ = a0;
- }
- descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
- descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
-}
+ Address deopt_handler = Runtime::FunctionForId(
+ Runtime::kInternalArrayConstructor)->entry;
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+ if (constant_stack_parameter_count == 0) {
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ }
}
-void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a0 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ToBooleanIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
-void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a1, a2, a0 };
- descriptor->register_param_count_ = 3;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a0, a3, a1, a2 };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs);
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
- int param_count = descriptor->register_param_count_;
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
- ASSERT(descriptor->register_param_count_ == 0 ||
- a0.is(descriptor->register_params_[param_count - 1]));
- // Push arguments
+ DCHECK(param_count == 0 ||
+ a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ // Push arguments, adjust sp.
+ __ Subu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->register_params_[i]);
+ // Store argument to stack.
+ __ sw(descriptor.GetEnvironmentParameterRegister(i),
+ MemOperand(sp, (param_count - 1 - i) * kPointerSize));
}
- ExternalReference miss = descriptor->miss_handler();
- __ CallExternalReference(miss, descriptor->register_param_count_);
+ __ CallExternalReference(miss, param_count);
}
__ Ret();
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ li(a1, Operand(Smi::FromInt(0)));
- __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(1);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(a3, &after_sentinel);
- if (FLAG_debug_code) {
- __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
- }
- __ lw(a3, GlobalObjectOperand());
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
- __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
- __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
- __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
- __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(2);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
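-// For example, the Smi 5 converts to 5.0 = 1.25 * 2^2: the exponent word is
-// 0x40140000 (sign 0, biased exponent 1025, top 20 fraction bits 0x40000)
-// and the mantissa word is 0x00000000.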
-class ConvertToDoubleStub : public PlatformCodeStub {
- public:
- ConvertToDoubleStub(Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return ConvertToDouble; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
- Register exponent = result1_;
- Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
- Label not_special;
- // Convert from Smi to integer.
- __ sra(source_, source_, kSmiTagSize);
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
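-  // E.g. for source_ == -5 (0xFFFFFFFB after the untagging above) the And
-  // below yields 0x80000000, which is exactly the sign bit of the exponent
-  // word of the double -5.0.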
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ And(exponent, source_, Operand(HeapNumber::kSignMask));
- // Subtract from 0 if source was negative.
- __ subu(at, zero_reg, source_);
- __ Movn(source_, at, exponent);
-
-  // We have -1, 0 or 1, which we treat specially. Register source_ contains
-  // the absolute value: it is either equal to 1 (special case of -1 and 1),
-  // greater than 1 (not a special case) or less than 1 (special case of 0).
-  __ Branch(&not_special, gt, source_, Operand(1));
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
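-  // With kExponentBias == 1023 and kExponentShift == 20 this is 0x3FF00000,
-  // the exponent word of the double 1.0.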
- // Safe to use 'at' as dest reg here.
- __ Or(at, exponent, Operand(exponent_word_for_1));
- __ Movn(exponent, at, source_); // Write exp when source not 0.
- // 1, 0 and -1 all have 0 for the second word.
- __ Ret(USE_DELAY_SLOT);
- __ mov(mantissa, zero_reg);
-
-  __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ Clz(zeros_, source_);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here.
- __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
- __ subu(mantissa, mantissa, zeros_);
- __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
- __ Or(exponent, exponent, mantissa);
-
- // Shift up the source chopping the top bit off.
- __ Addu(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ sllv(source_, source_, zeros_);
- // Compute lower part of fraction (last 12 bits).
- __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
- // And the top (top 20 bits).
- __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
-
- __ Ret(USE_DELAY_SLOT);
- __ or_(exponent, exponent, source_);
-}
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
// Try a conversion to a signed integer.
__ Trunc_w_d(double_scratch, double_scratch);
// Move the converted value into the result register.
- __ mfc1(result_reg, double_scratch);
+ __ mfc1(scratch3, double_scratch);
// Retrieve and restore the FCSR.
__ cfc1(scratch, FCSR);
  __ ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  __ And(scratch, scratch,
         kFCSROverflowFlagMask | kFCSRUnderflowFlagMask |
         kFCSRInvalidOpFlagMask);
- // If we had no exceptions we are done.
- __ Branch(&done, eq, scratch, Operand(zero_reg));
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
}
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
- __ lw(input_low, MemOperand(input_reg, double_offset));
- __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+ __ lw(input_low,
+ MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ __ lw(input_high,
+ MemOperand(input_reg, double_offset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(a1) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a2) &&
- sign_.is(a3)) {
- return true;
- }
- if (the_int_.is(a2) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a3) &&
- sign_.is(a0)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
- WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
+ WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
+ WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
+ stub1.GetCode();
+ stub2.GetCode();
}
// We test for the special value that has a different exponent.
STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
// Test sign, and save for later conditionals.
- __ And(sign_, the_int_, Operand(0x80000000u));
- __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+ __ And(sign(), the_int(), Operand(0x80000000u));
+ __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
// Set up the correct exponent in scratch_. All non-Smi int32s have the same.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
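  // That is (1023 + 30) << 20 == 0x41D00000, the exponent word of the
  // double 2^30.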
- __ li(scratch_, Operand(non_smi_exponent));
+ __ li(scratch(), Operand(non_smi_exponent));
// Set the sign bit in scratch_ if the value was negative.
- __ or_(scratch_, scratch_, sign_);
+ __ or_(scratch(), scratch(), sign());
// Subtract from 0 if the value was negative.
- __ subu(at, zero_reg, the_int_);
- __ Movn(the_int_, at, sign_);
+ __ subu(at, zero_reg, the_int());
+ __ Movn(the_int(), at, sign());
  // We should be masking the implicit first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
// the most significant 1 to hit the last bit of the 12 bit sign and exponent.
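  // (kNonMantissaBitsInTopWord is 12: one sign bit plus 11 exponent bits,
  // so shift_distance below works out to 10.)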
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ srl(at, the_int_, shift_distance);
- __ or_(scratch_, scratch_, at);
- __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ __ srl(at, the_int(), shift_distance);
+ __ or_(scratch(), scratch(), at);
+ __ sw(scratch(), FieldMemOperand(the_heap_number(),
HeapNumber::kExponentOffset));
- __ sll(scratch_, the_int_, 32 - shift_distance);
+ __ sll(scratch(), the_int(), 32 - shift_distance);
__ Ret(USE_DELAY_SLOT);
- __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ __ sw(scratch(), FieldMemOperand(the_heap_number(),
HeapNumber::kMantissaOffset));
__ bind(&max_negative_int);
// The actual mantissa bits stored are all 0 because the implicit most
// significant 1 bit is not stored.
non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ sw(scratch_,
- FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(scratch_, zero_reg);
+ __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ sw(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
+ __ mov(scratch(), zero_reg);
__ Ret(USE_DELAY_SLOT);
- __ sw(scratch_,
- FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ sw(scratch(),
+ FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
}
__ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
__ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
__ Branch(&return_equal, ne, a0, Operand(t2));
- ASSERT(is_int16(GREATER) && is_int16(LESS));
+ DCHECK(is_int16(GREATER) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
if (cc == le) {
// undefined <= undefined should fail.
}
__ bind(&return_equal);
- ASSERT(is_int16(GREATER) && is_int16(LESS));
+ DCHECK(is_int16(GREATER) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
if (cc == less) {
__ li(v0, Operand(GREATER)); // Things aren't less than themselves.
if (cc != eq) {
// All-zero means Infinity means equal.
__ Ret(eq, v0, Operand(zero_reg));
- ASSERT(is_int16(GREATER) && is_int16(LESS));
+ DCHECK(is_int16(GREATER) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
if (cc == le) {
__ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
Label* both_loaded_as_doubles,
Label* slow,
bool strict) {
- ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ DCHECK((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
Label lhs_is_smi;
Register rhs,
Label* possible_strings,
Label* not_both_strings) {
- ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ DCHECK((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of rhs.
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
Register scratch,
- CompareIC::State expected,
+ CompareICState::State expected,
Label* fail) {
Label ok;
- if (expected == CompareIC::SMI) {
+ if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
+ } else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
__ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
DONT_DO_SMI_CHECK);
// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Register lhs = a1;
Register rhs = a0;
Condition cc = GetCondition();
Label miss;
- ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
+ CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(0, Smi::FromInt(0));
__ And(t2, lhs, Operand(rhs));
  __ JumpIfNotSmi(t2, &not_smis, t0);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// f12, f14 are the double representations of the left hand side
// and the right hand side if we have FPU. Otherwise a2, a3 represent
// left hand side and a0, a1 represent right hand side.
-
- Isolate* isolate = masm->isolate();
Label nan;
__ li(t0, Operand(LESS));
__ li(t1, Operand(GREATER));
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
- __ c(OLT, D, f12, f14);
- __ Movt(v0, t0);
-  // Use previous check to store conditionally to v0 the opposite condition
-  // (GREATER). If rhs is equal to lhs, this will be corrected in the next
-  // check.
- __ Movf(v0, t1);
- // Check if EQUAL condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(EQ, D, f12, f14);
- __ Movt(v0, t2);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ __ c(OLT, D, f12, f14);
+ __ Movt(v0, t0);
+    // Use previous check to store conditionally to v0 the opposite condition
+    // (GREATER). If rhs is equal to lhs, this will be corrected in the next
+    // check.
+ __ Movf(v0, t1);
+ // Check if EQUAL condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(EQ, D, f12, f14);
+ __ Movt(v0, t2);
+ } else {
+ Label skip;
+ __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
+ __ mov(v0, t0); // Return LESS as result.
+
+ __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
+ __ mov(v0, t2); // Return EQUAL as result.
+
+ __ mov(v0, t1); // Return GREATER as result.
+ __ bind(&skip);
+ }
__ Ret();
__ bind(&nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
- ASSERT(is_int16(GREATER) && is_int16(LESS));
+ DCHECK(is_int16(GREATER) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
if (cc == lt || cc == le) {
__ li(v0, Operand(GREATER));
masm, lhs, rhs, &flat_string_check, &slow);
}
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
+ __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
+ a3);
if (cc == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
- a2,
- a3,
- t0);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
- a2,
- a3,
- t0,
- t1);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
+ t1);
}
// Never falls through to here.
if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc == gt || cc == ge); // Remaining cases.
+ DCHECK(cc == gt || cc == ge); // Remaining cases.
ncr = LESS;
}
__ li(a0, Operand(Smi::FromInt(ncr)));
}
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ __ PushSafepointRegisters();
+ __ Jump(t9);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ __ PopSafepointRegisters();
+ __ Jump(t9);
+}
+
+
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
__ MultiPush(kJSCallerSaved | ra.bit());
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ MultiPushFPU(kCallerSavedFPU);
}
const int argument_count = 1;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
- if (save_doubles_ == kSaveFPRegs) {
+ if (save_doubles()) {
__ MultiPopFPU(kCallerSavedFPU);
}
}
-void BinaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a1, a0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in f4, double result goes
- // into f4.
- // Tagged case: tagged input on top of stack and in a0,
- // tagged result (heap number) goes into v0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = t5;
- const Register scratch1 = t3;
- const Register cache_entry = a0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (tagged) {
- // Argument is a number and is on stack and in a0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(a0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into a2, a3.
- __ sra(t0, a0, kSmiTagSize);
- __ mtc1(t0, f4);
- __ cvt_d_w(f4, f4);
- __ Move(a2, a3, f4);
- __ Branch(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(a0,
- a1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Store the
- // low and high words into a2, a3.
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
- } else {
- // Input is untagged double in f4. Output goes to f4.
- __ Move(a2, a3, f4);
- }
- __ bind(&loaded);
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
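-  // E.g. for 1.0 (low 0x00000000, high 0x3FF00000), h starts as 0x3FF00000,
-  // becomes 0x3FF03FF0, then 0x3FCFCFCF; assuming kCacheSize == 512, the
-  // masked index is 0x1CF (463).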
- __ Xor(a1, a2, a3);
- __ sra(t0, a1, 16);
- __ Xor(a1, a1, t0);
- __ sra(t0, a1, 8);
- __ Xor(a1, a1, t0);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // a1 = TranscendentalCache::hash(double value).
- __ li(cache_entry, Operand(
- ExternalReference::transcendental_cache_array_address(
- masm->isolate())));
- // a0 points to cache array.
- __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
- Isolate::Current()->transcendental_cache()->caches_[0])));
- // a0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
-
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
-    CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
- __ sll(t0, a1, 1);
- __ Addu(a1, a1, t0);
- __ sll(t0, a1, 2);
- __ Addu(cache_entry, cache_entry, t0);
-
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ lw(t0, MemOperand(cache_entry, 0));
- __ lw(t1, MemOperand(cache_entry, 4));
- __ lw(t2, MemOperand(cache_entry, 8));
- __ Branch(&calculate, ne, a2, Operand(t0));
- __ Branch(&calculate, ne, a3, Operand(t1));
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into v0.
- __ Drop(1);
- __ mov(v0, t2);
- } else {
- // Load result into f4.
- __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
- }
- __ Ret();
-
- __ bind(&calculate);
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
- if (tagged) {
- __ bind(&invalid_cache);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
- masm->isolate()),
- 1,
- 1);
- } else {
- Label no_update;
- Label skip_cache;
-
- // Call C function to calculate the result and update the cache.
- // a0: precalculated cache entry address.
- // a2 and a3: parts of the double value.
- // Store a0, a2 and a3 on stack for later before calling C function.
- __ Push(a3, a2, cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(f4);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ Pop(a3, a2, cache_entry);
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
- __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
-
- __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
- __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
- __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, cache_entry);
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
- __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(f4);
- __ bind(&no_update);
-
- // We return the value in f4 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ li(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-}
-
-
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- __ push(ra);
- __ PrepareCallCFunction(2, scratch);
- if (IsMipsSoftFloatABI) {
- __ Move(a0, a1, f4);
- } else {
- __ mov_d(f12, f4);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- Isolate* isolate = masm->isolate();
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(
- ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(
- ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(
- ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(ra);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
- const Register exponent = a2;
+ const Register exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent.is(a2));
const Register heapnumbermap = t1;
const Register heapnumber = v0;
const DoubleRegister double_base = f2;
const Register scratch2 = t3;
Label call_runtime, done, int_exponent;
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
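    // (In that case sp[0] holds the exponent and sp[kPointerSize] the base.)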
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
+ } else if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
- if (exponent_type_ != INTEGER) {
+ if (exponent_type() != INTEGER) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
// scratch2 == 0 means there was no conversion error.
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
- if (exponent_type_ == ON_STACK) {
+ if (exponent_type() == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch2);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type_ == INTEGER) {
+ if (exponent_type() == INTEGER) {
__ mov(scratch, exponent);
} else {
// Exponent has previously been stored into scratch as untagged integer.
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
- if (exponent_type_ == ON_STACK) {
+ Counters* counters = isolate()->counters();
+ if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- ASSERT(heapnumber.is(v0));
+ DCHECK(heapnumber.is(v0));
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ DropAndRet(2);
} else {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
- return (!save_doubles_ || isolate->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- BinaryOpStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ StoreRegistersStateStub stub(isolate);
+ stub.GetCode();
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ RestoreRegistersStateStub stub(isolate);
+ stub.GetCode();
}
void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
- CEntryStub save_doubles(1, mode);
- StoreBufferOverflowStub stub(mode);
- // These stubs might already be in the snapshot, detect that and don't
- // regenerate, which would lead to code stub initialization state being messed
- // up.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
- save_doubles_code = *save_doubles.GetCode(isolate);
- }
- Code* store_buffer_overflow_code;
- if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
- store_buffer_overflow_code = *stub.GetCode(isolate);
- }
- save_doubles_code->set_is_pregenerated(true);
- store_buffer_overflow_code->set_is_pregenerated(true);
+ CEntryStub(isolate, 1, mode).GetCode();
+ StoreBufferOverflowStub(isolate, mode).GetCode();
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ andi(scratch, value, 0xf);
- __ Branch(oom_label, eq, scratch, Operand(0xf));
-}
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // s0: number of arguments including receiver
+ // s1: size of arguments excluding receiver
+ // s2: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // v0: result parameter for PerformGC, if any
- // s0: number of arguments including receiver (C callee-saved)
- // s1: pointer to the first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
+ // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
+ // The reason for this is that these arguments would need to be saved anyway
+ // so it's faster to set them up directly.
+ // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
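+  // A call site typically looks roughly like this (a sketch):
+  //   __ PrepareCEntryArgs(num_arguments);  // Sets s0 (argc) and s1.
+  //   __ PrepareCEntryFunction(ExternalReference(f, isolate));  // Sets s2.
+  //   CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+  //   __ CallStub(&stub);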
- Isolate* isolate = masm->isolate();
+ // Compute the argv pointer in a callee-saved register.
+ __ Addu(s1, sp, s1);
- if (do_gc) {
- // Move result passed in v0 into a0 to call PerformGC.
- __ mov(a0, v0);
- __ PrepareCallCFunction(2, 0, a1);
- __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
- }
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles());
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ li(a0, Operand(scope_depth));
- __ lw(a1, MemOperand(a0));
- __ Addu(a1, a1, Operand(1));
- __ sw(a1, MemOperand(a0));
- }
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
// Prepare arguments for C routine.
// a0 = argc
__ AssertStackIsAligned();
- __ li(a2, Operand(ExternalReference::isolate_address(isolate)));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
// Set up sp in the delay slot.
masm->addiu(sp, sp, -kCArgsSlotsSize);
// Make sure the stored 'ra' points to this position.
- ASSERT_EQ(kNumInstructionsToJump,
+ DCHECK_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
- if (always_allocate) {
- // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
- __ li(a2, Operand(scope_depth));
- __ lw(a3, MemOperand(a2));
- __ Subu(a3, a3, Operand(1));
- __ sw(a3, MemOperand(a2));
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ Branch(&okay, ne, v0, Operand(t0));
+ __ stop("The hole escaped");
+ __ bind(&okay);
}
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ addiu(a2, v0, 1);
- __ andi(t0, a2, kFailureTagMask);
- __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
- // Restore stack (remove arg slots) in branch delay slot.
- __ addiu(sp, sp, kCArgsSlotsSize);
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ LoadRoot(t0, Heap::kExceptionRootIndex);
+ __ Branch(&exception_returned, eq, t0, Operand(v0));
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ li(a2, Operand(pending_exception_address));
+ __ lw(a2, MemOperand(a2));
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ // Cannot use check here as it attempts to generate call into runtime.
+ __ Branch(&okay, eq, t0, Operand(a2));
+ __ stop("Unexpected pending exception");
+ __ bind(&okay);
+ }
// Exit C frame and return.
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
+ // s0: still holds argc (callee-saved).
+ __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
- // Check if we should retry or throw exception.
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
- __ Branch(&retry, eq, t0, Operand(zero_reg));
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
+ // Handling of exception.
+ __ bind(&exception_returned);
// Retrieve the pending exception.
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ lw(v0, MemOperand(t0));
-
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
+ __ li(a2, Operand(pending_exception_address));
+ __ lw(v0, MemOperand(a2));
// Clear the pending exception.
- __ li(a3, Operand(isolate->factory()->the_hole_value()));
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(a3, MemOperand(t0));
+ __ li(a3, Operand(isolate()->factory()->the_hole_value()));
+ __ sw(a3, MemOperand(a2));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
+ Label throw_termination_exception;
__ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
- __ Branch(throw_termination_exception, eq, v0, Operand(t0));
+ __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
// Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry);
- // Last failure (v0) will be moved to (a0) for parameter when retrying.
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // s0: number of arguments including receiver
- // s1: size of arguments excluding receiver
- // s2: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
- // The reason for this is that these arguments would need to be saved anyway
- // so it's faster to set them up directly.
- // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
-
- // Compute the argv pointer in a callee-saved register.
- __ Addu(s1, sp, s1);
-
- // Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
-
- // s0: number of arguments (C callee-saved)
- // s1: pointer to first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ li(a0, Operand(false, RelocInfo::NONE32));
- __ li(a2, Operand(external_caught));
- __ sw(a0, MemOperand(a2));
-
- // Set pending exception and v0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, v0, t0, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(v0, MemOperand(a2));
- // Fall through to the next label.
+ __ Throw(v0);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(v0);
-
- __ bind(&throw_normal_exception);
- __ Throw(v0);
}
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Isolate* isolate = masm->isolate();
// We build an EntryFrame.
__ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int marker = type();
__ li(t2, Operand(Smi::FromInt(marker)));
__ li(t1, Operand(Smi::FromInt(marker)));
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
- __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ LoadRoot(v0, Heap::kExceptionRootIndex);
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
// 4 args slots
// args
- if (is_construct) {
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate);
__ li(t0, Operand(construct_entry));
// in the safepoint slot for register t0.
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+ DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
// ReturnTrueFalse is only implemented for inlined call sites.
- ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+ DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
// Fixed register usage throughout the stub:
const Register object = a0; // Object (lhs).
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
} else {
- ASSERT(HasArgsInRegisters());
+ DCHECK(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
// The offset was stored in t0 safepoint slot.
__ Branch(&loop);
__ bind(&is_instance);
- ASSERT(Smi::FromInt(0) == 0);
+ DCHECK(Smi::FromInt(0) == 0);
if (!HasCallSiteInlineCheck()) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
__ PatchRelocatedValue(inline_site, scratch, v0);
if (!ReturnTrueFalseObject()) {
- ASSERT_EQ(Smi::FromInt(0), 0);
+ DCHECK_EQ(Smi::FromInt(0), 0);
__ mov(v0, zero_reg);
}
}
__ Branch(&object_not_null,
ne,
scratch,
- Operand(masm->isolate()->factory()->null_value()));
+ Operand(isolate()->factory()->null_value()));
__ li(v0, Operand(Smi::FromInt(1)));
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->prototype_string()));
- receiver = a1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = a0;
- }
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = a0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
-
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
+ t0, &miss);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a1,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a2;
- value = a0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : key
- // -----------------------------------
- receiver = a1;
- value = a0;
- }
- Register scratch = a3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ GetObjectType(scratch, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&miss, eq, scratch, Operand(at));
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}
-Register InstanceofStub::left() { return a0; }
-
-
-Register InstanceofStub::right() { return a1; }
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
+ DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
// Check that the key is a smi.
Label slow;
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
Label param_map_size;
- ASSERT_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(0, Smi::FromInt(0));
__ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
__ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
__ sll(t5, a1, 1);
__ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
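// a1 holds the mapped parameter count as a Smi, so the single extra shift
// above scales it to a byte count.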
// 3. Arguments object.
- __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
- // a2 = argument count (tagged)
+ // a2 = argument count (smi-tagged)
// Get the arguments boilerplate from the current native context into t0.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
const int kAliasedOffset =
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
// v0 = address of new object (tagged)
// a1 = mapped parameter count (tagged)
- // a2 = argument count (tagged)
- // t0 = address of boilerplate object (tagged)
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ lw(a3, FieldMemOperand(t0, i));
- __ sw(a3, FieldMemOperand(v0, i));
- }
+ // a2 = argument count (smi-tagged)
+ // t0 = address of arguments map (tagged)
+ __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ lw(a3, MemOperand(sp, 2 * kPointerSize));
+ __ AssertNotSmi(a3);
const int kCalleeOffset = JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize;
__ sw(a3, FieldMemOperand(v0, kCalleeOffset));
// Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(a2);
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
const int kLengthOffset = JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize;
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, t0 will point there, otherwise
// it will point to the backing store.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
// v0 = address of new object (tagged)
__ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
- __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
__ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
__ Addu(t2, a1, Operand(Smi::FromInt(2)));
__ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
// a2 = argument count (tagged)
__ bind(&runtime);
__ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is in ra.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+ // Check that the key is an array index, that is Uint32.
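+ // A non-negative Smi has both the tag bit and the sign bit clear, so one
+ // mask-and-branch suffices.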
+ __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
+ __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
__ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ Allocate(a1, v0, a2, a3, &runtime,
// Get the arguments boilerplate from the current native context.
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
- __ lw(t0, MemOperand(t0, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ __ lw(t0, MemOperand(
+ t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
- // Copy the JS object part.
- __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
+ __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ lw(a1, MemOperand(sp, 0 * kPointerSize));
+ __ AssertSmi(a1);
__ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
__ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
__ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
// time or if regexp entry in generated code is turned off by a runtime
// switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
- Isolate* isolate = masm->isolate();
-
Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(
- isolate);
+ isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ li(a0, Operand(address_of_regexp_stack_memory_size));
__ lw(a0, MemOperand(a0, 0));
__ Branch(&runtime, eq, a0, Operand(zero_reg));
// Check that the RegExp has been compiled (data contains a fixed array).
__ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- __ And(t0, regexp_data, Operand(kSmiTagMask));
+ __ SmiTst(regexp_data, t0);
__ Check(nz,
kUnexpectedTypeForRegExpDataFixedArrayExpected,
t0,
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
// The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
// (5) Sequential string. Load regexp code according to encoding.
STATIC_ASSERT(kStringEncodingMask == 4);
STATIC_ASSERT(kOneByteStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
- __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+ __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
+ __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
__ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
__ JumpIfSmi(t9, &runtime);
// a1: previous index
- // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
// t9: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(),
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
// Argument 9: Pass current isolate address.
// CFunctionArgumentOperand handles MIPS stack argument slots.
- __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
__ sw(a0, MemOperand(sp, 5 * kPointerSize));
// Argument 8: Indicate that this is a direct call from JavaScript.
// Argument 5: static offsets vector buffer.
__ li(a0, Operand(
- ExternalReference::address_of_static_offsets_vector(isolate)));
+ ExternalReference::address_of_static_offsets_vector(isolate())));
__ sw(a0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data
- // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+ // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
__ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
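// After the Xor, a3 is the per-character shift: a character index shifted
// left by a3 gives the byte offset (0 for one-byte, 1 for two-byte).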
// Load the length from the original subject string from the previous stack
// Locate the code entry and call it.
__ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, t9);
__ LeaveExitFrame(false, no_reg, true);
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ li(a1, Operand(isolate->factory()->the_hole_value()));
+ __ li(a1, Operand(isolate()->factory()->the_hole_value()));
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
__ lw(v0, MemOperand(a2, 0));
__ Branch(&runtime, eq, v0, Operand(a1));
__ bind(&failure);
// For failure and exception return null.
- __ li(v0, Operand(isolate->factory()->null_value()));
+ __ li(v0, Operand(isolate()->factory()->null_value()));
__ DropAndRet(4);
// Process the result from the native regexp code.
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
+ ExternalReference::address_of_static_offsets_vector(isolate());
__ li(a2, Operand(address_of_static_offsets_vector));
// a1: number of capture registers
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ lw(a1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(a1, &slowcase);
- __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in a2.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
- __ Addu(a2, t1, Operand(objects_size));
- __ Allocate(
- a2, // In: Size, in words.
- v0, // Out: Start of allocation (tagged).
- a3, // Scratch register.
- t0, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // v0: Start of allocated area, object-tagged.
- // a1: Number of elements in array, as smi.
- // t1: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
- __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
- __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ lw(a1, MemOperand(sp, kPointerSize * 0));
- __ lw(a2, MemOperand(sp, kPointerSize * 1));
- __ lw(t2, MemOperand(sp, kPointerSize * 2));
- __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
- __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
- __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // v0: JSArray, tagged.
- // a3: FixedArray, tagged.
- // t1: Number of elements in array, untagged.
-
- // Set map.
- __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ sll(t2, t1, kSmiTagSize);
- __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with undefined.
- // v0: JSArray, tagged.
- // a2: undefined.
- // a3: Start of elements in FixedArray.
- // t1: Number of elements to fill.
- Label loop;
- __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
- __ addu(t1, t1, a3); // Point past last element to store.
- __ bind(&loop);
- __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
- __ sw(a2, MemOperand(a3));
- __ Branch(&loop, USE_DELAY_SLOT);
- __ addiu(a3, a3, kPointerSize); // In branch delay slot.
-
- __ bind(&done);
- __ DropAndRet(3);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : Feedback vector
+ // a3 : slot in feedback vector (Smi)
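+ //
+ // The slot can hold the uninitialized sentinel, a cached JSFunction
+ // (monomorphic), an AllocationSite (for the Array function), or the
+ // megamorphic sentinel.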
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ // Load the cache state into t0.
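+ // (a3 is a Smi, so shifting by kPointerSizeLog2 - kSmiTagSize both untags
+ // the slot index and scales it to a byte offset.)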
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ Branch(&done, eq, a3, Operand(a1));
-
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in a3.
- __ lw(t1, FieldMemOperand(a3, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, t1, Operand(at));
-
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&megamorphic, ne, a1, Operand(a3));
- __ jmp(&done);
+ __ Branch(&done, eq, t0, Operand(a1));
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the
+ // megamorphic sentinel, then we have in the slot either some other
+ // function or an AllocationSite. Do a map check on the object in t0.
+ __ lw(t1, FieldMemOperand(t0, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, t1, Operand(at));
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&megamorphic, ne, a1, Operand(t0));
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialize, eq, a3, Operand(at));
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&initialize, eq, t0, Operand(at));
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
+ // An uninitialized cache is patched with the function.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&not_array_function, ne, a1, Operand(a3));
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&not_array_function, ne, a1, Operand(t0));
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ const RegList kSavedRegs =
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
- // The target function is the Array constructor.
- // Create an AllocationSite if we don't already have it, store it in the cell.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs =
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6; // a2
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(a0);
+ __ MultiPush(kSavedRegs);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(a0);
- __ MultiPush(kSavedRegs);
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ __ CallStub(&create_stub);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ __ MultiPop(kSavedRegs);
+ __ SmiUntag(a0);
+ }
+ __ Branch(&done);
- __ MultiPop(kSavedRegs);
- __ SmiUntag(a0);
+ __ bind(&not_array_function);
}
- __ Branch(&done);
- __ bind(&not_array_function);
- __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sw(a1, MemOperand(t0, 0));
+
+ __ Push(t0, a2, a1);
+ __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(t0, a2, a1);
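+ // Unlike the old cells, which were rescanned, the feedback vector is an
+ // ordinary FixedArray, so the store of a1 needs the write barrier above.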
__ bind(&done);
}
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // a1 : the function to call
- // a2 : cache cell for call target
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call, ne, t0, Operand(at));
- // Patch the receiver on the stack with the global receiver object.
- __ lw(a3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ int32_t strict_mode_function_mask =
+ 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ // Do not transform the receiver for natives (compiler hints already in t0).
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
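+ // Test both hint bits with a single mask; if either is set, the receiver
+ // is left untouched.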
+ __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
+ __ Branch(cont, ne, at, Operand(zero_reg));
+}
+
+
+static void EmitSlowCase(MacroAssembler* masm,
+ int argc,
+ Label* non_function) {
+ // Check for function proxy.
+ __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ push(a1); // Put proxy as additional argument.
+ __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
+ __ mov(a2, zero_reg);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
}
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
- // Get the map of the function object.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(non_function);
+ __ sw(a1, MemOperand(sp, argc * kPointerSize));
+ __ li(a0, Operand(argc)); // Set up the number of arguments.
+ __ mov(a2, zero_reg);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(a1);
+ }
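+ // The store below executes in the branch delay slot, writing the wrapped
+ // receiver (now in v0) back into its stack slot.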
+ __ Branch(USE_DELAY_SLOT, cont);
+ __ sw(v0, MemOperand(sp, argc * kPointerSize));
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // a1 : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
}
// Fast-case: Invoke the function now.
// a1: pushed function
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call_as_function, eq, t0, Operand(at));
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+ if (needs_checks) {
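+ // Smis and other primitives (instance type below FIRST_SPEC_OBJECT_TYPE)
+ // must be wrapped in an object before the call.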
+ __ JumpIfSmi(a3, &wrap);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
}
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
}
- // Check for function proxy.
- __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(t1, CALL_AS_METHOD);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ EmitWrapCase(masm, argc, &cont);
}
+}
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
- __ li(a0, Operand(argc_)); // Set up the number of arguments.
- __ mov(a2, zero_reg);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : feedback vector
+ // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, at);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into a2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by a3 + 1.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(a2, t1);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = a3;
+ Register jmp_reg = t0;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
// a0: number of arguments
// a1: called object
- // a3: object type
+ // t0: object type
Label do_call;
__ bind(&slow);
- __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
__ bind(&non_function_call);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing a0).
__ li(a2, Operand(0, RelocInfo::NONE32));
- __ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(vector, FieldMemOperand(vector,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ lw(vector, FieldMemOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // a1 - function
+ // a3 - slot id
+ Label miss;
+
+ EmitLoadTypeFeedbackVector(masm, a2);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+ __ Branch(&miss, ne, a1, Operand(at));
+
+ __ li(a0, Operand(arg_count()));
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(at, a2, Operand(at));
+ __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize));
+
+ // Verify that t0 contains an AllocationSite.
+ __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, t1, Operand(at));
+
+ __ mov(a2, t0);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+ // The slow case: we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unexpected code address");
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // a1 - function
+ // a3 - slot id (Smi)
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, a2);
+
+ // The checks. First, does a1 match the recorded monomorphic target?
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
+ __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
+
+ __ bind(&have_js_function);
+ if (CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ // Compute the receiver in sloppy mode.
+ __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+ __ JumpIfSmi(a3, &wrap);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+
+ if (CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ Branch(&slow_start, eq, t0, Operand(at));
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&miss, eq, t0, Operand(at));
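+ // Neither a hit nor a sentinel: the slot holds a stale JSFunction or an
+ // AllocationSite.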
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(t0);
+ __ GetObjectType(t0, t1, t1);
+ __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
+ __ Branch(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+ // The slow case.
+ __ bind(&slow_start);
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ lw(t0, MemOperand(sp, (arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(t0, a1, a2, a3);
+
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to a1 and exit the internal frame.
+ __ mov(a1, v0);
+ }
}
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
- ASSERT(!t0.is(index_));
- ASSERT(!t0.is(result_));
- ASSERT(!t0.is(object_));
+ DCHECK(!t0.is(index_));
+ DCHECK(!t0.is(result_));
+ DCHECK(!t0.is(object_));
// If the receiver is a smi trigger the non-string case.
__ JumpIfSmi(object_, receiver_not_string_);
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
call_helper.BeforeCall(masm);
__ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
__ Move(result_, v0);
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
- ASSERT(!t0.is(result_));
- ASSERT(!t0.is(code_));
+ DCHECK(!t0.is(result_));
+ DCHECK(!t0.is(code_));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
__ And(t0,
code_,
Operand(kSmiTagMask |
__ Branch(&slow_case_, ne, t0, Operand(zero_reg));
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ASCII char code.
+ // At this point code register contains smi tagged one-byte char code.
STATIC_ASSERT(kSmiTag == 0);
__ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
__ Addu(result_, result_, t0);
}
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
+
+
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for
- // very short strings.
- if (!ascii) {
- __ addu(count, count, count);
- }
- __ Branch(&done, eq, count, Operand(zero_reg));
- __ addu(count, dest, count); // Count now points to the last dest byte.
-
- __ bind(&loop);
- __ lbu(scratch, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ Branch(&loop, lt, dest, Operand(count));
-
- __ bind(&done);
-}
-
-
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
-
-
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
- __ And(scratch4, dest, Operand(kPointerAlignmentMask));
+ String::Encoding encoding) {
+ if (FLAG_debug_code) {
+ // Check that destination is word aligned.
+ __ And(scratch, dest, Operand(kPointerAlignmentMask));
__ Check(eq,
kDestinationOfCopyNotAligned,
- scratch4,
+ scratch,
Operand(zero_reg));
}
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
// Assumes word reads and writes are little endian.
// Nothing to do for zero characters.
Label done;
- if (!ascii) {
- __ addu(count, count, count);
- }
- __ Branch(&done, eq, count, Operand(zero_reg));
-
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ Subu(scratch1, count, Operand(8));
- __ Addu(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ And(scratch4, dest, Operand(kReadAlignmentMask));
- Label dest_aligned;
- __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
- Label aligned_loop;
- __ bind(&aligned_loop);
- __ lbu(scratch1, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch1, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ addiu(scratch4, scratch4, 1);
- __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
- __ bind(&dest_aligned);
- }
-
- Label simple_loop;
-
- __ And(scratch4, src, Operand(kReadAlignmentMask));
- __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
-
- // Loop for src/dst that are not aligned the same way.
- // This loop uses lwl and lwr instructions. These instructions
- // depend on the endianness, and the implementation assumes little-endian.
- {
- Label loop;
- __ bind(&loop);
- __ lwr(scratch1, MemOperand(src));
- __ Addu(src, src, Operand(kReadAlignment));
- __ lwl(scratch1, MemOperand(src, -1));
- __ sw(scratch1, MemOperand(dest));
- __ Addu(dest, dest, Operand(kReadAlignment));
- __ Subu(scratch2, limit, dest);
- __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ __ Addu(count, count, count);
}
- __ Branch(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dest, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ lw(scratch1, MemOperand(src));
- __ Addu(src, src, Operand(kReadAlignment));
- __ sw(scratch1, MemOperand(dest));
- __ Addu(dest, dest, Operand(kReadAlignment));
- __ Subu(scratch2, limit, dest);
- __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
- }
+ Register limit = count; // Read until dest equals this.
+ __ Addu(limit, dest, Operand(count));
+ Label loop_entry, loop;
// Copy bytes from src to dest until dest hits limit.
- __ bind(&byte_loop);
- // Test if dest has already reached the limit.
- __ Branch(&done, ge, dest, Operand(limit));
- __ lbu(scratch1, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch1, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ Branch(&byte_loop);
+ __ Branch(&loop_entry);
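+ // Jump to the bottom-of-loop test first so that a zero-length copy skips
+ // the body entirely.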
+ __ bind(&loop);
+ __ lbu(scratch, MemOperand(src));
+ __ Addu(src, src, Operand(1));
+ __ sb(scratch, MemOperand(dest));
+ __ Addu(dest, dest, Operand(1));
+ __ bind(&loop_entry);
+ __ Branch(&loop, lt, dest, Operand(limit));
__ bind(&done);
}
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ Subu(scratch, c1, Operand(static_cast<int>('0')));
- __ Branch(¬_array_index,
- Ugreater,
- scratch,
- Operand(static_cast<int>('9' - '0')));
- __ Subu(scratch, c2, Operand(static_cast<int>('0')));
-
- // If check failed combine both characters into single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in c1 register.
- Label tmp;
- __ sll(scratch1, c2, kBitsPerByte);
- __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
- __ Or(c1, c1, scratch1);
- __ bind(&tmp);
- __ Branch(
- not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
-
- __ bind(¬_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ sll(scratch, c2, kBitsPerByte);
- __ Or(chars, chars, scratch);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load string table.
- // Load address of first element of the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
- __ sra(mask, mask, 1);
- __ Addu(mask, mask, -1);
-
- // Calculate untagged address of the first element of the string table.
- Register first_string_table_element = string_table;
- __ Addu(first_string_table_element, string_table,
- Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers.
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_string_table_element: address of the first element of
- // the string table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the string table.
- const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- if (i > 0) {
- __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ And(candidate, candidate, Operand(mask));
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ sll(scratch, candidate, kPointerSizeLog2);
- __ Addu(scratch, scratch, first_string_table_element);
- __ lw(candidate, MemOperand(scratch));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ GetObjectType(candidate, scratch, scratch);
- __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
-
- __ Branch(not_found, eq, undefined, Operand(candidate));
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole,
- scratch, Operand(candidate));
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
- __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ mov(v0, result);
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = seed + character + ((seed + character) << 10);
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ SmiUntag(hash);
- __ addu(hash, hash, character);
- __ sll(at, hash, 10);
- __ addu(hash, hash, at);
- // hash ^= hash >> 6;
- __ srl(at, hash, 6);
- __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ addu(hash, hash, character);
- // hash += hash << 10;
- __ sll(at, hash, 10);
- __ addu(hash, hash, at);
- // hash ^= hash >> 6;
- __ srl(at, hash, 6);
- __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ sll(at, hash, 3);
- __ addu(hash, hash, at);
- // hash ^= hash >> 11;
- __ srl(at, hash, 11);
- __ xor_(hash, hash, at);
- // hash += hash << 15;
- __ sll(at, hash, 15);
- __ addu(hash, hash, at);
-
- __ li(at, Operand(String::kHashBitMask));
- __ and_(hash, hash, at);
-
- // if (hash == 0) hash = 27;
- __ ori(at, zero_reg, StringHasher::kZeroHash);
- __ Movz(hash, at, hash);
-}
-
-
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
// Stack frame on entry.
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
- __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
+ __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
__ jmp(&set_slice_header);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
// Handle external string.
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ And(t0, a1, Operand(kShortExternalStringTag));
__ Branch(&runtime, ne, t0, Operand(zero_reg));
__ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
__ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
// Allocate and copy the resulting one-byte string.
- __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
+ __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
// Locate first character of substring to copy.
__ Addu(t1, t1, a3);
// a2: result string length
// t1: first character of substring to copy
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharacters(
+ masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
__ jmp(&return_v0);
// Allocate and copy the resulting two-byte string.
// a2: result length.
// t1: first character of substring to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharacters(
+ masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
__ bind(&return_v0);
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
__ DropAndRet(3);
}
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
+void StringHelper::GenerateFlatOneByteStringEquals(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
Register length = scratch1;
// Compare lengths.
__ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Branch(&check_zero_length, eq, length, Operand(scratch2));
__ bind(&strings_not_equal);
- ASSERT(is_int16(NOT_EQUAL));
+ DCHECK(is_int16(NOT_EQUAL));
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
__ Branch(&compare_chars, ne, length, Operand(zero_reg));
- ASSERT(is_int16(EQUAL));
+ DCHECK(is_int16(EQUAL));
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
// Compare characters.
__ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm,
- left, right, length, scratch2, scratch3, v0,
- &strings_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
+ v0, &strings_not_equal);
// Characters are equal.
__ Ret(USE_DELAY_SLOT);
}
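
In C++ terms the routine reduces to the following (a sketch; EQUAL is 0 per the STATIC_ASSERTs used elsewhere in the file, and NOT_EQUAL is simply a non-zero token returned as a smi in v0):

    #include <cstdint>

    const int kEqual = 0, kNotEqual = 1;  // illustrative values only

    // Equal lengths first, then a byte-wise scan.
    int FlatOneByteStringEquals(const uint8_t* l, int llen,
                                const uint8_t* r, int rlen) {
      if (llen != rlen) return kNotEqual;
      for (int i = 0; i < llen; ++i) {
        if (l[i] != r[i]) return kNotEqual;
      }
      return kEqual;
    }
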
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4) {
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
__ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
// Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4, v0,
- &result_not_equal);
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ scratch4, v0, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
// Use length_delta as result if it's zero.
__ mov(scratch2, length_delta);
__ mov(scratch4, zero_reg);
}
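
The ordered variant compares the common prefix first and only then falls back to the length difference; a sketch under the same assumptions:

    #include <cstdint>

    // LESS/EQUAL/GREATER as -1/0/1, with the length delta deciding the
    // outcome when the common prefix matches (zero delta means equal).
    int CompareFlatOneByteStrings(const uint8_t* l, int llen,
                                  const uint8_t* r, int rlen) {
      int min_length = llen < rlen ? llen : rlen;
      for (int i = 0; i < min_length; ++i) {
        if (l[i] != r[i]) return l[i] < r[i] ? -1 : 1;
      }
      int delta = llen - rlen;
      return delta == 0 ? 0 : (delta < 0 ? -1 : 1);
    }
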
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Register scratch2, Register scratch3,
Label* chars_not_equal) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
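
The negative-index trick the comment describes looks like this in C++ (a sketch of the control flow, not the stub's register usage):

    #include <cstdint>

    // Point both cursors one past the end, then walk an index from
    // -length up to 0; the loop's exit test is simply "index == 0".
    bool CharsEqual(const uint8_t* left, const uint8_t* right, int length) {
      left += length;
      right += length;
      for (int index = -length; index != 0; ++index) {
        if (left[index] != right[index]) return false;
      }
      return true;
    }
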
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
// Stack frame on entry.
// sp[0]: right string
__ bind(¬_same);
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
- // Compare flat ASCII strings natively. Remove arguments from stack first.
+ // Compare flat one-byte strings natively. Remove arguments from stack first.
__ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
__ Addu(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfEitherSmi(a0, a1, &call_runtime);
- // Load instance types.
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ Or(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
-
- // Both arguments are strings.
- // a0: first string
- // a1: second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- // These tests use a zero-length check on string-length, which is a Smi.
- // Assert that Smi::FromInt(0) is really 0.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(Smi::FromInt(0) == 0);
- __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
- __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
- __ mov(v0, a0); // Assume we'll return first string (from a0).
- __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
- __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
- __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
- __ and_(t4, t4, t5); // Branch if both strings were non-empty.
- __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&strings_not_empty);
- }
-
- // Untag both string-lengths.
- __ sra(a2, a2, kSmiTagSize);
- __ sra(a3, a3, kSmiTagSize);
-
- // Both strings are non-empty.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ Addu(t2, a2, Operand(a3));
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return a string here.
- __ Branch(&longer_than_two, ne, t2, Operand(2));
-
- // Check that both strings are non-external ASCII strings.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&make_two_character_string);
- // The resulting string has length 2, and the first chars of the two
- // strings are combined into a single halfword in the a2 register.
- // So we can fill the resulting string with a single halfword store
- // instruction (which assumes that the processor is in little-endian
- // mode) instead of two loops.
- __ li(t2, Operand(2));
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
- __ And(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kStringEncodingMask));
- __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ li(t0, Operand(high_promotion_mode));
- __ lw(t0, MemOperand(t0, 0));
- __ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
-
- __ mov(t3, v0);
- __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
- __ RecordWriteField(t3,
- ConsString::kFirstOffset,
- a0,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
- __ RecordWriteField(t3,
- ConsString::kSecondOffset,
- a1,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
-
- __ bind(&skip_write_barrier);
- __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
- __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
-
- __ bind(&after_writing);
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one byte characters.
- // t0: first instance type.
- // t1: second instance type.
- // Branch to ascii_data if _both_ instances have kOneByteDataHintMask set.
- __ And(at, t0, Operand(kOneByteDataHintMask));
- __ and_(at, at, t1);
- __ Branch(&ascii_data, ne, at, Operand(zero_reg));
- __ Xor(t0, t0, Operand(t1));
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ And(t0, t0, Operand(kOneByteStringTag | kOneByteDataHintTag));
- __ Branch(&ascii_data, eq, t0,
- Operand(kOneByteStringTag | kOneByteDataHintTag));
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
- __ Branch(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t2: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- // Check whether both strings have the same encoding.
- __ Xor(t3, t0, Operand(t1));
- __ And(t3, t3, Operand(kStringEncodingMask));
- __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t0, Operand(kStringRepresentationMask));
-
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_first_add;
- __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_first_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t0, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_second_add;
- __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_second_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t1, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // t3: first character of first string
- // a1: first character of second string
- // a2: length of first string
- // a3: length of second string
- // t2: sum of lengths.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(t4, t1, Operand(kStringEncodingMask));
- __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
-
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
-
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string.
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : left
+ // -- a0 : right
+ // -- ra : return address
+ // -----------------------------------
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ // Load a2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ li(a2, handle(isolate()->heap()->undefined_value()));
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ And(at, a2, Operand(kSmiTagMask));
+ __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
+ __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
}
-}
-
-
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(a0);
- __ push(a1);
-}
-
-
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(a1);
- __ pop(a0);
-}
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, ¬_string);
- __ GetObjectType(arg, scratch1, scratch1);
- __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
-
- // Check the number to string cache.
- __ bind(¬_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
- __ mov(arg, scratch1);
- __ sw(arg, MemOperand(sp, stack_offset));
- __ bind(&done);
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
+ __ TailCallStub(&stub);
}
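
The FLAG_debug_code block above verifies that the patched-in value really is an AllocationSite: first that it is a heap object (low tag bit set on 32-bit targets), then that its map word is the allocation-site map. A sketch, with the tagging scheme assumed:

    #include <cstdint>

    // Reject smis (low bit clear), then compare the object's map word,
    // which sits at offset 0 behind the kHeapObjectTag, against the
    // AllocationSite map.
    bool LooksLikeAllocationSite(uintptr_t tagged, uintptr_t site_map) {
      const uintptr_t kSmiTagMask = 1, kHeapObjectTag = 1;
      if ((tagged & kSmiTagMask) == 0) return false;  // still the smi dummy
      uintptr_t map = *reinterpret_cast<uintptr_t*>(tagged - kHeapObjectTag);
      return map == site_map;
    }
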
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
Label miss;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &miss);
}
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- if (left_ == CompareIC::SMI) {
+ if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(a1, &miss);
}
- if (right_ == CompareIC::SMI) {
+ if (right() == CompareICState::SMI) {
__ JumpIfNotSmi(a0, &miss);
}
__ BranchF(&fpu_lt, NULL, lt, f0, f2);
// Otherwise it's greater, so just fall through, and return.
- ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
+ DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(GREATER));
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
__ JumpIfSmi(a1, &unordered);
}
__ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
+ if (Token::IsOrderedRelationalCompareOp(op())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&unordered, eq, a1, Operand(at));
}
}
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
- ASSERT(right.is(a0));
+ DCHECK(right.is(a0));
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(v0, right);
// Internalized strings are compared by identity.
__ Ret(ne, left, Operand(right));
- ASSERT(is_int16(EQUAL));
+ DCHECK(is_int16(EQUAL));
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
}
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASSERT(GetCondition() == eq);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
+ DCHECK(GetCondition() == eq);
Label miss;
// Registers containing left and right operands respectively.
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(tmp1, &miss);
- __ JumpIfNotUniqueName(tmp2, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
// Use a0 as result
__ mov(v0, a0);
__ Branch(&done, ne, left, Operand(right));
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
- ASSERT(right.is(a0));
+ DCHECK(right.is(a0));
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
}
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
Label miss;
- bool equality = Token::IsEqualityOp(op_);
+ bool equality = Token::IsEqualityOp(op());
// Registers containing left and right operands respectively.
Register left = a1;
// because we already know they are not identical. We know they are both
// strings.
if (equality) {
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
STATIC_ASSERT(kInternalizedTag == 0);
__ Or(tmp3, tmp1, Operand(tmp2));
__ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
__ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
- ASSERT(right.is(a0));
+ DCHECK(right.is(a0));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // In the delay slot.
__ bind(&is_symbol);
}
- // Check that both strings are sequential ASCII.
+ // Check that both strings are sequential one-byte.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- tmp1, tmp2, tmp3, tmp4, &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
- // Compare flat ASCII strings. Returns when done.
+ // Compare flat one-byte strings. Returns when done.
if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
+ tmp3);
} else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, tmp4);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+ tmp2, tmp3, tmp4);
}
// Handle more complex cases in runtime.
}
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
__ GetObjectType(a1, a2, a2);
__ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
}
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
__ And(a2, a1, a0);
__ JumpIfSmi(a2, &miss);
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
- __ push(ra);
- __ Push(a1, a0);
- __ li(t0, Operand(Smi::FromInt(op_)));
+ __ Push(ra, a1, a0);
+ __ li(t0, Operand(Smi::FromInt(op())));
__ addiu(sp, sp, -kPointerSize);
__ CallExternalReference(miss, 3, USE_DELAY_SLOT);
__ sw(t0, MemOperand(sp)); // In the delay slot.
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
intptr_t loc =
- reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target);
__ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(ra);
Register properties,
Handle<Name> name,
Register scratch0) {
- ASSERT(name->IsUniqueName());
+ DCHECK(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ sll(at, index, 1);
__ Addu(index, index, at);
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
+ DCHECK_EQ(kSmiTagSize, 1);
Register tmp = properties;
__ sll(scratch0, index, 1);
__ Addu(tmp, properties, scratch0);
__ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
- ASSERT(!tmp.is(entity_name));
+ DCHECK(!tmp.is(entity_name));
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ Branch(done, eq, entity_name, Operand(tmp));
__ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ lbu(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ bind(&good);
// Restore the properties.
__ MultiPush(spill_mask);
__ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ li(a1, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
__ mov(at, v0);
__ MultiPop(spill_mask);
Register name,
Register scratch1,
Register scratch2) {
- ASSERT(!elements.is(scratch1));
- ASSERT(!elements.is(scratch2));
- ASSERT(!name.is(scratch1));
- ASSERT(!name.is(scratch2));
+ DCHECK(!elements.is(scratch1));
+ DCHECK(!elements.is(scratch2));
+ DCHECK(!name.is(scratch1));
+ DCHECK(!name.is(scratch2));
__ AssertName(name);
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following And instruction.
- ASSERT(NameDictionary::GetProbeOffset(i) <
+ DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Addu(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
__ And(scratch2, scratch1, scratch2);
// Scale the index by multiplying by the element size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ sll(at, scratch2, 1);
__ MultiPush(spill_mask);
if (name.is(a0)) {
- ASSERT(!elements.is(a1));
+ DCHECK(!elements.is(a1));
__ Move(a1, name);
__ Move(a0, elements);
} else {
__ Move(a0, elements);
__ Move(a1, name);
}
- NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
__ mov(scratch2, a2);
__ mov(at, v0);
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following And instruction.
- ASSERT(NameDictionary::GetProbeOffset(i) <
+ DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Addu(index, hash, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
__ And(index, mask, index);
// Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
// index *= 3.
__ mov(at, index);
__ sll(index, index, 1);
__ Addu(index, index, at);
- ASSERT_EQ(kSmiTagSize, 1);
+ DCHECK_EQ(kSmiTagSize, 1);
__ sll(index, index, 2);
__ Addu(index, index, dictionary);
__ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Stop if found the property.
__ Branch(&in_dictionary, eq, entry_key, Operand(key));
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
__ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ lbu(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
+ if (mode() == POSITIVE_LOOKUP) {
__ Ret(USE_DELAY_SLOT);
__ mov(result, zero_reg);
}
}
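
All three lookup paths above probe the same way: quadratic probing over a power-of-two table, with the slot scaled by NameDictionary::kEntrySize == 3 via the sll/Addu pair. A sketch (the stubs keep the hash field left-shifted and pre-shift the probe offset instead, which is equivalent modulo the mask):

    #include <cstdint>

    // Hypothetical stand-in for NameDictionary::GetProbeOffset(i).
    uint32_t ProbeOffset(uint32_t i) { return (i + i * i) >> 1; }

    // One probe step: hash plus the quadratic offset, masked to the
    // table, then three words per entry (slot*2 + slot, as above).
    uint32_t EntryIndex(uint32_t hash, uint32_t probe, uint32_t mask) {
      uint32_t slot = (hash + ProbeOffset(probe)) & mask;
      return slot * 3;
    }
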
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
- // FastNewClosureStub::Generate
- { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
- // StringAddStub::Generate
- { REG(t3), REG(a1), REG(t0), EMIT_REMEMBERED_SET },
- { REG(t3), REG(a0), REG(t0), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
// Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return true; // FPU is a base requirement for V8.
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
__ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
__ nop();
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
}
__ Ret();
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(a0));
+ DCHECK(!address.is(regs_.object()));
+ DCHECK(!address.is(a0));
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
__ Move(a1, address);
- __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
+ __ RememberedSetHelper(object(),
+ address(),
+ value(),
+ save_fp_regs_mode(),
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ lw(a1, MemOperand(fp, parameter_count_offset));
- if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Addu(a1, a1, Operand(1));
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
}
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- AllowStubCallsScope allow_stub_calls(masm, true);
- ProfileEntryHookStub stub;
+ ProfileEntryHookStub stub(masm->isolate());
__ push(ra);
__ CallStub(&stub);
__ pop(ra);
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(s5, sp);
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
-
+ __ Subu(sp, sp, kCArgsSlotsSize);
#if defined(V8_HOST_ARCH_MIPS)
int32_t entry_hook =
- reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
- __ li(at, Operand(entry_hook));
+ reinterpret_cast<int32_t>(isolate()->function_entry_hook());
+ __ li(t9, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
// It additionally takes an isolate as a third parameter.
- __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ li(at, Operand(ExternalReference(&dispatcher,
+ __ li(t9, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
- masm->isolate())));
+ isolate())));
#endif
- __ Call(at);
+ // Call the C function through t9 to conform to the ABI for PIC code.
+ __ Call(t9);
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
__ mov(sp, s5);
+ } else {
+ __ Addu(sp, sp, kCArgsSlotsSize);
}
// Also pop ra to get Ret(0).
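
The And(sp, sp, -frame_alignment) above is the usual power-of-two align-down; in C++:

    #include <cstdint>

    // Round p down to a power-of-two alignment; -alignment has the same
    // bit pattern as ~(alignment - 1) in two's complement, hence the And.
    uintptr_t AlignDown(uintptr_t p, uintptr_t alignment) {
      return p & ~(alignment - 1);
    }
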
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
- mode);
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- T stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
}
// If we reached this point there is a problem.
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // a2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// a0 - number of arguments
// a1 - constructor?
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ DCHECK(FAST_SMI_ELEMENTS == 0);
+ DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+ DCHECK(FAST_ELEMENTS == 2);
+ DCHECK(FAST_HOLEY_ELEMENTS == 3);
+ DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+ DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ And(at, a3, Operand(1));
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ Addu(a3, a3, Operand(1));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
if (FLAG_debug_code) {
- __ lw(t1, FieldMemOperand(t1, 0));
+ __ lw(t1, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSiteInCell, t1, Operand(at));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
}
- // Save the resulting elements kind in type info
- __ SmiTag(a3);
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
- __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(a3);
+ // Save the resulting elements kind in type info. We can't just store a3
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field; the upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- ArraySingleArgumentConstructorStub stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
}
// If we reached this point there is a problem.
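
Because ElementsKindBits sits at shift 0 of the smi-tagged transition_info (per the STATIC_ASSERT above), the packed-to-holey bump can be done by adding a smi constant without touching the upper bits. A sketch:

    #include <cstdint>

    // kFastElementsKindPackedToHoley is the distance from a packed kind
    // to its holey twin (assumed 1, e.g. FAST_SMI -> FAST_HOLEY_SMI).
    uint32_t BumpTransitionInfoToHoley(uint32_t transition_info_smi) {
      const uint32_t kPackedToHoley = 1;
      return transition_info_smi + (kPackedToHoley << 1);  // << 1 == smi tag
    }
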
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
}
}
}
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
}
}
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count_ == ANY) {
+ if (argument_count() == ANY) {
Label not_zero_case, not_one_case;
__ And(at, a0, a0);
__ Branch(¬_zero_case, ne, at, Operand(zero_reg));
__ bind(¬_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else if (argument_count_ == NONE) {
+ } else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
+ } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
+ } else if (argument_count() == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
} else {
UNREACHABLE();
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- a0 : argc (only if argument_count_ == ANY)
+ // -- a0 : argc (only if argument_count() == ANY)
// -- a1 : constructor
- // -- a2 : type info cell
+ // -- a2 : AllocationSite or undefined
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(t0, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
- __ GetObjectType(a3, a3, t0);
+ __ GetObjectType(t0, t0, t1);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t0, Operand(MAP_TYPE));
+ t1, Operand(MAP_TYPE));
- // We should either have undefined in a2 or a valid cell.
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&okay_here, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, kExpectedPropertyCellInRegisterA2,
- a3, Operand(cell_map));
- __ bind(&okay_here);
+ // We should either have undefined in a2 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(a2, t0);
}
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
-
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ lw(t0, FieldMemOperand(a3, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&no_info, ne, t0, Operand(at));
- __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
+ __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
- __ Branch(¬_zero_case, ne, a0, Operand(zero_reg));
- InternalArrayNoArgumentConstructorStub stub0(kind);
- __ TailCallStub(&stub0);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0, lo, a0, Operand(1));
- __ bind(¬_zero_case);
- __ Branch(¬_one_case, gt, a0, Operand(1));
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN, hi, a0, Operand(1));
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array;
// look at the first argument.
__ lw(at, MemOperand(sp, 0));
- __ Branch(&normal_sequence, eq, at, Operand(zero_reg));
InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
}
- __ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(kind);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
-
- __ bind(¬_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
- __ TailCallStub(&stubN);
}
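
The rewritten GenerateCase folds the argc dispatch into conditional TailCallStub calls; the logic, spelled out (stub names illustrative only):

    #include <string>

    // argc == 0 -> no-argument stub; argc > 1 -> N-arguments stub; with
    // one argument, new Array(n) for n != 0 creates holes, so a packed
    // kind must fall back to its holey variant.
    std::string DispatchInternalArray(int argc, int first_arg, bool packed) {
      if (argc == 0) return "NoArgumentConstructor";
      if (argc > 1) return "NArgumentsConstructor";
      if (packed && first_arg != 0) return "HoleySingleArgumentConstructor";
      return "SingleArgumentConstructor";
    }
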
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
// but the following bit field extraction takes care of that anyway.
__ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ __ DecodeField<Map::ElementsKindBits>(a3);
if (FLAG_debug_code) {
Label done;
}
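
DecodeField<Map::ElementsKindBits> replaces the explicit Ext with the BitField's own shift and width; a generic sketch (the shift and size here are placeholders, not Map's actual layout):

    #include <cstdint>

    // Generic BitField decode: shift the field down, mask to its width.
    uint32_t DecodeField(uint32_t word, int shift, int size) {
      return (word >> shift) & ((1u << size) - 1);
    }
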
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : callee
+ // -- t0 : call_data
+ // -- a2 : holder
+ // -- a1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1)* 4] : first argument
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ Register callee = a0;
+ Register call_data = t0;
+ Register holder = a2;
+ Register api_function_address = a1;
+ Register context = cp;
+
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // Save context, callee and call data.
+ __ Push(context, callee, call_data);
+ // Load context from callee.
+ __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // Push return value and default return value.
+ __ Push(scratch, scratch);
+ __ li(scratch,
+ Operand(ExternalReference::isolate_address(isolate())));
+ // Push isolate and holder.
+ __ Push(scratch, holder);
+
+ // Prepare arguments.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
+ // a0 = FunctionCallbackInfo&
+ // Arguments is after the return address.
+ __ Addu(a0, sp, Operand(1 * kPointerSize));
+ // FunctionCallbackInfo::implicit_args_
+ __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ sw(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(at, Operand(argc));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first js argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ return_value_operand,
+ &context_restore_operand);
+}
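
The three stores above fill in what the C++ side reads as FunctionCallbackInfo; schematically (field names per the comments, the exact layout otherwise assumed):

    // Sketch of the structure the stub assembles on the stack. values_
    // points at the first JS argument; later arguments sit below it and
    // are indexed downward.
    struct FunctionCallbackInfoSketch {
      void** implicit_args;    // the 7 FCA slots pushed earlier
      void** values;           // implicit_args + kArgsLength - 1 + argc
      int length;              // argc
      int is_construct_call;   // written as 0 by this stub
    };
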
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- a2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(a2));
+
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // a1 (internal::Object** args_) as the data.
+ __ sw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
#undef __
} } // namespace v8::internal